master 2c2e712779f8 cached
486 files
3.7 MB
982.3k tokens
2458 symbols
1 requests
Download .txt
Showing preview only (3,931K chars total). Download the full file or copy to clipboard to get everything.
Repository: qianlima-lab/time-series-ptms
Branch: master
Commit: 2c2e712779f8
Files: 486
Total size: 3.7 MB

Directory structure:
gitextract_owuozh1h/

├── .idea/
│   ├── .gitignore
│   ├── deployment.xml
│   ├── inspectionProfiles/
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── modules.xml
│   ├── time-series-ptms.iml
│   └── vcs.xml
├── README.md
├── ts_anomaly_detection_methods/
│   ├── README.md
│   ├── anomaly_transformer/
│   │   ├── ATmodelbatch.py
│   │   ├── datautils.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── anomaly_transformer_model.py
│   │   │   ├── dilated_conv.py
│   │   │   ├── encoder.py
│   │   │   └── losses.py
│   │   ├── tasks/
│   │   │   ├── __init__.py
│   │   │   └── anomaly_detection.py
│   │   ├── train.py
│   │   ├── trainATbatch.py
│   │   ├── ts2vec.py
│   │   └── utils.py
│   └── other_anomaly_baselines/
│       ├── AT_solver.py
│       ├── ATmodelbatch.py
│       ├── README.md
│       ├── dataset_read_test.py
│       ├── datautils.py
│       ├── dcdetector_solver.py
│       ├── donut.py
│       ├── exp_anomaly_detection.py
│       ├── hello_test_evo.py
│       ├── lstm_vae.py
│       ├── metrics/
│       │   ├── AUC.py
│       │   ├── Matthews_correlation_coefficient.py
│       │   ├── affiliation/
│       │   │   ├── _affiliation_zone.py
│       │   │   ├── _integral_interval.py
│       │   │   ├── _single_ground_truth_event.py
│       │   │   ├── generics.py
│       │   │   └── metrics.py
│       │   ├── combine_all_scores.py
│       │   ├── customizable_f1_score.py
│       │   ├── evaluate_utils.py
│       │   ├── evaluator.py
│       │   ├── f1_score_f1_pa.py
│       │   ├── f1_series.py
│       │   ├── fc_score.py
│       │   ├── metrics.py
│       │   ├── precision_at_k.py
│       │   └── vus/
│       │       ├── analysis/
│       │       │   ├── robustness_eval.py
│       │       │   └── score_computation.py
│       │       ├── metrics.py
│       │       ├── models/
│       │       │   ├── distance.py
│       │       │   └── feature.py
│       │       └── utils/
│       │           ├── metrics.py
│       │           └── slidingWindows.py
│       ├── models/
│       │   ├── AnomalyTransformer.py
│       │   ├── DCdetector.py
│       │   ├── GPT4TS.py
│       │   ├── TimesNet.py
│       │   ├── __init__.py
│       │   ├── dilated_conv.py
│       │   ├── donut_model.py
│       │   ├── encoder.py
│       │   ├── losses.py
│       │   └── lstm_vae_model.py
│       ├── new_dataset_read_test.py
│       ├── scripts/
│       │   ├── at_zeta0.sh
│       │   ├── at_zeta1.sh
│       │   ├── generator_sh.py
│       │   ├── kpi.sh
│       │   ├── multi_at.sh
│       │   ├── ucr_at.sh
│       │   ├── ucr_at_delta_0.sh
│       │   ├── ucr_at_delta_1.sh
│       │   ├── ucr_at_delta_1_2.sh
│       │   ├── ucr_at_zeta0.sh
│       │   ├── uni_at.sh
│       │   └── yahoo.sh
│       ├── spot.py
│       ├── tasks/
│       │   ├── __init__.py
│       │   └── anomaly_detection.py
│       ├── train.py
│       ├── trainATbatch.py
│       ├── train_at_multi.py
│       ├── train_at_uni.py
│       ├── train_dcdetector.py
│       ├── train_dcdetector_nui.py
│       ├── train_donut.py
│       ├── train_donut_multi.py
│       ├── train_dspot.py
│       ├── train_dspot_multi.py
│       ├── train_gpt4ts.py
│       ├── train_gpt4ts_uni.py
│       ├── train_lstm_vae.py
│       ├── train_lstm_vae_multi.py
│       ├── train_spot.py
│       ├── train_spot_multi.py
│       ├── train_timesnet.py
│       ├── train_timesnet_uni.py
│       ├── train_ts2vec.py
│       ├── train_ts2vec_multi.py
│       ├── ts2vec.py
│       └── utils.py
├── ts_classification_methods/
│   ├── .gitignore
│   ├── README.md
│   ├── data/
│   │   ├── __init__.py
│   │   ├── dataloader.py
│   │   └── preprocessing.py
│   ├── environment.yaml
│   ├── gpt4ts/
│   │   ├── __init__.py
│   │   ├── gpt4ts_utils.py
│   │   ├── main_gpt4ts.py
│   │   ├── main_gpt4ts_ucr.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── embed.py
│   │   │   ├── gpt4ts.py
│   │   │   └── loss.py
│   │   └── scripts/
│   │       └── generator_gpt4ts.py
│   ├── model/
│   │   ├── __init__.py
│   │   ├── loss.py
│   │   └── tsm_model.py
│   ├── patchtst/
│   │   ├── __init__.py
│   │   ├── main_patchtst_iota.py
│   │   ├── main_patchtst_ucr.py
│   │   ├── mian_patchtst.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── attention.py
│   │   │   ├── basics.py
│   │   │   ├── heads.py
│   │   │   ├── patchTST.py
│   │   │   ├── pos_encoding.py
│   │   │   └── revin.py
│   │   ├── patch_mask.py
│   │   └── scripts/
│   │       └── generator_patchtst.py
│   ├── result_tsm/
│   │   ├── ChlorineConcentration/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── Crop/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── ECG5000/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── ElectricDevices/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── FordA/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── FordB/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── NonInvasiveFetalECGThorax1/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── NonInvasiveFetalECGThorax2/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── StarLightCurves/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── TwoPatterns/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryAll/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryX/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryY/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryZ/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   └── Wafer/
│   │       ├── classifier_weights.pt
│   │       ├── fcn_reconstruction_pretrain_weights.pt
│   │       ├── pretrain_weights.pt
│   │       └── rnn_reconstruction_pretrain_weights.pt
│   ├── scripts/
│   │   ├── dilated_single_norm.sh
│   │   ├── fcn_lin_set_norm.sh
│   │   ├── fcn_lin_single_norm.sh
│   │   ├── generator_dilated.py
│   │   ├── generator_fcn.py
│   │   ├── generator_pretrain_cls.py
│   │   └── transfer_pretrain_finetune.sh
│   ├── selftime_cls/
│   │   ├── __init__.py
│   │   ├── config/
│   │   │   ├── CricketX_config.json
│   │   │   ├── DodgerLoopDay_config.json
│   │   │   ├── InsectWingbeatSound_config.json
│   │   │   ├── MFPT_config.json
│   │   │   ├── UWaveGestureLibraryAll_config.json
│   │   │   └── XJTU_config.json
│   │   ├── dataloader/
│   │   │   ├── TSC_data_loader.py
│   │   │   ├── __init__.py
│   │   │   └── ucr2018.py
│   │   ├── dataprepare.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   └── eval_ssl.py
│   │   ├── model/
│   │   │   ├── __init__.py
│   │   │   ├── model_RelationalReasoning.py
│   │   │   └── model_backbone.py
│   │   ├── optim/
│   │   │   ├── __init__.py
│   │   │   ├── pretrain.py
│   │   │   ├── pytorchtools.py
│   │   │   └── train.py
│   │   ├── scripts/
│   │   │   └── ucr.sh
│   │   ├── train_ssl.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── augmentation.py
│   │       ├── datasets.py
│   │       ├── helper.py
│   │       ├── transforms.py
│   │       ├── utils.py
│   │       └── utils_plot.py
│   ├── test/
│   │   ├── __init__.py
│   │   ├── train_uea_test.py
│   │   └── uea_test.py
│   ├── timesnet/
│   │   ├── __init__.py
│   │   ├── main_timesnet.py
│   │   ├── main_timesnet_ucr.py
│   │   ├── models/
│   │   │   ├── Conv_Blocks.py
│   │   │   ├── Embed.py
│   │   │   ├── SelfAttention_Family.py
│   │   │   ├── TimesNet.py
│   │   │   ├── Transformer.py
│   │   │   ├── Transformer_EncDec.py
│   │   │   └── __init__.py
│   │   └── scripts/
│   │       └── generator_timesnet.py
│   ├── tloss_cls/
│   │   ├── default_hyperparameters.json
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   └── triplet_loss.py
│   │   ├── networks/
│   │   │   ├── __init__.py
│   │   │   ├── causal_cnn.py
│   │   │   └── lstm.py
│   │   ├── scikit_wrappers.py
│   │   ├── scripts/
│   │   │   ├── ucr.sh
│   │   │   └── uea.sh
│   │   ├── transfer_ucr.py
│   │   ├── ucr.py
│   │   ├── uea.py
│   │   └── utils.py
│   ├── train.py
│   ├── ts2vec_cls/
│   │   ├── __init__.py
│   │   ├── datautils.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── dilated_conv.py
│   │   │   ├── encoder.py
│   │   │   └── losses.py
│   │   ├── result/
│   │   │   └── ts2vec_tsm_train_val_b8_single_norm_0409_cls_result.csv
│   │   ├── scripts/
│   │   │   ├── generator_ts2vec.py
│   │   │   ├── generator_ts2vec_uea.py
│   │   │   ├── ts2vec_fcn_set_norm.sh
│   │   │   ├── ts2vec_fcn_single_norm.sh
│   │   │   ├── ts2vec_tsm_set_norm.sh
│   │   │   ├── ts2vec_tsm_single_norm.sh
│   │   │   └── ts2vec_tsm_uea.sh
│   │   ├── tasks/
│   │   │   ├── __init__.py
│   │   │   ├── _eval_protocols.py
│   │   │   └── classification.py
│   │   ├── train.py
│   │   ├── train_fcn.py
│   │   ├── train_tsm.py
│   │   ├── train_tsm_uea.py
│   │   ├── ts2vec.py
│   │   └── utils.py
│   ├── tsm_utils.py
│   ├── tst_cls/
│   │   ├── scripts/
│   │   │   ├── classification.sh
│   │   │   └── pretrain_finetune.sh
│   │   └── src/
│   │       ├── __init__.py
│   │       ├── dataprepare.py
│   │       ├── datasets/
│   │       │   ├── __init__.py
│   │       │   ├── data.py
│   │       │   ├── dataset.py
│   │       │   ├── datasplit.py
│   │       │   └── utils.py
│   │       ├── main.py
│   │       ├── models/
│   │       │   ├── __init__.py
│   │       │   ├── loss.py
│   │       │   └── ts_transformer.py
│   │       ├── optimizers.py
│   │       ├── options.py
│   │       ├── running.py
│   │       └── utils/
│   │           ├── __init__.py
│   │           ├── analysis.py
│   │           └── utils.py
│   ├── tstcc_cls/
│   │   ├── __init__.py
│   │   ├── config_files/
│   │   │   ├── ucr_Configs.py
│   │   │   └── uea_Configs.py
│   │   ├── dataloader/
│   │   │   ├── augmentations.py
│   │   │   └── dataloader.py
│   │   ├── main.py
│   │   ├── main_ucr.py
│   │   ├── main_uea.py
│   │   ├── models/
│   │   │   ├── TC.py
│   │   │   ├── attention.py
│   │   │   ├── loss.py
│   │   │   └── model.py
│   │   ├── result/
│   │   │   └── tstcc_0327_cls_result.csv
│   │   ├── scripts/
│   │   │   ├── fivefold_tstcc_ucr.sh
│   │   │   ├── fivefold_tstcc_uea.sh
│   │   │   ├── generator_ucr.py
│   │   │   ├── generator_uea.py
│   │   │   └── part_uea_tstcc.sh
│   │   ├── trainer/
│   │   │   └── trainer.py
│   │   └── utils.py
│   ├── visualize.py
│   └── visuals/
│       ├── GunPoint/
│       │   ├── classifier_NonInvasiveFetalECGThorax1_linear.pt
│       │   ├── direct_dilated_classifier.pt
│       │   ├── direct_dilated_encoder.pt
│       │   ├── direct_fcn_classifier.pt
│       │   ├── direct_fcn_encoder.pt
│       │   ├── encoder_NonInvasiveFetalECGThorax1_linear.pt
│       │   ├── supervised_classifier_ElectricDevices_linear.pt
│       │   ├── supervised_classifier_UWaveGestureLibraryX_linear.pt
│       │   ├── supervised_encoder_ElectricDevices_linear.pt
│       │   ├── supervised_encoder_UWaveGestureLibraryX_linear.pt
│       │   ├── unsupervised_classifier_UWaveGestureLibraryX_linear.pt
│       │   └── unsupervised_encoder_UWaveGestureLibraryX_linear.pt
│       ├── MixedShapesSmallTrain/
│       │   ├── direct_fcn_linear_encoder_weights.pt
│       │   ├── fcn_linear_encoder_finetune_weights_ElectricDevices.pt
│       │   └── fcn_linear_encoder_finetune_weights_UWaveGestureLibraryZ.pt
│       └── Wine/
│           ├── direct_fcn_encoder.pt
│           ├── direct_fcn_linear_encoder_weights.pt
│           ├── encoder_Crop_linear.pt
│           ├── encoder_NonInvasiveFetalECGThorax1_linear.pt
│           └── encoder_UWaveGestureLibraryZ_linear.pt
└── ts_forecasting_methods/
    ├── CoST/
    │   ├── CODEOWNERS
    │   ├── CODE_OF_CONDUCT.md
    │   ├── LICENSE.txt
    │   ├── README.md
    │   ├── SECURITY.md
    │   ├── cost.py
    │   ├── datasets/
    │   │   ├── PLACE_DATASETS_HERE
    │   │   ├── electricity.py
    │   │   └── m5.py
    │   ├── datautils.py
    │   ├── models/
    │   │   ├── __init__.py
    │   │   ├── dilated_conv.py
    │   │   └── encoder.py
    │   ├── requirements.txt
    │   ├── scripts/
    │   │   ├── ETT_CoST.sh
    │   │   ├── Electricity_CoST.sh
    │   │   ├── M5_CoST.sh
    │   │   └── Weather_CoST.sh
    │   ├── tasks/
    │   │   ├── __init__.py
    │   │   ├── _eval_protocols.py
    │   │   └── forecasting.py
    │   ├── train.py
    │   └── utils.py
    ├── Other_baselines/
    │   ├── README.md
    │   ├── __init__.py
    │   ├── data_config.yml
    │   ├── data_provider/
    │   │   ├── __init__.py
    │   │   ├── data_factory.py
    │   │   ├── data_factory_tempo.py
    │   │   ├── data_loader.py
    │   │   ├── data_loader_tempo.py
    │   │   ├── m4.py
    │   │   └── uea.py
    │   ├── exp/
    │   │   ├── __init__.py
    │   │   ├── exp_basic.py
    │   │   ├── exp_basic_patch.py
    │   │   ├── exp_long_term_forecasting.py
    │   │   ├── exp_main.py
    │   │   └── exp_short_term_forecasting.py
    │   ├── layers/
    │   │   ├── AutoCorrelation.py
    │   │   ├── Autoformer_EncDec.py
    │   │   ├── Conv_Blocks.py
    │   │   ├── Embed.py
    │   │   ├── PatchTST_backbone.py
    │   │   ├── PatchTST_layers.py
    │   │   ├── RevIN.py
    │   │   ├── SelfAttention_Family.py
    │   │   ├── Transformer_EncDec.py
    │   │   └── __init__.py
    │   ├── models/
    │   │   ├── Autoformer.py
    │   │   ├── DLinear.py
    │   │   ├── GPT4TS.py
    │   │   ├── Informer.py
    │   │   ├── LogTrans.py
    │   │   ├── PatchTST.py
    │   │   ├── PatchTST_raw.py
    │   │   ├── TCN.py
    │   │   ├── TEMPO.py
    │   │   ├── TimesNet.py
    │   │   ├── __init__.py
    │   │   └── iTransformer.py
    │   ├── train_autoformer.py
    │   ├── train_cost.py
    │   ├── train_dlinear.py
    │   ├── train_gpt4ts.py
    │   ├── train_informer.py
    │   ├── train_itransformer.py
    │   ├── train_logtrans.py
    │   ├── train_patchtst.py
    │   ├── train_tcn.py
    │   ├── train_tempo.py
    │   ├── train_timesnet.py
    │   ├── train_ts2vec.py
    │   └── utils/
    │       ├── ADFtest.py
    │       ├── __init__.py
    │       ├── augmentation.py
    │       ├── dtw.py
    │       ├── dtw_metric.py
    │       ├── losses.py
    │       ├── m4_summary.py
    │       ├── masking.py
    │       ├── metrics.py
    │       ├── print_args.py
    │       ├── rev_in.py
    │       ├── timefeatures.py
    │       ├── tools.py
    │       └── tools_tempo.py
    ├── README.md
    ├── SupervisedBaselines/
    │   ├── Dockerfile
    │   ├── LICENSE
    │   ├── Makefile
    │   ├── README.md
    │   ├── data_provider/
    │   │   ├── __init__.py
    │   │   ├── data_factory.py
    │   │   └── data_loader.py
    │   ├── environment.yml
    │   ├── exp/
    │   │   ├── __init__.py
    │   │   ├── exp_basic.py
    │   │   ├── exp_informer.py
    │   │   └── exp_main.py
    │   ├── layers/
    │   │   ├── AutoCorrelation.py
    │   │   ├── Autoformer_EncDec.py
    │   │   ├── Embed.py
    │   │   ├── SelfAttention_Family.py
    │   │   ├── Transformer_EncDec.py
    │   │   └── __init__.py
    │   ├── requirements.txt
    │   ├── run.py
    │   └── utils/
    │       ├── __init__.py
    │       ├── download_data.py
    │       ├── masking.py
    │       ├── metrics.py
    │       ├── timefeatures.py
    │       └── tools.py
    └── ts2vec/
        ├── README.md
        ├── __init__.py
        ├── data_provider/
        │   ├── __init__.py
        │   ├── data_factory.py
        │   ├── data_loader.py
        │   ├── m4.py
        │   ├── metrics.py
        │   ├── tools.py
        │   └── uea.py
        ├── datautils.py
        ├── forecasting_datasets_load_test.py
        ├── models/
        │   ├── __init__.py
        │   ├── dilated_conv.py
        │   ├── encoder.py
        │   └── losses.py
        ├── requirements.txt
        ├── scripts/
        │   ├── electricity.sh
        │   ├── ett.sh
        │   ├── kpi.sh
        │   ├── ucr.sh
        │   ├── uea.sh
        │   └── yahoo.sh
        ├── tasks/
        │   ├── __init__.py
        │   ├── _eval_protocols.py
        │   ├── anomaly_detection.py
        │   ├── classification.py
        │   └── forecasting.py
        ├── train.py
        ├── ts2vec.py
        └── utils.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .idea/.gitignore
================================================
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml


================================================
FILE: .idea/deployment.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false">
    <serverData>
      <paths name="hm@222.201.187.49:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@116.56.134.138:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@202.38.247.104:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@202.38.247.12:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@202.38.247.12:22 (2)">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@222.201.144.244:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@222.201.144.245:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@222.201.145.184:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@5560n6l068.oicp.vip:38981">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz@5560n6l068.oicp.vip:53987">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
      <paths name="lz_theta@222.201.145.184:22">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
    </serverData>
  </component>
</project>

================================================
FILE: .idea/inspectionProfiles/Project_Default.xml
================================================
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyInterpreterInspection" enabled="false" level="WARNING" enabled_by_default="false" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="4">
            <item index="0" class="java.lang.String" itemvalue="scienceplots" />
            <item index="1" class="java.lang.String" itemvalue="latextable" />
            <item index="2" class="java.lang.String" itemvalue="texttable" />
            <item index="3" class="java.lang.String" itemvalue="overrides" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>

================================================
FILE: .idea/inspectionProfiles/profiles_settings.xml
================================================
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

================================================
FILE: .idea/modules.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/time-series-ptms.iml" filepath="$PROJECT_DIR$/.idea/time-series-ptms.iml" />
    </modules>
  </component>
</project>

================================================
FILE: .idea/time-series-ptms.iml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="GOOGLE" />
    <option name="myDocStringFormat" value="Google" />
  </component>
</module>

================================================
FILE: .idea/vcs.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

================================================
FILE: README.md
================================================
# [A Survey on Time-Series Pre-Trained Models](https://arxiv.org/pdf/2305.10716v2)

This is the training code for our paper *"[A Survey on Time-Series Pre-Trained Models](https://arxiv.org/pdf/2305.10716v2)"*, which has been accepted for publication in the IEEE Transactions on Knowledge and Data Engineering (TKDE-24).

## Overview

Time-Series Mining (TSM) is an important research area since it shows great potential in practical applications. Deep learning models that rely on massive labeled data have been utilized for TSM successfully. However, constructing a large-scale well-labeled dataset is difficult due to data annotation costs. 
Recently, pre-trained models have gradually attracted attention in the time series domain due to their remarkable performance in computer vision and natural language processing. In this survey, we provide a comprehensive review of Time-Series Pre-Trained Models (TS-PTMs), aiming to guide the understanding, applying, and studying TS-PTMs. 
Specifically, we first briefly introduce the typical deep learning models employed in TSM. Then, we give an overview of TS-PTMs according to the pre-training techniques. The main categories we explore include supervised, unsupervised, and self-supervised TS-PTMs.
Further, extensive experiments involving 27 methods, 434 datasets, and 679 transfer learning scenarios are conducted to analyze the advantages and disadvantages of transfer learning strategies, Transformer-based models, and representative TS-PTMs. Finally, we point out some potential directions of TS-PTMs for future work.

<p align="center">
    <img src="pictures/framework.jpg" width="1000" align="center">
</p>


## Datasets
The datasets used in this project are as follows:
### Time-Series Classification
* [128 UCR datasets](https://www.cs.ucr.edu/~eamonn/time_series_data_2018/UCRArchive_2018.zip)
* [30 UEA datasets](http://www.timeseriesclassification.com/Downloads/Archives/Multivariate2018_arff.zip)
* [SleepEEG dataset](https://www.physionet.org/content/sleep-edfx/1.0.0/) 
* [Epilepsy dataset](https://repositori.upf.edu/handle/10230/42894) 
* [FD-A and FD-B datasets](https://mb.uni-paderborn.de/en/kat/main-research/datacenter/bearing-datacenter/data-sets-and-download) 
* [HAR dataset](https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones) 
* [Gesture dataset](http://www.timeseriesclassification.com/description.php?Dataset=UWaveGestureLibrary) 
* [ECG dataset](https://physionet.org/content/challenge-2017/1.0.0/) 
* [EMG dataset](https://physionet.org/content/emgdb/1.0.0/) 

### Time-Series Forecasting
* [ETDataset (including 4 datasets)](https://github.com/zhouhaoyi/ETDataset)
* [Electricity](https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014)
* [Traffic](http://pems.dot.ca.gov)
* [Weather](https://www.bgc-jena.mpg.de/wetter)
* [Exchange](https://github.com/laiguokun/multivariate-time-series-data)
* [ILI](https://gis.cdc.gov/grasp/fluview/fluportaldashboard.html)

### Time-Series Anomaly Detection
* [Yahoo dataset](https://webscope.sandbox.yahoo.com/catalog.php?datatype=s&did=70) 
* [KPI dataset](http://test-10056879.file.myqcloud.com/10056879/test/20180524_78431960010324/KPI%E5%BC%82%E5%B8%B8%E6%A3%80%E6%B5%8B%E5%86%B3%E8%B5%9B%E6%95%B0%E6%8D%AE%E9%9B%86.zip)
* [250 UCR anomaly detection datasets](https://wu.renjie.im/research/anomaly-benchmarks-are-flawed/#ucr-time-series-anomaly-archiv) 
* [MSL dataset](https://github.com/khundman/telemanom) 
* [SMAP dataset](https://github.com/khundman/telemanom) 
* [PSM dataset](https://github.com/eBay/RANSynCoders) 
* [SMD dataset](https://github.com/NetManAIOps/OmniAnomaly) 
* [SWaT dataset](https://itrust.sutd.edu.sg/itrust-labs_datasets/dataset_info/#swat) 
* [NIPS-TS-SWAN dataset](https://github.com/datamllab/tods/tree/benchmark/benchmark) 
* [NIPS-TS-GECCO dataset](https://github.com/datamllab/tods/tree/benchmark/benchmark) 



## Pre-Trained Models on Time Series Classification
- [x] [FCN](https://github.com/cauchyturing/UCR_Time_Series_Classification_Deep_Learning_Baseline)
- [x] [FCN Encoder+CNN Decoder](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/model/tsm_model.py)
- [x] [FCN Encoder+RNN Decoder](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/model/tsm_model.py)
- [x] [TCN](https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries)
- [x] [Transformer](https://github.com/gzerveas/mvts_transformer)
- [x] [TST](https://github.com/gzerveas/mvts_transformer)
- [x] [T-Loss](https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries)
- [x] [SelfTime](https://github.com/haoyfan/SelfTime)
- [x] [TS-TCC](https://github.com/emadeldeen24/TS-TCC)
- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)
- [x] [TimesNet](https://github.com/thuml/TimesNet)
- [x] [PatchTST](https://github.com/yuqinie98/PatchTST)
- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)

For details, please refer to [ts_classification_methods/README](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_classification_methods/README.md).

## Pre-Trained Models on Time Series Forecasting

- [x] [LogTrans](https://github.com/AIStream-Peelout/flow-forecast)
- [x] [TCN](https://github.com/locuslab/TCN)
- [x] [Informer](https://github.com/zhouhaoyi/Informer2020)
- [x] [Autoformer](https://github.com/thuml/autoformer)
- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)
- [x] [CoST](https://github.com/salesforce/CoST)
- [x] [TimesNet](https://github.com/thuml/TimesNet)
- [x] [PatchTST](https://github.com/yuqinie98/PatchTST)
- [x] [DLinear](https://github.com/vivva/DLinear)
- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)
- [x] [TEMPO](https://github.com/DC-research/TEMPO)
- [x] [iTransformer](https://github.com/thuml/iTransformer)

For details, please refer to [ts_forecasting_methods/README](https://github.com/qianlima-lab/transfer-to-transformer-tsm/blob/master/ts_forecasting_methods/README.md).

## Pre-Trained Models on Time Series Anomaly Detection

- [x] [SPOT](https://github.com/limjcst/ads-evt)
- [x] [DSPOT](https://github.com/limjcst/ads-evt)
- [x] [LSTM-VAE](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection)
- [x] [DONUT](https://github.com/NetManAIOps/donut)
- [x] [Spectral Residual (SR)](https://dl.acm.org/doi/10.1145/3292500.3330680)
- [x] [Anomaly Transformer (AT)](https://github.com/spencerbraun/anomaly_transformer_pytorch)
- [x] [TS2Vec](https://github.com/zhihanyue/ts2vec)
- [x] [TimesNet](https://github.com/thuml/TimesNet)
- [x] [GPT4TS](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)
- [x] [DCdetector](https://github.com/DAMO-DI-ML/KDD2023-DCdetector)

For details, please refer to [ts_anomaly_detection_methods/README](https://github.com/qianlima-lab/transfer-to-transformer-tsm/blob/master/ts_anomaly_detection_methods/README.md).

## Acknowledgments
We thank the anonymous reviewers for their helpful feedback. We thank Professor **Eamonn Keogh** from UCR and all the people who have contributed to the UCR\&UEA time series archives and other time series datasets. The authors would like to thank 
Professor **Garrison W. Cottrell** from UCSD, and **Chuxin Chen**, **Xidi Cai**, **Yu Chen**, and **Peitian Ma** from SCUT for the helpful suggestions. 

## Citation
If you use this code for your research, please cite our paper:
```
@article{ma2024survey,
  title={A survey on time-series pre-trained models},
  author={Ma, Qianli and Liu, Zhen and Zheng, Zhenjing and Huang, Ziyang and Zhu, Siying and Yu, Zhongzhong and Kwok, James T},
  journal={IEEE Transactions on Knowledge and Data Engineering},
  year={2024}
}
```



================================================
FILE: ts_anomaly_detection_methods/README.md
================================================
This is the time-series anomaly detection training code for our paper *"A Survey on Time-Series Pre-Trained Models"*

## Baselines

|  ID  |                            Method                            | Year |   Press   |                         Source Code                          |
| :--: | :----------------------------------------------------------: | :--: | :-------: | :----------------------------------------------------------: |
|  1   |  [SPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |
|  2   | [DSPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |
|  3   | [LSTM-VAE](https://ieeexplore.ieee.org/abstract/document/8279425) | 2018 | IEEE RA.L | [github_link](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection) |
|  4   | [DONUT](https://dl.acm.org/doi/abs/10.1145/3178876.3185996)  | 2018 |    WWW    |     [github_link](https://github.com/NetManAIOps/donut)      |
|  5   |  [Spectral Residual (SR)*](https://dl.acm.org/doi/abs/10.1145/3292500.3330680)   | 2019 |    KDD    |                              -                               |
|  6   |            [Anomaly Transformer (AT)](https://arxiv.org/abs/2110.02642)            | 2022 |   ICLR    | [github_link](https://github.com/spencerbraun/anomaly_transformer_pytorch) |
|  7   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | 2022 |   AAAI    |      [github_link](https://github.com/yuezhihan/ts2vec)      |
|  8   | [TimesNet](https://openreview.net/pdf?id=ju_Uqw384Oq) | 2023 |   ICLR    |      [github_link](https://github.com/thuml/TimesNet)      |
|  9   | [GPT4TS](https://arxiv.org/abs/2302.11939) | 2023 |   NeurIPS    |      [github_link](https://github.com/DAMO-DI-ML/NeurIPS2023-One-Fits-All)      |
|  10   | [DCdetector](https://arxiv.org/abs/2306.10347) | 2023 |   KDD    |      [github_link](https://github.com/DAMO-DI-ML/KDD2023-DCdetector)      |


For details, please refer to [ts_anomaly_detection_methods/other_anomaly_baselines/README](https://github.com/qianlima-lab/time-series-ptms/blob/master/ts_anomaly_detection_methods/other_anomaly_baselines/README.md)


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/ATmodelbatch.py
================================================
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from utils import data_slice,split_N_pad
import time
from torch.utils.data import DataLoader, TensorDataset, SequentialSampler

# Run every tensor operation in double precision, on GPU when one is present.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent PyTorch
# releases in favour of torch.set_default_dtype / torch.set_default_device —
# confirm the pinned torch version still supports it.
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.DoubleTensor')
else:
    torch.set_default_tensor_type('torch.DoubleTensor')

class AnomalyAttention(nn.Module):
    """Single-head anomaly attention over a batch of windows.

    After each forward pass the module exposes two association matrices of
    shape [batch, N, N]:

    * ``self.S`` — the series association (row-wise softmax attention);
    * ``self.P`` — the prior association (a learned-width Gaussian over the
      positional distance |i - j|, row-normalised).
    """

    def __init__(self, N, d_model):
        super(AnomalyAttention, self).__init__()
        self.d_model = d_model
        self.N = N

        # Query / key / value projections plus a per-position Gaussian width.
        self.Wq = nn.Linear(d_model, d_model, bias=False)
        self.Wk = nn.Linear(d_model, d_model, bias=False)
        self.Wv = nn.Linear(d_model, d_model, bias=False)
        self.Ws = nn.Linear(d_model, 1, bias=False)

        # Placeholders only; every field is overwritten on each forward pass.
        self.Q = self.K = self.V = self.sigma = torch.zeros((N, d_model))
        self.P = torch.zeros((N, N))
        self.S = torch.zeros((N, N))

    def forward(self, x):
        # x: [batch, N, d_model] -> reconstruction of the same shape.
        self.initialize(x)
        self.S = self.series_association()
        self.P = self.prior_association()
        return self.reconstruction()

    def initialize(self, x):
        # Cache the projections so the association helpers can reuse them.
        self.Q = self.Wq(x)
        self.K = self.Wk(x)
        self.V = self.Wv(x)
        self.sigma = self.Ws(x)

    @staticmethod
    def gaussian_kernel(mean, sigma):
        # Standard Gaussian density evaluated at ``mean`` with width ``sigma``.
        coeff = 1 / (math.sqrt(2 * torch.pi) * torch.abs(sigma))
        return coeff * torch.exp(-0.5 * (mean / sigma).pow(2))

    def prior_association(self):
        # |i - j| distance matrix between the N window positions.
        grid = np.indices((self.N, self.N))
        dist = torch.from_numpy(np.abs(grid[0] - grid[1]))
        if torch.cuda.is_available():
            dist = dist.cuda()
        # Gaussian over distances, normalised along each row: [batch, N, N].
        gaussian = self.gaussian_kernel(dist.double(), self.sigma)
        gaussian /= gaussian.sum(dim=-1).view(-1, self.N, 1)
        return gaussian

    def series_association(self):
        # Scaled dot-product attention with a row-wise softmax: [batch, N, N].
        scores = torch.matmul(self.Q, self.K.transpose(1, 2))
        return F.softmax(scores / math.sqrt(self.d_model), dim=2)

    def reconstruction(self):
        return torch.matmul(self.S, self.V)

class AnomalyTransformerBlock(nn.Module):
    """Anomaly attention followed by a position-wise feed-forward layer,
    each sub-layer wrapped in a residual connection plus LayerNorm.

    Input and output are both [batch, N, d_model].
    """

    def __init__(self, N, d_model):
        super().__init__()
        self.N, self.d_model = N, d_model

        self.attention = AnomalyAttention(self.N, self.d_model)
        self.ln1 = nn.LayerNorm(self.d_model)
        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())
        self.ln2 = nn.LayerNorm(self.d_model)

    def forward(self, x):
        # Attention sub-layer with residual, then feed-forward with residual.
        attended = self.ln1(self.attention(x) + x)
        return self.ln2(self.ff(attended) + attended)

class AnomalyTransformer(nn.Module):
    def __init__(self,batch_size, N, in_channel, d_model, layers, lambda_):
        super().__init__()
        self.batch_size = batch_size
        self.in_channel = in_channel
        self.N = N
        self.d_model = d_model

        self.input2hidden = nn.Linear(self.in_channel,self.d_model)
        self.hidden2output = nn.Linear(self.d_model,self.in_channel)
        self.blocks = nn.ModuleList(
            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]
        )
        self.output = None
        self.lambda_ = lambda_

        self.P_layers = []
        self.S_layers = []
    def to_string(self):
        return 'in_channel:%d_N:%d_dmodel:%d_' % (self.in_channel,self.N,self.d_model)

    def forward(self, x):
        
        # x: [batch,N,in_channel]
        self.P_layers = []
        self.S_layers = []
        x = self.input2hidden(x)
        for idx, block in enumerate(self.blocks):
            x = block(x)
            # x: [batch,N,d_model]
            self.P_layers.append(block.attention.P)
            self.S_layers.append(block.attention.S)
        self.output = self.hidden2output(x)
        # output: [batch,N,in_channel]
        return self.output
    
    # def layer_association_discrepancy(self, Pl, Sl, x):
    #     rowwise_kl = lambda row: (
    #         F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])
    #     )
    #     ad_vector = torch.concat(
    #         [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]
    #     )
    #     return ad_vector
    # ad_vector: [N]
    
    # def rowwise_kl (self,Pl,Sl,idx,row):
    #     return F.kl_div(Pl[idx,row, :], Sl[idx,row, :]) + F.kl_div(Sl[idx,row, :], Pl[idx,row, :])
    # def layer_association_discrepancy(self, Pl, Sl, x):
        
    #     wholetmp=[]
    #     for idx in range(Pl.shape[0]):
    #         rowtmp=[]
    #         for row in range(Pl.shape[1]):
    #             rowtmp.append(self.rowwise_kl(Pl,Sl,idx,row).unsqueeze(0))
    #         wholetmp.append(torch.cat(rowtmp))
                
    #     ad_vector = torch.cat( 
    #         wholetmp
    #     ).reshape([-1,Pl.shape[1]])
    #     #ad_vector: [batch,N]
    #     return ad_vector
    
    def rowwise_kl(self, row, Pl, Sl, eps=1e-4):
        Pl_r = Pl[:,row,:]
        Sl_r = Sl[:,row,:]
        Pl_r = (Pl_r+ eps) / torch.sum(Pl_r + eps, dim=-1, keepdims=True)
        Sl_r = (Sl_r + eps) / torch.sum(Sl_r+ eps, dim=-1, keepdims=True)
        '''TODO:改这个函数'''
        ret = torch.sum( 
            F.kl_div( torch.log(Pl_r), Sl_r, reduction='none') + F.kl_div( torch.log(Sl_r), Pl_r, reduction='none'), dim=1
         )
        return ret
    def layer_association_discrepancy(self, Pl, Sl, x):
        ad_vector = torch.concat(
            [self.rowwise_kl(row, Pl, Sl).unsqueeze(1) for row in range(Pl.shape[1])], dim=1
        )
        return ad_vector

    def association_discrepancy(self, P_list, S_list, x):
        
        ret = (1 / len(P_list)) * sum(
            [
                self.layer_association_discrepancy(P, S, x)
                for P, S in zip(P_list, S_list)
            ]
        )
        # ret: [batch,N]
        return ret

    def loss_function(self, x_hat, P_list, S_list, lambda_, x):
        #P_list: [layers,batch,N,N]
        #S_list: [layers,batch,N,N]
        frob_norm = torch.linalg.matrix_norm(x_hat - x, ord="fro")
        ret = frob_norm - (
            lambda_
            * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x),dim=1, ord=1)
        )
        return ret.mean()

    def min_loss(self, x):
        
        P_list = self.P_layers
        S_list = [S.detach() for S in self.S_layers]
        # S_list = self.S_layers
        lambda_ = -self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)

    def max_loss(self, x):
        P_list = [P.detach() for P in self.P_layers]
        # P_list = self.P_layers
        S_list = self.S_layers
        lambda_ = self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)
    
    def anomaly_score_whole(self, x):
        # x:[length,dim]
        x = np.array(split_N_pad(x.reshape([-1,1]),self.N))
        '''TODO:测试data_slice'''
        data = torch.from_numpy(x)
        if torch.cuda.is_available():
            data = data.cuda()
        dataset = TensorDataset(data)
        dataloader = DataLoader(dataset, batch_size=min(self.batch_size, len(dataset)), shuffle=False, drop_last=False)
        scores=[]
        for step, batch in enumerate(dataloader):
            batch=batch[0]
            score = self.anomaly_score(batch)
            scores.append(score)
        return torch.cat(scores).flatten()
            
    

    def anomaly_score(self, x):
        # 原 x:[N,in_channel]
        output = self.forward(x)
        tmp = -self.association_discrepancy(self.P_layers, self.S_layers, x)
        ad = F.softmax(
            tmp, dim=0
        )
        assert ad.shape[1] == self.N

        # norm = torch.tensor(
        #     [
        #         torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)
        #         for i in range(self.N)
        #     ]
        # )
        norm = []
        for idx in range(x.shape[0]):
            tmp = torch.tensor(
                [
                    torch.linalg.norm(x[idx,i, :] - self.output[idx,i, :], ord=2)
                    for i in range(self.N)
                ]
            )
            norm.append(tmp)
        norm = torch.cat(norm).reshape([-1,self.N])
        assert norm.shape[1] == self.N
        score = torch.mul(ad, norm)
        return score


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/datautils.py
================================================
import os
import numpy as np
import pandas as pd
import math
import random
from datetime import datetime
import pickle
from utils import pkl_load, pad_nan_to_target,data_slice,pad_zero_to_target
from scipy.io.arff import loadarff
from sklearn.preprocessing import StandardScaler, MinMaxScaler

def load_UCR(dataset):
    """Load one UCR-archive classification dataset.

    Reads ``datasets/UCR/<dataset>/<dataset>_TRAIN.tsv`` and ``..._TEST.tsv``
    (tab-separated, label in the first column) and remaps the raw labels to
    ``{0, ..., L-1}``.  For the archive datasets that ship un-normalised, a
    global z-normalisation with the training split's statistics is applied.

    Returns:
        (train, train_labels, test, test_labels) where the data arrays have
        shape (n_samples, series_length, 1).
    """
    folder = os.path.join('datasets/UCR', dataset)
    train_array, test_array = (
        np.array(pd.read_csv(os.path.join(folder, dataset + suffix),
                             sep='\t', header=None))
        for suffix in ("_TRAIN.tsv", "_TEST.tsv")
    )

    # Remap the raw label values onto {0, ..., L-1}.
    transform = {label: i for i, label in enumerate(np.unique(train_array[:, 0]))}
    mapper = np.vectorize(transform.get)
    train = train_array[:, 1:].astype(np.float64)
    test = test_array[:, 1:].astype(np.float64)
    train_labels = mapper(train_array[:, 0])
    test_labels = mapper(test_array[:, 0])

    # These datasets are distributed without normalisation.  To keep the
    # amplitude information we normalise with statistics of the whole
    # training split, not per individual series.
    non_normalized = {
        'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ',
        'BME', 'Chinatown', 'Crop', 'EOGHorizontalSignal', 'EOGVerticalSignal',
        'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3',
        'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPointAgeSpan',
        'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'HouseTwenty',
        'InsectEPGRegularTrain', 'InsectEPGSmallTrain', 'MelbournePedestrian',
        'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure',
        'PigCVP', 'PLAID', 'PowerCons', 'Rock', 'SemgHandGenderCh2',
        'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
        'SmoothSubspace', 'UMD',
    }
    if dataset in non_normalized:
        mean, std = np.nanmean(train), np.nanstd(train)
        train = (train - mean) / std
        test = (test - mean) / std
    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels

def load_anomaly(name):
    """Load the pickled anomaly-detection benchmark ``datasets/<name>.pkl``.

    Returns the train data/labels/timestamps, the test data/labels/timestamps
    and the evaluation ``delay``, in that order.
    """
    res = pkl_load(f'datasets/{name}.pkl')
    keys = ('all_train_data', 'all_train_labels', 'all_train_timestamps',
            'all_test_data', 'all_test_labels', 'all_test_timestamps',
            'delay')
    return tuple(res[k] for k in keys)


def gen_ano_train_data(all_train_data):
    """Stack the per-series training arrays into one padded batch.

    Each series is zero-padded along axis 0 to the length of the longest one,
    then the series are stacked and a trailing channel axis is added,
    yielding shape (n_series, max_len, 1).
    """
    maxl = max(len(all_train_data[k]) for k in all_train_data)
    padded = [pad_zero_to_target(all_train_data[k], maxl, axis=0)
              for k in all_train_data]
    return np.expand_dims(np.stack(padded), 2)

if __name__ == '__main__':
    # Smoke test: load the 'yahoo' benchmark, build the zero-padded training
    # batch and slice it into windows of length 100.
    dataset='yahoo'
    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = load_anomaly(dataset)
    train_data = gen_ano_train_data(all_train_data)
    train_data_s = data_slice(train_data, 100)

================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/models/__init__.py
================================================
from .encoder import TSEncoder


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/models/anomaly_transformer_model.py
================================================
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class AnomalyAttention(nn.Module):
    """Anomaly attention for a single (unbatched) window of shape [N, d_model].

    After a forward pass the module exposes:

    * ``self.P`` — the prior association (Gaussian in the positional distance
      |i - j| with a learned per-position width, row-normalised);
    * ``self.S`` — the series association (scaled dot-product attention).
    """

    def __init__(self, N, d_model):
        super(AnomalyAttention, self).__init__()
        self.d_model = d_model
        self.N = N

        # Query / key / value projections plus the Gaussian-width head.
        self.Wq = nn.Linear(d_model, d_model, bias=False)
        self.Wk = nn.Linear(d_model, d_model, bias=False)
        self.Wv = nn.Linear(d_model, d_model, bias=False)
        self.Ws = nn.Linear(d_model, 1, bias=False)

        # Placeholders, replaced by initialize() on every forward pass.
        self.Q = torch.zeros((N, d_model))
        self.K = torch.zeros((N, d_model))
        self.V = torch.zeros((N, d_model))
        self.sigma = torch.zeros((N, 1))

        self.P = torch.zeros((N, N))
        self.S = torch.zeros((N, N))

    def forward(self, x):
        # x: [N, d_model] -> reconstruction of the same shape.
        self.initialize(x)
        self.P = self.prior_association()
        self.S = self.series_association()
        return self.reconstruction()

    def initialize(self, x):
        # Cache the projections for the association helpers.
        self.Q, self.K = self.Wq(x), self.Wk(x)
        self.V, self.sigma = self.Wv(x), self.Ws(x)

    @staticmethod
    def gaussian_kernel(mean, sigma):
        scale = 1 / (math.sqrt(2 * torch.pi) * sigma)
        return scale * torch.exp(-0.5 * (mean / sigma).pow(2))

    def prior_association(self):
        # |i - j| positional-distance matrix, pushed through the Gaussian and
        # normalised along each row.
        grid = np.indices((self.N, self.N))
        distances = torch.from_numpy(np.abs(grid[0] - grid[1]))
        gaussian = self.gaussian_kernel(distances.float(), self.sigma)
        gaussian /= gaussian.sum(dim=-1).view(-1, 1)
        return gaussian

    def series_association(self):
        return F.softmax((self.Q @ self.K.T) / math.sqrt(self.d_model), dim=0)

    def reconstruction(self):
        return self.S @ self.V


class AnomalyTransformerBlock(nn.Module):
    """Anomaly attention plus a position-wise feed-forward layer, each
    sub-layer wrapped in a residual connection followed by LayerNorm."""

    def __init__(self, N, d_model):
        super().__init__()
        self.N, self.d_model = N, d_model

        self.attention = AnomalyAttention(self.N, self.d_model)
        self.ln1 = nn.LayerNorm(self.d_model)
        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())
        self.ln2 = nn.LayerNorm(self.d_model)

    def forward(self, x):
        # Attention sub-layer with residual, then feed-forward with residual.
        z = self.ln1(self.attention(x) + x)
        return self.ln2(self.ff(z) + z)


class AnomalyTransformer(nn.Module):
    """Anomaly Transformer (single-window variant).

    A stack of AnomalyTransformerBlocks after an input projection
    (``in_channel`` -> ``d_model``).  Each forward pass caches the per-layer
    prior (P) and series (S) associations, which feed the minimax
    association-discrepancy losses and the anomaly score.
    """

    def __init__(self, N, in_channel, d_model, layers, lambda_):
        super().__init__()
        self.in_channel = in_channel    # input feature dimension
        self.N = N                      # window length
        self.d_model = d_model          # hidden dimension

        self.input2hidden = nn.Linear(self.in_channel, self.d_model)
        self.blocks = nn.ModuleList(
            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]
        )
        self.output = None      # last reconstruction, cached for the losses
        self.lambda_ = lambda_  # weight of the association-discrepancy term

        # Per-layer prior / series associations of the most recent forward.
        self.P_layers = []
        self.S_layers = []

    def forward(self, x):
        # BUG FIX: the association lists must be rebuilt on every forward
        # pass.  Previously they grew across calls, so the losses and the
        # anomaly score averaged associations from stale inputs (the batched
        # implementation already resets them at the top of forward()).
        self.P_layers = []
        self.S_layers = []

        x = self.input2hidden(x)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            self.P_layers.append(block.attention.P)
            self.S_layers.append(block.attention.S)

        self.output = x # N x d_model
        return x 

    def layer_association_discrepancy(self, Pl, Sl, x):
        # Symmetric KL between prior and series association, row by row -> [N].
        # NOTE(review): F.kl_div expects log-probabilities as its first
        # argument, but raw probabilities are passed here — confirm against
        # the training code before relying on absolute loss values.
        rowwise_kl = lambda row: (
            F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])
        )
        ad_vector = torch.concat(
            [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]
        )
        return ad_vector

    def association_discrepancy(self, P_list, S_list, x):
        # Mean of the per-layer discrepancy vectors -> [N].
        return (1 / len(P_list)) * sum(
            [
                self.layer_association_discrepancy(P, S, x)
                for P, S in zip(P_list, S_list)
            ]
        )

    def loss_function(self, x_hat, P_list, S_list, lambda_, x):
        # Frobenius reconstruction error minus the weighted L1 norm of the
        # association discrepancy.
        frob_norm = torch.linalg.matrix_norm(x_hat - x, ord="fro")
        return frob_norm - (
            lambda_
            * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x), ord=1)
        )

    def min_loss(self, x):
        # Minimise phase of the minimax strategy: S is detached, so gradients
        # only flow through the prior association.
        P_list = self.P_layers
        S_list = [S.detach() for S in self.S_layers]
        lambda_ = -self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)

    def max_loss(self, x):
        # Maximise phase: P is detached, so gradients only flow through the
        # series association.
        P_list = [P.detach() for P in self.P_layers]
        S_list = self.S_layers
        lambda_ = self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)

    def anomaly_score(self, x):
        # Pointwise score: softmin of the association discrepancy times the
        # per-position reconstruction error (uses the cached ``self.output``
        # of the latest forward pass).
        ad = F.softmax(
            -self.association_discrepancy(self.P_layers, self.S_layers, x), dim=0
        )

        assert ad.shape[0] == self.N

        norm = torch.tensor(
            [
                torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)
                for i in range(self.N)
            ]
        )

        assert norm.shape[0] == self.N

        score = torch.mul(ad, norm)

        return score

================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/models/dilated_conv.py
================================================
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np

class SamePadConv(nn.Module):
    """1-D convolution padded so the output length matches the input length
    for any kernel size / dilation combination."""

    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1):
        super().__init__()
        self.receptive_field = (kernel_size - 1) * dilation + 1
        self.conv = nn.Conv1d(
            in_channels, out_channels, kernel_size,
            padding=self.receptive_field // 2,
            dilation=dilation,
            groups=groups
        )
        # Even receptive fields produce one extra output step; trim it off.
        self.remove = 1 if self.receptive_field % 2 == 0 else 0

    def forward(self, x):
        out = self.conv(x)
        return out[:, :, :-self.remove] if self.remove > 0 else out
    
class ConvBlock(nn.Module):
    """Two same-padded dilated convolutions with GELU pre-activations and a
    residual connection (projected via 1x1 conv when the channel counts
    differ or this is the final block)."""

    def __init__(self, in_channels, out_channels, kernel_size, dilation, final=False):
        super().__init__()
        self.conv1 = SamePadConv(in_channels, out_channels, kernel_size, dilation=dilation)
        self.conv2 = SamePadConv(out_channels, out_channels, kernel_size, dilation=dilation)
        needs_projection = in_channels != out_channels or final
        self.projector = nn.Conv1d(in_channels, out_channels, 1) if needs_projection else None

    def forward(self, x):
        shortcut = x if self.projector is None else self.projector(x)
        out = self.conv1(F.gelu(x))
        out = self.conv2(F.gelu(out))
        return out + shortcut

class DilatedConvEncoder(nn.Module):
    """Sequential stack of ConvBlocks whose dilation doubles at every level
    (2**i); the last block is flagged as ``final`` so its residual is always
    projected."""

    def __init__(self, in_channels, channels, kernel_size):
        super().__init__()
        blocks = []
        for i, out_ch in enumerate(channels):
            in_ch = channels[i - 1] if i > 0 else in_channels
            blocks.append(ConvBlock(
                in_ch,
                out_ch,
                kernel_size=kernel_size,
                dilation=2 ** i,
                final=(i == len(channels) - 1),
            ))
        self.net = nn.Sequential(*blocks)

    def forward(self, x):
        return self.net(x)


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/models/encoder.py
================================================
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from .dilated_conv import DilatedConvEncoder

def generate_continuous_mask(B, T, n=5, l=0.1):
    """Return a [B, T] boolean mask where each row has ``n`` contiguous
    segments of length ``l`` set to False.

    ``n`` and ``l`` may be given as floats, in which case they are fractions
    of ``T``; ``n`` is clamped to [1, T // 2] and ``l`` to at least 1.
    """
    mask = torch.full((B, T), True, dtype=torch.bool)

    n = int(n * T) if isinstance(n, float) else n
    n = max(min(n, T // 2), 1)

    l = int(l * T) if isinstance(l, float) else l
    l = max(l, 1)

    for row in range(B):
        for _ in range(n):
            start = np.random.randint(T - l + 1)
            mask[row, start:start + l] = False
    return mask

def generate_binomial_mask(B, T, p=0.5):
    """Return a [B, T] boolean mask whose entries are independently True with
    probability ``p``."""
    draws = np.random.binomial(1, p, size=(B, T))
    return torch.from_numpy(draws).to(torch.bool)

class TSEncoder(nn.Module):
    """Dilated-convolution time-series encoder (TS2Vec-style).

    Projects each timestamp to ``hidden_dims``, optionally masks timestamps
    (randomly while training), and applies a stack of dilated conv blocks to
    produce representations of shape [B, T, output_dims].
    """

    def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, mask_mode='binomial'):
        super().__init__()
        self.input_dims = input_dims
        self.output_dims = output_dims
        self.hidden_dims = hidden_dims
        self.mask_mode = mask_mode  # default masking strategy during training
        self.input_fc = nn.Linear(input_dims, hidden_dims)
        # ``depth`` hidden blocks plus one final block mapping to output_dims;
        # dilation doubles at every level.
        self.feature_extractor = DilatedConvEncoder(
            hidden_dims,
            [hidden_dims] * depth + [output_dims],
            kernel_size=3
        )
        self.repr_dropout = nn.Dropout(p=0.1)
        
    def forward(self, x, mask=None):  # x: B x T x input_dims
        # Timestamps containing any NaN are treated as missing and zeroed.
        # NOTE(review): this writes into the caller's tensor in place —
        # confirm callers do not rely on x being left untouched.
        nan_mask = ~x.isnan().any(axis=-1)
        x[~nan_mask] = 0
        x = self.input_fc(x)  # B x T x Ch
        
        # generate & apply mask: default to the configured strategy while
        # training and to no masking at eval time.
        if mask is None:
            if self.training:
                mask = self.mask_mode
            else:
                mask = 'all_true'
        
        if mask == 'binomial':
            mask = generate_binomial_mask(x.size(0), x.size(1)).to(x.device)
        elif mask == 'continuous':
            mask = generate_continuous_mask(x.size(0), x.size(1)).to(x.device)
        elif mask == 'all_true':
            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)
        elif mask == 'all_false':
            mask = x.new_full((x.size(0), x.size(1)), False, dtype=torch.bool)
        elif mask == 'mask_last':
            # Keep everything except the final timestamp of each series.
            mask = x.new_full((x.size(0), x.size(1)), True, dtype=torch.bool)
            mask[:, -1] = False
        
        # Never keep positions that were NaN in the input.
        mask &= nan_mask
        x[~mask] = 0
        
        # conv encoder
        x = x.transpose(1, 2)  # B x Ch x T
        x = self.repr_dropout(self.feature_extractor(x))  # B x Co x T
        x = x.transpose(1, 2)  # B x T x Co
        
        return x
        

================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/models/losses.py
================================================
import torch
from torch import nn
import torch.nn.functional as F

def hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):
    """Multi-scale contrastive loss over two views z1, z2 of shape [B, T, C].

    At every scale (the time axis is halved by max-pooling between scales)
    an instance-contrastive term (weight ``alpha``) and a temporal-
    contrastive term (weight ``1 - alpha``, only from depth ``temporal_unit``
    onward) are accumulated; the result is averaged over the scales.
    """
    total = torch.tensor(0., device=z1.device)
    depth = 0
    while z1.size(1) > 1:
        if alpha != 0:
            total += alpha * instance_contrastive_loss(z1, z2)
        if depth >= temporal_unit and 1 - alpha != 0:
            total += (1 - alpha) * temporal_contrastive_loss(z1, z2)
        depth += 1
        # Halve the temporal resolution for the next scale.
        z1 = F.max_pool1d(z1.transpose(1, 2), kernel_size=2).transpose(1, 2)
        z2 = F.max_pool1d(z2.transpose(1, 2), kernel_size=2).transpose(1, 2)
    if z1.size(1) == 1:
        if alpha != 0:
            total += alpha * instance_contrastive_loss(z1, z2)
        depth += 1
    return total / depth

def instance_contrastive_loss(z1, z2):
    """Contrast each series against the other 2B-1 series at every timestamp.

    z1, z2: [B, T, C] representations of two views of the same batch.
    Returns a scalar loss (0 when B == 1, where no negatives exist).
    """
    B, T = z1.size(0), z1.size(1)
    if B == 1:
        return z1.new_tensor(0.)
    z = torch.cat([z1, z2], dim=0).transpose(0, 1)  # T x 2B x C
    sim = torch.matmul(z, z.transpose(1, 2))        # T x 2B x 2B
    # Remove the diagonal by shifting the lower/upper triangles together.
    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]      # T x 2B x (2B-1)
    logits = logits + torch.triu(sim, diagonal=1)[:, :, 1:]
    logits = -F.log_softmax(logits, dim=-1)

    idx = torch.arange(B, device=z1.device)
    return (logits[:, idx, B + idx - 1].mean() + logits[:, B + idx, idx].mean()) / 2

def temporal_contrastive_loss(z1, z2):
    """Contrast each timestamp against the other 2T-1 timestamps of the same
    series.

    z1, z2: [B, T, C] representations of two views of the same batch.
    Returns a scalar loss (0 when T == 1, where no negatives exist).
    """
    B, T = z1.size(0), z1.size(1)
    if T == 1:
        return z1.new_tensor(0.)
    z = torch.cat([z1, z2], dim=1)                  # B x 2T x C
    sim = torch.matmul(z, z.transpose(1, 2))        # B x 2T x 2T
    # Remove the diagonal by shifting the lower/upper triangles together.
    logits = torch.tril(sim, diagonal=-1)[:, :, :-1]      # B x 2T x (2T-1)
    logits = logits + torch.triu(sim, diagonal=1)[:, :, 1:]
    logits = -F.log_softmax(logits, dim=-1)

    idx = torch.arange(T, device=z1.device)
    return (logits[:, idx, T + idx - 1].mean() + logits[:, T + idx, idx].mean()) / 2


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/tasks/__init__.py
================================================
from .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart,np_shift,eval_ad_result


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/tasks/anomaly_detection.py
================================================
import numpy as np
import time
from sklearn.metrics import f1_score, precision_score, recall_score
import bottleneck as bn
import pdb

# consider delay threshold and missing segments
def get_range_proba(predict, label, delay=7):
    """Point-adjust predictions segment by segment with a detection delay.

    A ground-truth anomaly segment counts as detected — and all of its points
    are set to 1 — iff at least one positive prediction falls within the
    first ``delay`` + 1 points of the segment; otherwise the whole segment is
    zeroed.  Predictions in normal regions are kept as-is.
    """
    boundaries = np.where(label[1:] != label[:-1])[0] + 1
    adjusted = np.array(predict)
    in_anomaly = label[0] == 1
    start = 0

    # Walk the segment boundaries, including the implicit final one.
    for end in list(boundaries) + [len(label)]:
        if in_anomaly:
            window = predict[start:min(start + delay + 1, end)]
            adjusted[start:end] = 1 if 1 in window else 0
        in_anomaly = not in_anomaly
        start = end

    return adjusted


# set missing = 0
def reconstruct_label(timestamp, label):
    """Rebuild a regularly-sampled 0/1 label series from (possibly unsorted)
    timestamps.

    The sampling interval is the smallest gap between consecutive sorted
    timestamps; positions with no observation (missing points) are filled
    with 0.
    """
    timestamp = np.asarray(timestamp, np.int64)
    index = np.argsort(timestamp)

    timestamp_sorted = np.asarray(timestamp[index])
    interval = np.min(np.diff(timestamp_sorted))

    label = np.asarray(label, np.int64)
    label = np.asarray(label[index])

    idx = (timestamp_sorted - timestamp_sorted[0]) // interval

    # BUG FIX: dtype was np.int, an alias removed in NumPy 1.24; use the
    # explicit np.int64 (matching the casts above).
    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int64)
    new_label[idx] = label

    return new_label


def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):
    """Score per-series predictions with the delay-aware protocol.

    Every series is first reconstructed onto a regular timestamp grid, then
    point-adjusted with ``get_range_proba``; all series are concatenated and
    F1 / precision / recall are reported on the result.
    """
    all_labels, all_preds = [], []
    for pred, labels, timestamps in zip(test_pred_list, test_labels_list, test_timestamps_list):
        assert pred.shape == labels.shape == timestamps.shape
        labels = reconstruct_label(timestamps, labels)
        pred = reconstruct_label(timestamps, pred)
        pred = get_range_proba(pred, labels, delay)
        all_labels.append(labels)
        all_preds.append(pred)
    y_true = np.concatenate(all_labels)
    y_pred = np.concatenate(all_preds)
    return {
        'f1': f1_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred)
    }


def np_shift(arr, num, fill_value=np.nan):
    """Shift ``arr`` by ``num`` positions (positive = right, negative = left),
    filling the vacated slots with ``fill_value``."""
    shifted = np.empty_like(arr)
    if num == 0:
        shifted[:] = arr
    elif num > 0:
        shifted[:num] = fill_value
        shifted[num:] = arr[:-num]
    else:
        shifted[num:] = fill_value
        shifted[:num] = arr[-num:]
    return shifted


def eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):
    """Evaluate streaming anomaly detection from masked-representation gaps.

    For every series two representations of train+test are computed: one
    with 'mask_last' masking and one without; the L1 gap between them is the
    raw anomaly signal.  The signal is normalised by a 21-step trailing
    moving average (shifted so only past values are used), thresholded at
    mean + 4*std of the adjusted training signal, and alarms within ``delay``
    steps of a previous one are suppressed before the delay-aware
    point-adjust evaluation.

    Returns:
        (res_log, eval_res): per-series binary predictions and a dict with
        'f1', 'precision', 'recall' and 'infer_time'.
    """
    t = time.time()

    all_train_repr = {}
    all_test_repr = {}
    all_train_repr_wom = {}
    all_test_repr_wom = {}
    for k in all_train_data:
        print(k)
        train_data = all_train_data[k]
        test_data = all_test_data[k]

        # Representation computed with masking enabled...
        full_repr = model.encode(
            np.concatenate([train_data, test_data]).reshape(1, -1, 1),
            mask='mask_last',
            casual=True,
            sliding_length=1,
            sliding_padding=200,
            batch_size=256
        ).squeeze()
        all_train_repr[k] = full_repr[:len(train_data)]  # (n_timestamps, repr_dims)
        all_test_repr[k] = full_repr[len(train_data):]   # (n_timestamps, repr_dims)

        # ...and without masking; their divergence flags anomalous points.
        full_repr_wom = model.encode(
            np.concatenate([train_data, test_data]).reshape(1, -1, 1),
            casual=True,
            sliding_length=1,
            sliding_padding=200,
            batch_size=256
        ).squeeze()
        all_train_repr_wom[k] = full_repr_wom[:len(train_data)]
        all_test_repr_wom[k] = full_repr_wom[len(train_data):]

    # BUG FIX: removed two leftover pdb.set_trace() breakpoints that froze
    # every non-interactive run, along with dead debug prints.
    res_log = []
    labels_log = []
    timestamps_log = []
    for k in all_train_data:
        test_labels = all_test_labels[k]
        test_timestamps = all_test_timestamps[k]

        train_err = np.abs(all_train_repr_wom[k] - all_train_repr[k]).sum(axis=1)
        test_err = np.abs(all_test_repr_wom[k] - all_test_repr[k]).sum(axis=1)

        # Normalise by a trailing moving average; drop the moving-average
        # warm-up region from the training signal before fitting the
        # threshold.
        ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)
        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]
        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]
        train_err_adj = train_err_adj[22:]

        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)
        test_res = (test_err_adj > thr) * 1

        # Suppress alarms that fall within `delay` steps of a previous one.
        for i in range(len(test_res)):
            if i >= delay and test_res[i-delay:i].sum() >= 1:
                test_res[i] = 0

        res_log.append(test_res)
        labels_log.append(test_labels)
        timestamps_log.append(test_timestamps)
    t = time.time() - t

    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)
    eval_res['infer_time'] = t
    return res_log, eval_res


def eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):
    """Cold-start anomaly-detection evaluation.

    Train and test segments of every series are concatenated and encoded as a
    whole; the anomaly score is the L1 gap between the masked-last and
    unmasked representations, normalised by a trailing moving average and
    compared against a sliding mean + 4*std threshold.
    """
    start = time.time()

    full_series = {}
    masked_repr = {}
    plain_repr = {}
    encode_kwargs = dict(
        casual=True,
        sliding_length=1,
        sliding_padding=200,
        batch_size=256,
    )
    for key in all_train_data:
        series = np.concatenate([all_train_data[key], all_test_data[key]])
        full_series[key] = series
        view = series.reshape(1, -1, 1)
        masked_repr[key] = model.encode(view, mask='mask_last', **encode_kwargs).squeeze()
        plain_repr[key] = model.encode(view, **encode_kwargs).squeeze()

    res_log = []
    labels_log = []
    timestamps_log = []
    for key in full_series:
        series = full_series[key]
        labels = np.concatenate([all_train_labels[key], all_test_labels[key]])
        timestamps = np.concatenate([all_train_timestamps[key], all_test_timestamps[key]])

        # Relative deviation of the representation gap from a 21-point
        # trailing moving average (shifted by one step).
        err = np.abs(plain_repr[key] - masked_repr[key]).sum(axis=1)
        ma = np_shift(bn.move_mean(err, 21), 1)
        err_adj = (err - ma) / ma

        min_window = len(series) // 10
        thr = (bn.move_mean(err_adj, len(err_adj), min_window)
               + 4 * bn.move_std(err_adj, len(err_adj), min_window))
        res = (err_adj > thr) * 1

        # Suppress detections that follow another detection within `delay`
        # steps (sequential: earlier suppression affects later windows).
        for idx in range(len(res)):
            if idx >= delay and res[idx - delay:idx].sum() >= 1:
                res[idx] = 0

        res_log.append(res[min_window:])
        labels_log.append(labels[min_window:])
        timestamps_log.append(timestamps[min_window:])

    elapsed = time.time() - start

    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)
    eval_res['infer_time'] = elapsed
    return res_log, eval_res



================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/train.py
================================================
import torch
import numpy as np
import argparse
import os
import sys
import time
import datetime
from ts2vec import TS2Vec
import tasks
import pdb
import datautils
from utils import init_dl_program, name_with_datetime, pkl_save, data_dropout

def save_checkpoint_callback(
    save_every=1,
    unit='epoch'
):
    """Return a callback that checkpoints the model every `save_every` units.

    `unit` selects the counter ('epoch' or 'iter'); checkpoints are written to
    `<run_dir>/model_<n>.pkl`, where `run_dir` is the module-level output
    directory assigned in the __main__ section.
    """
    assert unit in ('epoch', 'iter')

    def callback(model, loss):
        counter = model.n_epochs if unit == 'epoch' else model.n_iters
        if counter % save_every == 0:
            model.save(f'{run_dir}/model_{counter}.pkl')

    return callback

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', help='The dataset name')
    parser.add_argument('run_name', help='The folder name used to save model, output and evaluation metrics. This can be set to any word')
    parser.add_argument('--loader', type=str, required=False, default= 'anomaly', help='The data loader used to load the experimental data. This can be set to anomaly or anomaly_coldstart')
    parser.add_argument('--gpu', type=int, default=0, help='The gpu no. used for training and inference (defaults to 0)')
    parser.add_argument('--batch-size', type=int, default=8, help='The batch size (defaults to 8)')
    parser.add_argument('--lr', type=float, default=0.001, help='The learning rate (defaults to 0.001)')
    parser.add_argument('--repr-dims', type=int, default=320, help='The representation dimension (defaults to 320)')
    parser.add_argument('--max-train-length', type=int, default=3000, help='For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length> (defaults to 3000)')
    parser.add_argument('--iters', type=int, default=10, help='The number of iterations')
    parser.add_argument('--epochs', type=int, default=100, help='The number of epochs')
    parser.add_argument('--save-every', type=int, default=1, help='Save the checkpoint every <save_every> iterations/epochs')
    parser.add_argument('--seed', type=int, default=123, help='The random seed')
    parser.add_argument('--max-threads', type=int, default=4, help='The maximum allowed number of threads used by this process')
    # BUG FIX: the original used `type=bool`, which makes ANY value truthy
    # (bool('False') is True).  A store_true flag gives the documented
    # on/off semantics: pass `--eval` to enable evaluation.
    parser.add_argument('--eval', action='store_true', default=False, help='Whether to perform evaluation after training')
    parser.add_argument('--irregular', type=float, default=0, help='The ratio of missing observations (defaults to 0)')
    args = parser.parse_args()
    
    print("Dataset:", args.dataset)
    print("Arguments:", str(args))
    
    device = init_dl_program(args.gpu, seed=args.seed, max_threads=args.max_threads)
    
    print('Loading data... ', end='')
    if args.loader == 'anomaly':
        task_type = 'anomaly_detection'
        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)
        train_data = datautils.gen_ano_train_data(all_train_data)
        
    elif args.loader == 'anomaly_coldstart':
        task_type = 'anomaly_detection_coldstart'
        all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(args.dataset)
        # Cold start: pretrain on an unrelated classification dataset.
        train_data, _, _, _ = datautils.load_UCR('FordA')
        
    else:
        raise ValueError(f"Unknown loader {args.loader}.")
        
    if args.irregular > 0:
        raise ValueError(f"Task type {task_type} is not supported when irregular>0.")
    print('done')
    
    config = dict(
        batch_size=args.batch_size,
        lr=args.lr,
        output_dims=args.repr_dims,
        max_train_length=args.max_train_length
    )
    
    if args.save_every is not None:
        unit = 'epoch' if args.epochs is not None else 'iter'
        config[f'after_{unit}_callback'] = save_checkpoint_callback(args.save_every, unit)

    run_dir = 'training/' + args.dataset + '__' + name_with_datetime(args.run_name)
    os.makedirs(run_dir, exist_ok=True)
    
    t = time.time()
    
    model = TS2Vec(
        input_dims=train_data.shape[-1],
        device=device,
        **config
    )
    # NOTE(review): model.fit(...) is commented out, so the checkpoint below
    # stores an UNTRAINED encoder — confirm this is intentional (e.g. a
    # timing/debug run) before relying on the saved model.
    # loss_log = model.fit(
    #     train_data,
    #     n_epochs=args.epochs,
    #     n_iters=args.iters,
    #     verbose=True
    # )
    model.save(f'{run_dir}/model.pkl')

    t = time.time() - t
    print(f"\nTraining time: {datetime.timedelta(seconds=t)}")
    print("Training time(seconds): ", t)

    if args.eval:
        if task_type == 'anomaly_detection':
            out, eval_res = tasks.eval_anomaly_detection(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)
        elif task_type == 'anomaly_detection_coldstart':
            out, eval_res = tasks.eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)
        else:
            assert False
        pkl_save(f'{run_dir}/out.pkl', out)
        pkl_save(f'{run_dir}/eval_res.pkl', eval_res)
        print('Evaluation result:', eval_res)

    print("Finished.")


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/trainATbatch.py
================================================
import logging

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset, SequentialSampler
from utils import data_slice
import datautils
import pdb
from transformers.optimization import AdamW, get_cosine_schedule_with_warmup
from sklearn.metrics import f1_score
import tasks
from ATmodelbatch import AnomalyTransformer
import time
import bottleneck as bn
import argparse
import os
import pickle

# Run everything in double precision; when a GPU is present, also make new
# tensors default to CUDA so model/optimizer allocations below need no
# explicit .cuda() calls.
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.DoubleTensor')
else:
    torch.set_default_tensor_type('torch.DoubleTensor')

# Module-level logger (configured by the application, not here).
logger = logging.getLogger(__name__)


class Config:
    """Hyper-parameters and run settings for Anomaly Transformer training."""

    # -- training loop --
    window_size=100
    shuffle=True
    epochs=500
    warmup_ratio= 0.1
    # NOTE(review): 10e-4 evaluates to 1e-3; confirm 1e-4 was not intended.
    lr= 10e-4
    adam_epsilon= 1e-6
    batch_size = 512
    
    # -- model architecture --
    in_channel=1  # overwritten from the data in train()
    dataset_name = "kpi"
    d_model=512
    layers=3
    lambda_=3
    
    # -- checkpointing --
    save_dir = './save_models'
    save_every_epoch = 2
    
    # -- run switches --
    is_train=True
    is_eval=True


def train(config, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):
    """Train the Anomaly Transformer with its minimax objective.

    The training series are concatenated, sliced into fixed-size windows and
    fed in shuffled batches.  Every `config.save_every_epoch` epochs the model
    is checkpointed to `config.save_dir` and evaluated on the test split.
    """
    train_data = datautils.gen_ano_train_data(all_train_data)
    config.in_channel = train_data.shape[-1]
    train_data = data_slice(train_data, config.window_size)
    train_data = torch.from_numpy(train_data)

    if torch.cuda.is_available():
        train_data = train_data.cuda()

    train_dataset = TensorDataset(train_data)
    # BUG FIX: the shuffle generator was unconditionally created on 'cuda:0',
    # which crashes on CPU-only machines; create it to match the device.
    generator = torch.Generator(device='cuda:0') if torch.cuda.is_available() else None
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=min(config.batch_size, len(train_dataset)),
        shuffle=config.shuffle,
        drop_last=True,
        generator=generator,
    )

    total_steps = int(len(train_dataloader) * config.epochs)
    warmup_steps = max(int(total_steps * config.warmup_ratio), 200)
    optimizer = AdamW(
        model.parameters(),
        lr=config.lr,
        eps=config.adam_epsilon,
    )
    scheduler = get_cosine_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps
    )
    print("Total steps: {}".format(total_steps))
    print("Warmup steps: {}".format(warmup_steps))

    for epoch in range(int(config.epochs)):
        print(epoch)
        if (epoch + 1) % config.save_every_epoch == 0:
            # Periodic checkpoint + evaluation on the held-out split.
            # BUG FIX: removed a leftover `pdb.set_trace()` breakpoint that
            # halted training at an interactive debugger prompt here.
            path = config.save_dir + '/' + model.to_string() + '_epoch:%d' % (epoch + 1)
            os.makedirs(path, exist_ok=True)
            torch.save(model, path + '/model.pt')
            f1, pre, recall = evaluate(config, epoch + 1, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)
            print('epoch:%d\tf1:%f\tp:%f\tr:%f' % (epoch + 1, f1, pre, recall))

        model.zero_grad()
        for step, batch in enumerate(train_dataloader):
            batch = batch[0]
            # NOTE(review): the forward result is discarded — presumably the
            # call updates internal state used by min_loss/max_loss; confirm.
            model(batch)
            min_loss = model.min_loss(batch)
            max_loss = model.max_loss(batch)
            optimizer.zero_grad()
            # Minimax phases share the graph, hence retain_graph on the
            # first backward pass.
            min_loss.backward(retain_graph=True)
            max_loss.backward()
            optimizer.step()
            scheduler.step()
            

def evaluate(config,cur_epoch,model,all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):
    """Score every labelled series with the model and persist the results.

    Anomaly scores are smoothed by a 21-point trailing moving average shifted
    one step, turned into relative deviations, and thresholded at
    mean + 4*std of the adjusted training scores.  Detections within `delay`
    steps of a previous detection are suppressed.  Results are pickled under
    `config.save_dir`.

    Returns:
        (f1, precision, recall) as computed by `tasks.eval_ad_result`.
    """
    res_log = []
    labels_log = []
    timestamps_log = []
    t = time.time()
    for k in all_train_data:
        train_data = all_train_data[k]
        train_labels = all_train_labels[k]
        train_timestamps = all_train_timestamps[k]
        train_length = train_labels.shape[0]

        test_data = all_test_data[k]
        test_labels = all_test_labels[k]
        test_timestamps = all_test_timestamps[k]
        test_length = test_labels.shape[0]
        
        # Per-timestamp anomaly score for the whole series.
        train_err = model.anomaly_score_whole(train_data).detach().cpu().numpy()
        test_err = model.anomaly_score_whole(test_data).detach().cpu().numpy()
        
        # Truncate to the labelled length (the model may emit padded scores
        # — NOTE(review): confirm against anomaly_score_whole's contract).
        train_err = train_err[:train_length]
        test_err = test_err[:test_length]
        
        # Relative deviation from the shifted 21-point moving average; the
        # first 22 adjusted train points are dropped (window warm-up + shift).
        ma = tasks.np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)
        train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]
        test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]
        train_err_adj = train_err_adj[22:]

        # Threshold from the training distribution of adjusted scores.
        thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)
        test_res = (test_err_adj > thr) * 1

        # Suppress re-detections within the allowed delay window (sequential:
        # an earlier suppression affects later windows).
        for i in range(len(test_res)):
            if i >= delay and test_res[i-delay:i].sum() >= 1:
                test_res[i] = 0
        res_log.append(test_res)
        labels_log.append(test_labels)
        timestamps_log.append(test_timestamps)
        
    t = time.time() - t
    eval_res = tasks.eval_ad_result(res_log, labels_log, timestamps_log, delay)
    eval_res['infer_time'] = t
    '''
    eval_res:{'f1':,'p':,'r':,}
    '''
    '''save_results'''
    path = config.save_dir+'/'+model.to_string()+'_epoch:%d' % (cur_epoch)
    os.makedirs(path,exist_ok=True)
    with open(path+'/res_log.pkl','wb') as f:
        pickle.dump(res_log,f)
    with open(path+'/eval_res.pkl','wb') as f:
        pickle.dump(eval_res,f)
    with open(path+'/results.txt','w') as f:
        f.write('f1:%f\tp:%f\tr:%f\n' % (eval_res['f1'],eval_res['precision'],eval_res['recall']))
            
    return eval_res['f1'],eval_res['precision'],eval_res['recall']

def main(config):
    """End-to-end entry point: load data, build the model, train, evaluate."""
    all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(config.dataset_name)

    print('data loaded!')
    model = AnomalyTransformer(config.batch_size, config.window_size, config.in_channel, config.d_model, config.layers, config.lambda_)
    print('model builded!')
    print('train start!')
    if config.is_train:
        model.train()
        train(config, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)
        '''save_trained_model'''
        path = config.save_dir + '/' + model.to_string() + '_epoch:%d' % (config.epochs)
        os.makedirs(path, exist_ok=True)
        torch.save(model, path + '/model.pt')

    print('train finished! evaluating...')
    if config.is_eval:
        model.eval()
        # BUG FIX: evaluate() returns a 3-tuple (f1, precision, recall); the
        # original `res_log, eval_res = evaluate(...)` raised
        # "ValueError: too many values to unpack" at runtime.
        f1, precision, recall = evaluate(config, config.epochs, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay)

    print('evaluate finished!')
    
if __name__ == "__main__":
    # Build the default configuration and launch training/evaluation.
    main(Config())


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/ts2vec.py
================================================
import torch
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from models import TSEncoder
from models.losses import hierarchical_contrastive_loss
from utils import take_per_row, split_with_nan, centerize_vary_length_series, torch_pad_nan
import math
import pdb

class TS2Vec:
    '''The TS2Vec model'''
    
    def __init__(
        self,
        input_dims,
        output_dims=320,
        hidden_dims=64,
        depth=10,
        device='cuda',
        lr=0.001,
        batch_size=16,
        max_train_length=None,
        temporal_unit=0,
        after_iter_callback=None,
        after_epoch_callback=None
    ):
        ''' Initialize a TS2Vec model.
        
        Args:
            input_dims (int): The input dimension. For a univariate time series, this should be set to 1.
            output_dims (int): The representation dimension.
            hidden_dims (int): The hidden dimension of the encoder.
            depth (int): The number of hidden residual blocks in the encoder.
            device (int): The gpu used for training and inference.
            lr (float): The learning rate.
            batch_size (int): The batch size.
            max_train_length (Union[int, NoneType]): The maximum allowed sequence length for training. For sequence with a length greater than <max_train_length>, it would be cropped into some sequences, each of which has a length less than <max_train_length>.
            temporal_unit (int): The minimum unit to perform temporal contrast. When training on a very long sequence, this param helps to reduce the cost of time and memory.
            after_iter_callback (Union[Callable, NoneType]): A callback function that would be called after each iteration.
            after_epoch_callback (Union[Callable, NoneType]): A callback function that would be called after each epoch.
        '''
        
        super().__init__()
        self.device = device
        self.lr = lr
        self.batch_size = batch_size
        self.max_train_length = max_train_length
        self.temporal_unit = temporal_unit
        
        # `_net` is the raw encoder that receives gradient updates; `net` is
        # the SWA-averaged copy used for inference, saving and loading.
        self._net = TSEncoder(input_dims=input_dims, output_dims=output_dims, hidden_dims=hidden_dims, depth=depth).to(self.device)
        self.net = torch.optim.swa_utils.AveragedModel(self._net)
        self.net.update_parameters(self._net)
        
        self.after_iter_callback = after_iter_callback
        self.after_epoch_callback = after_epoch_callback
        
        self.n_epochs = 0
        self.n_iters = 0
    
    def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
        ''' Training the TS2Vec model.
        
        Args:
            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.
            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.
            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.
            verbose (bool): Whether to print the training loss after each epoch.
            
        Returns:
            loss_log: a list containing the training losses on each epoch.
        '''
        assert train_data.ndim == 3
        # BUG FIX: removed a leftover `pdb.set_trace()` breakpoint here that
        # halted every training run at an interactive debugger prompt.
        
        if n_iters is None and n_epochs is None:
            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters
        
        # Long sequences are split into <= max_train_length chunks (NaN-padded).
        if self.max_train_length is not None:
            sections = train_data.shape[1] // self.max_train_length
            if sections >= 2:
                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)

        # Center series that have NaN padding at either end, then drop
        # instances that are entirely NaN.
        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0)
        if temporal_missing[0] or temporal_missing[-1]:
            train_data = centerize_vary_length_series(train_data)
                
        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)]
        
        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))
        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)
        
        optimizer = torch.optim.AdamW(self._net.parameters(), lr=self.lr)
        
        loss_log = []
        
        while True:
            if n_epochs is not None and self.n_epochs >= n_epochs:
                break
            
            cum_loss = 0
            n_epoch_iters = 0
            
            interrupted = False
            for batch in train_loader:
                if n_iters is not None and self.n_iters >= n_iters:
                    interrupted = True
                    break
                
                x = batch[0]  #(batch_size, n_timestamps, n_features)

                # Random crop to max_train_length when the sequence is longer.
                if self.max_train_length is not None and x.size(1) > self.max_train_length:
                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)
                    x = x[:, window_offset : window_offset + self.max_train_length]
                x = x.to(self.device)
                
                # Sample two overlapping crops; their overlap [crop_left,
                # crop_right) is where the contrastive loss is computed.
                ts_l = x.size(1)
                crop_l = np.random.randint(low=2 ** (self.temporal_unit + 1), high=ts_l+1)
                crop_left = np.random.randint(ts_l - crop_l + 1)
                crop_right = crop_left + crop_l
                crop_eleft = np.random.randint(crop_left + 1)
                crop_eright = np.random.randint(low=crop_right, high=ts_l + 1)
                crop_offset = np.random.randint(low=-crop_eleft, high=ts_l - crop_eright + 1, size=x.size(0))
                
                optimizer.zero_grad()
                
                out1 = self._net(take_per_row(x, crop_offset + crop_eleft, crop_right - crop_eleft)) 
                out1 = out1[:, -crop_l:]
                
                out2 = self._net(take_per_row(x, crop_offset + crop_left, crop_eright - crop_left))
                out2 = out2[:, :crop_l]
                
                loss = hierarchical_contrastive_loss(
                    out1,
                    out2,
                    temporal_unit=self.temporal_unit
                )
                
                loss.backward()
                optimizer.step()
                self.net.update_parameters(self._net)
                    
                cum_loss += loss.item()
                n_epoch_iters += 1
                
                self.n_iters += 1
                
                if self.after_iter_callback is not None:
                    self.after_iter_callback(self, loss.item())
            
            if interrupted:
                break
            
            cum_loss /= n_epoch_iters
            loss_log.append(cum_loss)
            if verbose:
                print(f"Epoch #{self.n_epochs}: loss={cum_loss}")
            self.n_epochs += 1
            
            if self.after_epoch_callback is not None:
                self.after_epoch_callback(self, cum_loss)
            
        return loss_log
    
    def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_window=None):
        # Encode a batch and apply the requested temporal pooling; `slicing`
        # selects the window of interest from a padded input.
        out = self.net(x.to(self.device, non_blocking=True), mask)
        if encoding_window == 'full_series':
            if slicing is not None:
                out = out[:, slicing]
            out = F.max_pool1d(
                out.transpose(1, 2),
                kernel_size = out.size(1),
            ).transpose(1, 2)
            
        elif isinstance(encoding_window, int):
            out = F.max_pool1d(
                out.transpose(1, 2),
                kernel_size = encoding_window,
                stride = 1,
                padding = encoding_window // 2
            ).transpose(1, 2)
            if encoding_window % 2 == 0:
                # Even kernels emit one extra timestep; trim it.
                out = out[:, :-1]
            if slicing is not None:
                out = out[:, slicing]
            
        elif encoding_window == 'multiscale':
            # Concatenate max-pooled views at kernel sizes 3, 5, 9, ...
            p = 0
            reprs = []
            while (1 << p) + 1 < out.size(1):
                t_out = F.max_pool1d(
                    out.transpose(1, 2),
                    kernel_size = (1 << (p + 1)) + 1,
                    stride = 1,
                    padding = 1 << p
                ).transpose(1, 2)
                if slicing is not None:
                    t_out = t_out[:, slicing]
                reprs.append(t_out)
                p += 1
            out = torch.cat(reprs, dim=-1)
            
        else:
            if slicing is not None:
                out = out[:, slicing]
            
        return out.cpu()
    
    def encode(self, data, mask=None, encoding_window=None, casual=False, sliding_length=None, sliding_padding=0, batch_size=None):
        ''' Compute representations using the model.
        
        Args:
            data (numpy.ndarray): This should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.
            mask (str): The mask used by encoder can be specified with this parameter. This can be set to 'binomial', 'continuous', 'all_true', 'all_false' or 'mask_last'.
            encoding_window (Union[str, int]): When this param is specified, the computed representation would be the max pooling over this window. This can be set to 'full_series', 'multiscale' or an integer specifying the pooling kernel size.
            casual (bool): When this param is set to True, the future informations would not be encoded into representation of each timestamp.
            sliding_length (Union[int, NoneType]): The length of sliding window. When this param is specified, a sliding inference would be applied on the time series.
            sliding_padding (int): This param specifies the contextual data length used for inference every sliding windows.
            batch_size (Union[int, NoneType]): The batch size used for inference. If not specified, this would be the same batch size as training.
            
        Returns:
            repr: The representations for data.
        '''
        assert self.net is not None, 'please train or load a net first'
        assert data.ndim == 3
        if batch_size is None:
            batch_size = self.batch_size
        n_samples, ts_l, _ = data.shape

        org_training = self.net.training
        self.net.eval()
        
        dataset = TensorDataset(torch.from_numpy(data).to(torch.float))
        loader = DataLoader(dataset, batch_size=batch_size)
        
        with torch.no_grad():
            output = []
            for batch in loader:
                x = batch[0]
                if sliding_length is not None:
                    reprs = []
                    # For small batches, buffer several sliding windows and
                    # encode them together to keep the GPU busy.
                    if n_samples < batch_size:
                        calc_buffer = []
                        calc_buffer_l = 0
                    for i in range(0, ts_l, sliding_length):
                        l = i - sliding_padding
                        # In causal mode no right context is used, so the
                        # future never leaks into a timestamp's representation.
                        r = i + sliding_length + (sliding_padding if not casual else 0)
                        x_sliding = torch_pad_nan(
                            x[:, max(l, 0) : min(r, ts_l)],
                            left=-l if l<0 else 0,
                            right=r-ts_l if r>ts_l else 0,
                            dim=1
                        )
                        if n_samples < batch_size:
                            if calc_buffer_l + n_samples > batch_size:
                                out = self._eval_with_pooling(
                                    torch.cat(calc_buffer, dim=0),
                                    mask,
                                    slicing=slice(sliding_padding, sliding_padding+sliding_length),
                                    encoding_window=encoding_window
                                )
                                reprs += torch.split(out, n_samples)
                                calc_buffer = []
                                calc_buffer_l = 0
                            calc_buffer.append(x_sliding)
                            calc_buffer_l += n_samples
                        else:
                            out = self._eval_with_pooling(
                                x_sliding,
                                mask,
                                slicing=slice(sliding_padding, sliding_padding+sliding_length),
                                encoding_window=encoding_window
                            )
                            reprs.append(out)

                    # Flush any windows still waiting in the buffer.
                    if n_samples < batch_size:
                        if calc_buffer_l > 0:
                            out = self._eval_with_pooling(
                                torch.cat(calc_buffer, dim=0),
                                mask,
                                slicing=slice(sliding_padding, sliding_padding+sliding_length),
                                encoding_window=encoding_window
                            )
                            reprs += torch.split(out, n_samples)
                            calc_buffer = []
                            calc_buffer_l = 0
                    
                    out = torch.cat(reprs, dim=1)
                    if encoding_window == 'full_series':
                        out = F.max_pool1d(
                            out.transpose(1, 2).contiguous(),
                            kernel_size = out.size(1),
                        ).squeeze(1)
                else:
                    out = self._eval_with_pooling(x, mask, encoding_window=encoding_window)
                    if encoding_window == 'full_series':
                        out = out.squeeze(1)
                        
                output.append(out)
                
            output = torch.cat(output, dim=0)
            
        self.net.train(org_training)
        return output.numpy()
    
    def save(self, fn):
        ''' Save the model to a file.
        
        Args:
            fn (str): filename.
        '''
        torch.save(self.net.state_dict(), fn)
    
    def load(self, fn):
        ''' Load the model from a file.
        
        Args:
            fn (str): filename.
        '''
        state_dict = torch.load(fn, map_location=self.device)
        self.net.load_state_dict(state_dict)


================================================
FILE: ts_anomaly_detection_methods/anomaly_transformer/utils.py
================================================
import os
import numpy as np
import pickle
import torch
import random
from datetime import datetime

def pkl_save(name, var):
    """Pickle `var` into the file at path `name`."""
    with open(name, mode='wb') as handle:
        pickle.dump(var, handle)

def pkl_load(name):
    """Unpickle and return the object stored in file `name`."""
    with open(name, mode='rb') as handle:
        obj = pickle.load(handle)
    return obj

def split_N_pad(series, window_size):
    """Split a 2-D series along axis 0 into windows of `window_size` rows.

    Any trailing remainder is copied into the head of a zero-filled window so
    every returned chunk has exactly `window_size` rows.  Returns a list of
    (window_size, n_features) arrays.
    """
    assert len(series.shape) == 2
    length = series.shape[0]
    n_full = length // window_size
    chunks = [series[j * window_size:(j + 1) * window_size, :] for j in range(n_full)]
    remainder = length - n_full * window_size
    if remainder != 0:
        # Zero-pad the leftover tail up to a full window.
        tail = np.zeros([window_size, series.shape[1]])
        tail[:remainder, :] = series[-remainder:, :]
        chunks.append(tail)
    return chunks
    
    
'''for AT'''
def data_slice(data, window_size):
    """Slice a batch of series into fixed-size windows.

    Args:
        data: array of shape [size, length, dim].

    Returns:
        np.ndarray stacking every (zero-padded) window of every series.
    """
    assert len(data.shape) == 3
    windows = []
    for series in data:
        windows.extend(split_N_pad(series, window_size))
    return np.array(windows)
        
        
    
def torch_pad_nan(arr, left=0, right=0, dim=0):
    """Pad `arr` with NaNs along `dim`: `left` entries before, `right` after."""
    def nan_block(size):
        # Same shape as the (current) tensor except `size` along `dim`.
        shape = list(arr.shape)
        shape[dim] = size
        return torch.full(shape, np.nan)

    if left > 0:
        arr = torch.cat((nan_block(left), arr), dim=dim)
    if right > 0:
        arr = torch.cat((arr, nan_block(right)), dim=dim)
    return arr
    
def pad_nan_to_target(array, target_length, axis=0, both_side=False):
    """Grow `array` along `axis` to `target_length` by padding with NaNs.

    With `both_side=True` the padding is split evenly before and after;
    otherwise it is appended at the end.  Arrays already long enough are
    returned unchanged.
    """
    assert array.dtype in [np.float16, np.float32, np.float64]
    deficit = target_length - array.shape[axis]
    if deficit <= 0:
        return array
    pad_spec = [(0, 0)] * array.ndim
    pad_spec[axis] = (deficit // 2, deficit - deficit // 2) if both_side else (0, deficit)
    return np.pad(array, pad_width=pad_spec, mode='constant', constant_values=np.nan)

def pad_zero_to_target(array, target_length, axis=0, both_side=False):
    """Grow `array` along `axis` to `target_length` by padding with zeros.

    With `both_side=True` the padding is split evenly before and after;
    otherwise it is appended at the end.  Arrays already long enough are
    returned unchanged.
    """
    assert array.dtype in [np.float16, np.float32, np.float64]
    deficit = target_length - array.shape[axis]
    if deficit <= 0:
        return array
    pad_spec = [(0, 0)] * array.ndim
    pad_spec[axis] = (deficit // 2, deficit - deficit // 2) if both_side else (0, deficit)
    return np.pad(array, pad_width=pad_spec, mode='constant', constant_values=0)

def split_with_nan(x, sections, axis=0):
    """Split `x` into `sections` chunks along `axis`, NaN-padding the shorter
    chunks so every chunk matches the first chunk's length."""
    assert x.dtype in [np.float16, np.float32, np.float64]
    chunks = np.array_split(x, sections, axis=axis)
    target = chunks[0].shape[axis]
    return [pad_nan_to_target(chunk, target, axis=axis) for chunk in chunks]

def take_per_row(A, indx, num_elem):
    """Gather, for each row i of `A`, the slice A[i, indx[i]:indx[i]+num_elem]."""
    cols = indx[:, None] + np.arange(num_elem)
    rows = torch.arange(cols.shape[0])[:, None]
    return A[rows, cols]

def centerize_vary_length_series(x):
    # Shift each series in the batch so its non-NaN segment sits in the middle
    # of the time axis.  Assumes x is (batch, time, features) with NaN padding
    # at one or both ends of each series — TODO confirm against callers.
    prefix_zeros = np.argmax(~np.isnan(x).all(axis=-1), axis=1)   # length of leading all-NaN run
    suffix_zeros = np.argmax(~np.isnan(x[:, ::-1]).all(axis=-1), axis=1)  # length of trailing all-NaN run
    offset = (prefix_zeros + suffix_zeros) // 2 - prefix_zeros
    rows, column_indices = np.ogrid[:x.shape[0], :x.shape[1]]
    # Negative offsets are wrapped; the fancy-index gather below rolls each
    # row by its own offset.
    offset[offset < 0] += x.shape[1]
    column_indices = column_indices - offset[:, np.newaxis]
    return x[rows, column_indices]

def data_dropout(arr, p):
    """Return a copy of `arr` with a fraction `p` of (batch, time) positions
    set to NaN.

    Exactly `int(B * T * p)` positions are chosen uniformly without
    replacement; for inputs with trailing feature dimensions the whole
    feature vector at a dropped position becomes NaN.  The input array is
    not modified.
    """
    B, T = arr.shape[0], arr.shape[1]
    # `np.bool` was removed in NumPy 1.24 — use the builtin `bool` dtype.
    mask = np.full(B * T, False, dtype=bool)
    ele_sel = np.random.choice(
        B * T,
        size=int(B * T * p),
        replace=False
    )
    mask[ele_sel] = True
    res = arr.copy()
    res[mask.reshape(B, T)] = np.nan
    return res

def name_with_datetime(prefix='default'):
    """Return `prefix` suffixed with the current local timestamp
    (format: YYYYmmdd_HHMMSS), e.g. for naming experiment runs."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return f"{prefix}_{stamp}"

def init_dl_program(
    device_name,
    seed=None,
    use_cudnn=True,
    deterministic=False,
    benchmark=False,
    use_tf32=False,
    max_threads=None
):
    """Configure torch / numpy / random for a deep-learning run.

    Parameters
    ----------
    device_name : str | int | list
        One device spec (e.g. 'cpu', 'cuda:0') or a list of them.
    seed : int, optional
        Seed for `random`, `numpy` and torch (incl. all CUDA devices).
    use_cudnn, deterministic, benchmark, use_tf32 : bool
        Forwarded to the corresponding `torch.backends` flags.
    max_threads : int, optional
        Cap on torch intra-/inter-op threads (and MKL, if installed).

    Returns
    -------
    torch.device or list[torch.device]
        A single device when one name was given, else the list in the
        original order.
    """
    import torch
    if max_threads is not None:
        torch.set_num_threads(max_threads)  # intraop
        if torch.get_num_interop_threads() != max_threads:
            torch.set_num_interop_threads(max_threads)  # interop
        try:
            import mkl
        except ImportError:
            # mkl is optional; skip thread capping when it is not installed.
            # (Was a bare `except:`, which also hid unrelated errors.)
            pass
        else:
            mkl.set_num_threads(max_threads)

    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)

    if isinstance(device_name, (str, int)):
        device_name = [device_name]

    devices = []
    # Iterate in reverse so the first-listed device is the one left
    # selected by torch.cuda.set_device at the end.
    for t in reversed(device_name):
        t_device = torch.device(t)
        devices.append(t_device)
        if t_device.type == 'cuda':
            assert torch.cuda.is_available()
            torch.cuda.set_device(t_device)
            if seed is not None:
                torch.cuda.manual_seed(seed)
                torch.cuda.manual_seed_all(seed)

    devices.reverse()
    torch.backends.cudnn.enabled = use_cudnn
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = benchmark

    # TF32 flags only exist on builds with recent-enough CUDA support.
    if hasattr(torch.backends.cudnn, 'allow_tf32'):
        torch.backends.cudnn.allow_tf32 = use_tf32
        torch.backends.cuda.matmul.allow_tf32 = use_tf32

    return devices if len(devices) > 1 else devices[0]



================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/AT_solver.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import time
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from other_anomaly_baselines.metrics.metrics import *
from tadpak import evaluate

from torch.utils.data import TensorDataset, DataLoader
import torch


from other_anomaly_baselines.models.AnomalyTransformer import AnomalyTransformer


# def to_var(x, volatile=False):
#     if torch.cuda.is_available():
#         x = x.cuda()
#     return Variable(x, volatile=volatile)



class UniLoader_train(object):
    """Sliding-window dataset over a (length, features) training series.

    Yields float32 windows of `win_size` rows, starting every `step` rows.
    """

    def __init__(self, data_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set

    def __len__(self):
        """Number of windows obtainable from the series."""
        return (self.train.shape[0] - self.win_size) // self.step + 1

    def __getitem__(self, index):
        start = index * self.step
        return np.float32(self.train[start:start + self.win_size])


class UniLoader_test(object):
    """Sliding-window dataset over a test series together with its labels.

    Yields `(window, labels)` float32 pairs of `win_size` rows, starting
    every `step` rows.
    """

    def __init__(self, data_set, label_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set
        self.train_labels = label_set

    def __len__(self):
        """Number of windows obtainable from the series."""
        return (self.train.shape[0] - self.win_size) // self.step + 1

    def __getitem__(self, index):
        index = index * self.step
        # Bug fix: the labels must come from the SAME window as the data.
        # The original always returned train_labels[0:win_size], so every
        # window after the first was paired with the wrong labels.
        return np.float32(self.train[index:index + self.win_size]), \
               np.float32(self.train_labels[index:index + self.win_size])


def split_N_pad(series, window_size):
    """Chop a 2-D series into consecutive windows of `window_size` rows.

    If the length is not a multiple of `window_size`, the leftover rows
    are placed at the start of a final zero-padded window.
    """
    assert len(series.shape) == 2
    total = series.shape[0]
    n_full = total // window_size
    windows = [series[j * window_size:(j + 1) * window_size, :] for j in range(n_full)]
    remainder = total - n_full * window_size
    if remainder != 0:
        tail = np.zeros([window_size, series.shape[1]])
        tail[:remainder, :] = series[-remainder:, :]
        windows.append(tail)
    return windows


def mkdir(directory):
    """Create `directory` (and any missing parents) if it does not exist.

    Uses `exist_ok=True` instead of a check-then-create pair, which was
    racy: another process could create the directory between the
    `os.path.exists` check and `os.makedirs`, raising FileExistsError.
    """
    os.makedirs(directory, exist_ok=True)


def my_kl_loss(p, q):
    """Smoothed pointwise KL divergence KL(p || q).

    Adds 1e-4 inside both logs for numerical stability, sums over the
    last dimension and averages over dimension 1 (for a (B, H, L, L)
    attention input this yields a (B, L) tensor).
    """
    eps = 0.0001
    pointwise = p * (torch.log(p + eps) - torch.log(q + eps))
    return torch.sum(pointwise, dim=-1).mean(dim=1)


def adjust_learning_rate(optimizer, epoch, lr_):
    """Exponential decay schedule: set lr to `lr_ * 0.5**(epoch - 1)`.

    The rate is written into every parameter group and the update is
    logged, exactly once per call.
    """
    new_lr = lr_ * (0.5 ** ((epoch - 1) // 1))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    print('Updating learning rate to {}'.format(new_lr))


class EarlyStopping:
    """Early stopping on the two validation losses of the minimax objective.

    A call counts as an improvement only when BOTH losses beat their best
    values (by more than `delta`); improvements save a checkpoint named
    `<dataset>_checkpoint.pth` under the given path, while `patience`
    consecutive non-improvements set `early_stop`.
    """

    def __init__(self, patience=7, verbose=False, dataset_name='', delta=0):
        self.patience = patience   # allowed consecutive non-improving calls
        self.verbose = verbose
        self.counter = 0           # consecutive calls without improvement
        self.best_score = None
        self.best_score2 = None
        self.early_stop = False
        # `np.Inf` was removed in NumPy 2.0; `np.inf` is the canonical spelling.
        self.val_loss_min = np.inf
        self.val_loss2_min = np.inf
        self.delta = delta         # minimum change that counts as improvement
        self.dataset = dataset_name  # used to name the checkpoint file

    def __call__(self, val_loss, val_loss2, model, path):
        # Scores are negated losses so "higher is better".
        score = -val_loss
        score2 = -val_loss2
        if self.best_score is None:
            self.best_score = score
            self.best_score2 = score2
            self.save_checkpoint(val_loss, val_loss2, model, path)
        elif score < self.best_score + self.delta or score2 < self.best_score2 + self.delta:
            # At least one of the two losses failed to improve.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.best_score2 = score2
            self.save_checkpoint(val_loss, val_loss2, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, val_loss2, model, path):
        """Persist the model weights and remember the losses they achieved."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        torch.save(model.state_dict(), os.path.join(path, str(self.dataset) + '_checkpoint.pth'))
        self.val_loss_min = val_loss
        self.val_loss2_min = val_loss2


class Solver(object):
    DEFAULTS = {}

    def __init__(self, config, train_set, train_loader, val_set, val_loader, test_set, test_loader, dev_cuda):
        """Set up the Anomaly Transformer solver.

        `config` entries are merged over `Solver.DEFAULTS` and copied
        straight into instance attributes; the code below relies on it
        providing at least `win_size`, `input_c`, `output_c`, `lr`, `k`,
        `num_epochs`, `dataset`, `model_save_path` and `anormly_ratio`.
        `train_set`/`val_set`/`test_set` are accepted but not stored —
        only the corresponding loaders are used.
        """
        # Expose every config entry as an attribute of the solver.
        self.__dict__.update(Solver.DEFAULTS, **config)

        self.train_loader = train_loader
        self.vali_loader = val_loader
        self.test_loader = test_loader
        self.device = dev_cuda

        self.build_model()
        # self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.criterion = nn.MSELoss()

    def build_model(self):
        """Instantiate the AnomalyTransformer (3 encoder layers) and its
        Adam optimizer, and move the model to `self.device`."""
        self.model = AnomalyTransformer(win_size=self.win_size, enc_in=self.input_c, c_out=self.output_c, e_layers=3, cud_device=self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

        # if torch.cuda.is_available():
        self.model.to(self.device)

    def vali(self, vali_loader):
        """Compute the two validation losses of the minimax objective.

        Returns `(mean loss1, mean loss2)` over the loader, where
        loss1 = reconstruction - k * series_loss (minimise phase) and
        loss2 = reconstruction + k * prior_loss (maximise phase).
        """
        self.model.eval()

        loss_1 = []
        loss_2 = []
        for i, (input_data, _) in enumerate(vali_loader):
            input = input_data.float().to(self.device)
            output, series, prior, _ = self.model(input)
            # Association discrepancy: symmetric KL between the series
            # attention and the row-normalised prior attention, with a
            # stop-gradient on the opposite branch in each term.
            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                series_loss += (torch.mean(my_kl_loss(series[u], (
                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                               self.win_size)).detach())) + torch.mean(
                    my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)).detach(),
                        series[u])))
                prior_loss += (torch.mean(
                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)),
                               series[u].detach())) + torch.mean(
                    my_kl_loss(series[u].detach(),
                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))
            # Average the discrepancy over the encoder layers.
            series_loss = series_loss / len(prior)
            prior_loss = prior_loss / len(prior)

            rec_loss = self.criterion(output, input)
            loss_1.append((rec_loss - self.k * series_loss).item())
            loss_2.append((rec_loss + self.k * prior_loss).item())

        return np.average(loss_1), np.average(loss_2)

    def train(self):
        """Train the model with the Anomaly Transformer minimax strategy.

        Each batch computes a reconstruction loss and the association
        discrepancy; loss1 (reconstruction - k * series_loss) and loss2
        (reconstruction + k * prior_loss) are backpropagated one after
        the other.  Validation losses drive early stopping (patience 3)
        and the learning rate is halved every epoch.
        """
        print("======================TRAIN MODE======================")

        time_now = time.time()
        path = self.model_save_path
        if not os.path.exists(path):
            os.makedirs(path)
        early_stopping = EarlyStopping(patience=3, verbose=True, dataset_name=self.dataset)
        train_steps = len(self.train_loader)

        for epoch in range(self.num_epochs):
            iter_count = 0
            loss1_list = []

            epoch_time = time.time()
            self.model.train()
            for i, (input_data, labels) in enumerate(self.train_loader):

                self.optimizer.zero_grad()
                iter_count += 1
                input = input_data.float().to(self.device)

                output, series, prior, _ = self.model(input)

                # calculate Association discrepancy
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    series_loss += (torch.mean(my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach())) + torch.mean(
                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                           self.win_size)).detach(),
                                   series[u])))
                    prior_loss += (torch.mean(my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach())) + torch.mean(
                        my_kl_loss(series[u].detach(), (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))
                # Average the discrepancy over the encoder layers.
                series_loss = series_loss / len(prior)
                prior_loss = prior_loss / len(prior)

                rec_loss = self.criterion(output, input)

                loss1_list.append((rec_loss - self.k * series_loss).item())
                loss1 = rec_loss - self.k * series_loss
                loss2 = rec_loss + self.k * prior_loss

                # Periodic progress / ETA logging.
                if (i + 1) % 100 == 0:
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                # Minimax strategy
                loss1.backward(retain_graph=True)
                loss2.backward()
                self.optimizer.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(loss1_list)

            vali_loss1, vali_loss2 = self.vali(self.vali_loader)

            print(
                "Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} ".format(
                    epoch + 1, train_steps, train_loss, vali_loss1))
            early_stopping(vali_loss1, vali_loss2, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)

    def test(self, ucr_index=None):
        """Evaluate the trained model on the test set.

        Loads the best checkpoint, scores every window with the anomaly
        criterion (softmax of the negated association discrepancy times
        the per-point reconstruction error), thresholds the scores at the
        (100 - anormly_ratio) percentile of the combined train+test
        energies, applies the standard point-adjustment, and returns a
        dict of metrics (f1 / precision / recall always filled; VUS and
        affiliation metrics skipped for a few known-problematic UCR
        indices).
        """
        self.model.load_state_dict(
            torch.load(
                os.path.join(str(self.model_save_path), str(self.dataset) + '_checkpoint.pth')))
        self.model.eval()
        temperature = 50

        print("======================TEST MODE======================")

        # `reduce=False` is deprecated in PyTorch; `reduction='none'` is the
        # supported equivalent and keeps the per-element losses we need.
        criterion = nn.MSELoss(reduction='none')

        # (1) statistics on the train set
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.train_loader):
            input = input_data.float().to(self.device)
            output, series, prior, _ = self.model(input)
            loss = torch.mean(criterion(input, output), dim=-1)
            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature

            # Anomaly criterion: discrepancy-weighted reconstruction error.
            metric = torch.softmax((-series_loss - prior_loss), dim=-1)
            cri = metric * loss
            cri = cri.detach().cpu().numpy()
            attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)

        # (2) find the threshold
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.test_loader):
            input = input_data.float().to(self.device)
            output, series, prior, _ = self.model(input)

            loss = torch.mean(criterion(input, output), dim=-1)

            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
            # Metric
            metric = torch.softmax((-series_loss - prior_loss), dim=-1)
            cri = metric * loss
            cri = cri.detach().cpu().numpy()
            attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)
        print("Threshold :", thresh)

        # (3) evaluation on the test set
        test_labels = []
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.test_loader):
            input = input_data.float().to(self.device)
            output, series, prior, _ = self.model(input)

            loss = torch.mean(criterion(input, output), dim=-1)

            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
            metric = torch.softmax((-series_loss - prior_loss), dim=-1)

            cri = metric * loss
            cri = cri.detach().cpu().numpy()
            attens_energy.append(cri)
            test_labels.append(labels)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        test_labels = np.array(test_labels)

        pred = (test_energy > thresh).astype(int)

        gt = test_labels.astype(int)

        print("pred:   ", pred.shape)
        print("gt:     ", gt.shape)

        # Placeholder result dict; metrics that cannot be computed stay None.
        eval_res = {
            'f1': None,
            'precision': None,
            'recall': None,
            "Affiliation precision": None,
            "Affiliation recall": None,
            "R_AUC_ROC": None,
            "R_AUC_PR": None,
            "VUS_ROC": None,
            "VUS_PR": None,
            'f1_pa_10': None,
            'f1_pa_50': None,
            'f1_pa_90': None,
        }

        # A few UCR datasets are skipped for the expensive range metrics.
        if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:
            pass
        else:

            scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)
            for key, value in scores_simple.items():
                if key == 'Affiliation precision':
                    eval_res["Affiliation precision"] = value
                if key == 'Affiliation recall':
                    eval_res["Affiliation recall"] = value
                if key == 'R_AUC_ROC':
                    eval_res["R_AUC_ROC"] = value
                if key == 'R_AUC_PR':
                    eval_res["R_AUC_PR"] = value
                if key == 'VUS_ROC':
                    eval_res["VUS_ROC"] = value
                if key == 'VUS_PR':
                    eval_res["VUS_PR"] = value

                print('{0:21} : {1:0.4f}'.format(key, value))

        # detection adjustment: please see this issue for more information https://github.com/thuml/Anomaly-Transformer/issues/14
        # (point adjustment: once any point of a true anomaly segment is
        # detected, the whole segment is marked as detected)
        anomaly_state = False
        for i in range(len(gt)):
            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
                anomaly_state = True
                for j in range(i, 0, -1):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
                for j in range(i, len(gt)):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
            elif gt[i] == 0:
                anomaly_state = False
            if anomaly_state:
                pred[i] = 1

        pred = np.array(pred)
        gt = np.array(gt)
        print("pred: ", pred.shape)
        print("gt:   ", gt.shape)

        from sklearn.metrics import precision_recall_fscore_support
        from sklearn.metrics import accuracy_score
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred,
                                                                              average='binary')
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(
                accuracy, precision,
                recall, f_score))

        eval_res['f1'] = f_score
        eval_res['precision'] = precision
        eval_res['recall'] = recall

        return eval_res


    def train_uni(self):
        """Train on a univariate loader (yields plain window tensors, no labels).

        Same minimax training loop as `train`; the only difference is the
        loader's item shape.  The leftover per-batch debug `print` of the
        input type/shape has been removed — it spammed stdout on every
        iteration and carried no information after the first batch.
        """
        print("======================TRAIN MODE======================")

        time_now = time.time()
        path = self.model_save_path
        if not os.path.exists(path):
            os.makedirs(path)
        early_stopping = EarlyStopping(patience=3, verbose=True, dataset_name=self.dataset)
        train_steps = len(self.train_loader)

        for epoch in range(self.num_epochs):
            iter_count = 0
            loss1_list = []

            epoch_time = time.time()
            self.model.train()
            for i, input_data in enumerate(self.train_loader):

                self.optimizer.zero_grad()
                iter_count += 1
                input = input_data.float().to(self.device)

                output, series, prior, _ = self.model(input)

                # calculate Association discrepancy
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    series_loss += (torch.mean(my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach())) + torch.mean(
                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                           self.win_size)).detach(),
                                   series[u])))
                    prior_loss += (torch.mean(my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach())) + torch.mean(
                        my_kl_loss(series[u].detach(), (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))
                # Average the discrepancy over the encoder layers.
                series_loss = series_loss / len(prior)
                prior_loss = prior_loss / len(prior)

                rec_loss = self.criterion(output, input)

                loss1_list.append((rec_loss - self.k * series_loss).item())
                loss1 = rec_loss - self.k * series_loss
                loss2 = rec_loss + self.k * prior_loss

                # Periodic progress / ETA logging.
                if (i + 1) % 100 == 0:
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                # Minimax strategy
                loss1.backward(retain_graph=True)
                loss2.backward()
                self.optimizer.step()

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
            train_loss = np.average(loss1_list)

            vali_loss1, vali_loss2 = self.vali(self.vali_loader)

            print(
                "Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}".format(
                    epoch + 1, train_steps, train_loss))
            early_stopping(vali_loss1, vali_loss2, self.model, path)
            if early_stopping.early_stop:
                print("Early stopping")
                break
            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)

    def test_uni(self, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config):
        # self.model.load_state_dict(
        #     torch.load(
        #         os.path.join(str(self.model_save_path), str(self.dataset) + '_checkpoint.pth')))
        self.model.eval()
        temperature = 50

        print("======================TEST MODE======================")

        criterion = nn.MSELoss(reduce=False)

        # (1) stastic on the train set
        attens_energy = []
        for k in all_train_data:
            train_data = all_train_data[k]

            train_data = np.array(train_data)

            # train_data =
            train_data = np.expand_dims(train_data, axis=-1)
            train_dataset = UniLoader_train(train_data, config.win_size, 1)

            train_loader = DataLoader(dataset=train_dataset,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=2,
                                      drop_last=True)

            # train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))
            # train_loader = DataLoader(train_dataset, batch_size=min(config.batch_size, len(train_dataset)),
            #                           shuffle=True,
            #                           drop_last=True)
            for i, input_data in enumerate(train_loader):
                # print("type(input) = ", type(input_data), input_data.shape)
                input = input_data.float().to(self.device)
                output, series, prior, _ = self.model(input)
                loss = torch.mean(criterion(input, output), dim=-1)
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature

                metric = torch.softmax((-series_loss - prior_loss), dim=-1)
                cri = metric * loss
                cri = cri.detach().cpu().numpy()
                attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)

        # (2) find the threshold
        attens_energy = []
        for k in all_train_data:
            _test_labels = all_test_labels[k]
            test_data = all_test_data[k]

            test_data = np.array(test_data)

            test_data = np.expand_dims(test_data, axis=-1)

            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)

            test_loader = DataLoader(dataset=test_dataset,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=2,
                                      drop_last=True)

            for i, (input_data, labels) in enumerate(test_loader):
                input = input_data.float().to(self.device)
                output, series, prior, _ = self.model(input)

                loss = torch.mean(criterion(input, output), dim=-1)

                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                # Metric
                metric = torch.softmax((-series_loss - prior_loss), dim=-1)
                cri = metric * loss
                cri = cri.detach().cpu().numpy()
                attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)
        print("Threshold :", thresh)

        # (3) evaluation on the test set
        test_labels_list = []
        attens_energy = []
        for k in all_train_data:
            _test_labels = all_test_labels[k]
            # test_labels_list.append(_test_labels)

            test_data = all_test_data[k]

            test_data = np.array(test_data)

            test_data = np.expand_dims(test_data, axis=-1)

            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)

            test_loader = DataLoader(dataset=test_dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=2,
                                     drop_last=True)

            # test_dataset = TensorDataset(torch.from_numpy(test_data).to(torch.float), torch.from_numpy(_test_labels).float())
            # test_loader = DataLoader(test_dataset, batch_size=min(config.batch_size, len(test_dataset)),
            #                          shuffle=True,
            #                          drop_last=True)

            for i, (input_data, labels) in enumerate(test_loader):
                input = input_data.float().to(self.device)
                output, series, prior, _ = self.model(input)

                loss = torch.mean(criterion(input, output), dim=-1)

                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                metric = torch.softmax((-series_loss - prior_loss), dim=-1)

                cri = metric * loss
                cri = cri.detach().cpu().numpy()
                attens_energy.append(cri)
                test_labels_list.append(labels)


        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_labels = np.concatenate(test_labels_list, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        test_labels = np.array(test_labels)

        # attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        # test_labels = np.concatenate(test_labels_list, axis=0).reshape(-1)
        # test_energy = np.array(attens_energy)
        # test_labels = np.array(test_labels)

        pred = (test_energy > thresh).astype(int)

        gt = test_labels.astype(int)

        print("pred:   ", pred.shape)
        print("gt:     ", gt.shape)

        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)
        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)
        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)

        eval_res = {
            'f1': None,
            'precision': None,
            'recall': None,
            "Affiliation precision": None,
            "Affiliation recall": None,
            "R_AUC_ROC": None,
            "R_AUC_PR": None,
            "VUS_ROC": None,
            "VUS_PR": None,
            # 'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],
            # 'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],
            # 'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],
        }

        # matrix = [self.index]
        scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)
        for key, value in scores_simple.items():
            # matrix.append(value)
            if key == 'Affiliation precision':
                eval_res["Affiliation precision"] = value
            if key == 'Affiliation recall':
                eval_res["Affiliation recall"] = value
            if key == 'R_AUC_ROC':
                eval_res["R_AUC_ROC"] = value
            if key == 'R_AUC_PR':
                eval_res["R_AUC_PR"] = value
            if key == 'VUS_ROC':
                eval_res["VUS_ROC"] = value
            if key == 'VUS_PR':
                eval_res["VUS_PR"] = value

            print('{0:21} : {1:0.4f}'.format(key, value))

        # detection adjustment: please see this issue for more information https://github.com/thuml/Anomaly-Transformer/issues/14
        anomaly_state = False
        for i in range(len(gt)):
            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
                anomaly_state = True
                for j in range(i, 0, -1):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
                for j in range(i, len(gt)):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
            elif gt[i] == 0:
                anomaly_state = False
            if anomaly_state:
                pred[i] = 1

        pred = np.array(pred)
        gt = np.array(gt)
        print("pred: ", pred.shape)
        print("gt:   ", gt.shape)

        from sklearn.metrics import precision_recall_fscore_support
        from sklearn.metrics import accuracy_score
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred,
                                                                              average='binary')
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(
                accuracy, precision,
                recall, f_score))

        eval_res['f1'] = f_score
        eval_res['precision'] = precision
        eval_res['recall'] = recall

        return eval_res


================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/ATmodelbatch.py
================================================
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from utils import data_slice, split_N_pad
import time
from torch.utils.data import DataLoader, TensorDataset, SequentialSampler

# Force float64 as the default tensor dtype (on GPU when available): the
# attention/prior math below mixes numpy-derived double tensors with model
# parameters, so everything is kept in double precision.
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.DoubleTensor')
else:
    torch.set_default_tensor_type('torch.DoubleTensor')


class AnomalyAttention(nn.Module):
    """Single attention layer producing both associations of the Anomaly
    Transformer: the learned series association ``S`` (softmax attention)
    and the Gaussian-kernel prior association ``P``.

    Both maps are stored on the module after each forward pass so the
    enclosing model can collect them for the discrepancy loss.
    """

    def __init__(self, N, d_model):
        super(AnomalyAttention, self).__init__()
        self.d_model = d_model
        self.N = N

        # Query/key/value projections plus a scalar scale (sigma) per position.
        self.Wq = nn.Linear(d_model, d_model, bias=False)
        self.Wk = nn.Linear(d_model, d_model, bias=False)
        self.Wv = nn.Linear(d_model, d_model, bias=False)
        self.Ws = nn.Linear(d_model, 1, bias=False)
        # Placeholders only; every one of these is overwritten in forward().
        self.Q = self.K = self.V = self.sigma = torch.zeros((N, d_model))
        self.P = torch.zeros((N, N))
        self.S = torch.zeros((N, N))

    def forward(self, x):
        # x: [batch, N, d_model]
        self.initialize(x)
        self.S = self.series_association()
        self.P = self.prior_association()
        return self.reconstruction()

    def initialize(self, x):
        """Project the input into Q/K/V and the per-position sigma."""
        self.Q = self.Wq(x)
        self.K = self.Wk(x)
        self.V = self.Wv(x)
        self.sigma = self.Ws(x)

    @staticmethod
    def gaussian_kernel(mean, sigma):
        """Gaussian density of `mean` under scale `sigma` (elementwise)."""
        scale = 1 / (math.sqrt(2 * torch.pi) * torch.abs(sigma))
        return scale * torch.exp(-0.5 * (mean / sigma).pow(2))

    def prior_association(self):
        """Distance-based prior: Gaussian over |i - j|, row-normalised.

        Result broadcasts with sigma to [batch, N, N]; each row sums to 1.
        """
        grid = np.indices((self.N, self.N))
        dist = torch.from_numpy(np.abs(grid[0] - grid[1]))
        if torch.cuda.is_available():
            dist = dist.cuda()
        gaussian = self.gaussian_kernel(dist.double(), self.sigma)
        gaussian /= gaussian.sum(dim=-1).view(-1, self.N, 1)
        return gaussian

    def series_association(self):
        """Scaled dot-product attention weights, softmax over the key axis."""
        scores = torch.matmul(self.Q, self.K.transpose(1, 2)) / math.sqrt(self.d_model)
        return F.softmax(scores, dim=2)

    def reconstruction(self):
        """Attention-weighted combination of the values: [batch, N, d_model]."""
        return torch.matmul(self.S, self.V)


class AnomalyTransformerBlock(nn.Module):
    """One transformer block: anomaly attention and a feed-forward layer,
    each wrapped in a residual connection followed by LayerNorm.
    """

    def __init__(self, N, d_model):
        super().__init__()
        self.N, self.d_model = N, d_model

        self.attention = AnomalyAttention(self.N, self.d_model)
        self.ln1 = nn.LayerNorm(self.d_model)
        self.ff = nn.Sequential(nn.Linear(self.d_model, self.d_model), nn.ReLU())
        self.ln2 = nn.LayerNorm(self.d_model)

    def forward(self, x):
        # x: [batch, N, d_model] -> [batch, N, d_model]
        z = self.ln1(self.attention(x) + x)
        return self.ln2(self.ff(z) + z)


class AnomalyTransformer(nn.Module):
    """Anomaly Transformer: reconstruction model trained with the min-max
    association-discrepancy objective.

    Maps [batch, N, in_channel] -> [batch, N, in_channel]; each block
    contributes a prior association P and a series association S of shape
    [batch, N, N], collected into ``P_layers`` / ``S_layers`` on every
    forward pass.
    """

    def __init__(self, batch_size, N, in_channel, d_model, layers, lambda_):
        super().__init__()
        self.batch_size = batch_size
        self.in_channel = in_channel
        self.N = N
        self.d_model = d_model

        # Linear projections between raw channels and the model dimension.
        self.input2hidden = nn.Linear(self.in_channel, self.d_model)
        self.hidden2output = nn.Linear(self.d_model, self.in_channel)
        self.blocks = nn.ModuleList(
            [AnomalyTransformerBlock(self.N, self.d_model) for _ in range(layers)]
        )
        self.output = None  # last reconstruction, set by forward()
        self.lambda_ = lambda_  # weight of the association-discrepancy term

        # Per-layer association maps, refreshed on every forward pass.
        self.P_layers = []
        self.S_layers = []

    def to_string(self):
        """Short configuration tag (e.g. for checkpoint file names)."""
        return 'in_channel:%d_N:%d_dmodel:%d_' % (self.in_channel, self.N, self.d_model)

    def forward(self, x):
        """Reconstruct x, collecting each block's P/S association maps."""

        # x: [batch,N,in_channel]
        self.P_layers = []
        self.S_layers = []
        x = self.input2hidden(x)
        for idx, block in enumerate(self.blocks):
            x = block(x)
            # x: [batch,N,d_model]
            self.P_layers.append(block.attention.P)
            self.S_layers.append(block.attention.S)
        self.output = self.hidden2output(x)
        # output: [batch,N,in_channel]
        return self.output

    # def layer_association_discrepancy(self, Pl, Sl, x):
    #     rowwise_kl = lambda row: (
    #         F.kl_div(Pl[row, :], Sl[row, :]) + F.kl_div(Sl[row, :], Pl[row, :])
    #     )
    #     ad_vector = torch.concat(
    #         [rowwise_kl(row).unsqueeze(0) for row in range(Pl.shape[0])]
    #     )
    #     return ad_vector
    # ad_vector: [N]

    # def rowwise_kl (self,Pl,Sl,idx,row):
    #     return F.kl_div(Pl[idx,row, :], Sl[idx,row, :]) + F.kl_div(Sl[idx,row, :], Pl[idx,row, :])
    # def layer_association_discrepancy(self, Pl, Sl, x):

    #     wholetmp=[]
    #     for idx in range(Pl.shape[0]):
    #         rowtmp=[]
    #         for row in range(Pl.shape[1]):
    #             rowtmp.append(self.rowwise_kl(Pl,Sl,idx,row).unsqueeze(0))
    #         wholetmp.append(torch.cat(rowtmp))

    #     ad_vector = torch.cat(
    #         wholetmp
    #     ).reshape([-1,Pl.shape[1]])
    #     #ad_vector: [batch,N]
    #     return ad_vector

    def rowwise_kl(self, row, Pl, Sl, eps=1e-4):
        """Symmetric KL between row ``row`` of Pl and Sl for each batch item.

        Rows are smoothed with ``eps`` and renormalised so both sides are
        valid distributions; returns a tensor of shape [batch].
        """
        Pl_r = Pl[:, row, :]
        Sl_r = Sl[:, row, :]
        Pl_r = (Pl_r + eps) / torch.sum(Pl_r + eps, dim=-1, keepdims=True)
        Sl_r = (Sl_r + eps) / torch.sum(Sl_r + eps, dim=-1, keepdims=True)
        '''TODO:改这个函数'''
        # (original TODO above: "rework this function")
        # F.kl_div(log_input, target) == target * (log target - log_input),
        # so the two terms sum to KL(S||P) + KL(P||S) over the row.
        ret = torch.sum(
            F.kl_div(torch.log(Pl_r), Sl_r, reduction='none') + F.kl_div(torch.log(Sl_r), Pl_r, reduction='none'), dim=1
        )
        return ret

    def layer_association_discrepancy(self, Pl, Sl, x):
        """Per-position association discrepancy for one layer: [batch, N]."""
        ad_vector = torch.concat(
            [self.rowwise_kl(row, Pl, Sl).unsqueeze(1) for row in range(Pl.shape[1])], dim=1
        )
        return ad_vector

    def association_discrepancy(self, P_list, S_list, x):
        """Average of the layer-wise discrepancies over all layers."""

        ret = (1 / len(P_list)) * sum(
            [
                self.layer_association_discrepancy(P, S, x)
                for P, S in zip(P_list, S_list)
            ]
        )
        # ret: [batch,N]
        return ret

    def loss_function(self, x_hat, P_list, S_list, lambda_, x):
        """Reconstruction Frobenius norm minus lambda_ * ||AssDis||_1,
        averaged over the batch.
        """
        # P_list: [layers,batch,N,N]
        # S_list: [layers,batch,N,N]
        frob_norm = torch.linalg.matrix_norm(x_hat - x, ord="fro")
        ret = frob_norm - (
                lambda_
                * torch.linalg.norm(self.association_discrepancy(P_list, S_list, x), dim=1, ord=1)
        )
        return ret.mean()

    def min_loss(self, x):
        """Minimise phase: series association detached, lambda negated."""

        P_list = self.P_layers
        S_list = [S.detach() for S in self.S_layers]
        # S_list = self.S_layers
        lambda_ = -self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)

    def max_loss(self, x):
        """Maximise phase: prior association detached, lambda positive."""
        P_list = [P.detach() for P in self.P_layers]
        # P_list = self.P_layers
        S_list = self.S_layers
        lambda_ = self.lambda_
        return self.loss_function(self.output, P_list, S_list, lambda_, x)

    def anomaly_score_whole(self, x):
        """Score a whole 1-D series: pad/split it into windows of length N,
        run batched scoring, and return the flattened per-point scores.
        """
        # x:[length,dim]
        x = np.array(split_N_pad(x.reshape([-1, 1]), self.N))
        '''TODO:测试data_slice'''
        # (original TODO above: "test data_slice")
        data = torch.from_numpy(x)
        if torch.cuda.is_available():
            data = data.cuda()
        dataset = TensorDataset(data)
        dataloader = DataLoader(dataset, batch_size=min(self.batch_size, len(dataset)), shuffle=False, drop_last=False)
        scores = []
        for step, batch in enumerate(dataloader):
            batch = batch[0]
            score = self.anomaly_score(batch)
            scores.append(score)
        return torch.cat(scores).flatten()

    def anomaly_score(self, x):
        """Per-point score for a batch of windows:
        softmax(-AssDis) * per-position L2 reconstruction error.
        """
        # originally x: [N, in_channel]; now batched as [batch, N, in_channel]
        output = self.forward(x)
        tmp = -self.association_discrepancy(self.P_layers, self.S_layers, x)
        # NOTE(review): softmax is applied over dim=0 (the batch axis); for
        # batched input a per-window softmax over dim=1 may be intended —
        # confirm before changing.
        ad = F.softmax(
            tmp, dim=0
        )
        assert ad.shape[1] == self.N

        # norm = torch.tensor(
        #     [
        #         torch.linalg.norm(x[i, :] - self.output[i, :], ord=2)
        #         for i in range(self.N)
        #     ]
        # )
        norm = []  # per-window vector of reconstruction errors, each [N]
        for idx in range(x.shape[0]):
            tmp = torch.tensor(
                [
                    torch.linalg.norm(x[idx, i, :] - self.output[idx, i, :], ord=2)
                    for i in range(self.N)
                ]
            )
            norm.append(tmp)
        norm = torch.cat(norm).reshape([-1, self.N])
        assert norm.shape[1] == self.N
        score = torch.mul(ad, norm)
        return score


================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/README.md
================================================
## README_Anomaly_Detection

### Usage

|  ID  |                            Method                            | Year |   Press   |                         Source Code                          |
| :--: | :----------------------------------------------------------: | :--: | :-------: | :----------------------------------------------------------: |
|  1   |  [SPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |
|  2   | [DSPOT](https://dl.acm.org/doi/abs/10.1145/3097983.3098144)  | 2017 |    KDD    |     [github_link](https://github.com/Amossys-team/SPOT)      |
|  3   | [LSTM-VAE](https://ieeexplore.ieee.org/abstract/document/8279425) | 2018 | IEEE RA.L | [github_link](https://github.com/SchindlerLiang/VAE-for-Anomaly-Detection) |
|  4   | [DONUT](https://dl.acm.org/doi/abs/10.1145/3178876.3185996)  | 2018 |    WWW    |     [github_link](https://github.com/NetManAIOps/donut)      |
|  5   |  [SR*](https://dl.acm.org/doi/abs/10.1145/3292500.3330680)   | 2019 |    KDD    |                              -                               |
|  6   |            [AT](https://arxiv.org/abs/2110.02642)            | 2022 |   ICLR    | [github_link](https://github.com/spencerbraun/anomaly_transformer_pytorch) |
|  7   | [TS2Vec](https://www.aaai.org/AAAI22Papers/AAAI-8809.YueZ.pdf) | 2022 |   AAAI    |      [github_link](https://github.com/yuezhihan/ts2vec)      |


1. To train and evaluate SPOT/DSPOT on a dataset, set the dataset name (`dataset = 'yahoo'` or `dataset = 'kpi'`), and then run the following command:

   ```python
   python train_spot.py
   python train_dspot.py
   ```

2. To train and evaluate LSTM-VAE on a dataset, run the following command:

   ```python
   python train_lstm_vae.py <dataset_name> <run_name> --loader <loader> --gpu <gpu_device_id> --seed 42 --eval
   ```

    `dataset_name`: The dataset name.

    `run_name`: The folder name used to save model, output and evaluation metrics. This can be set to any word.

    `loader`: The data loader used to load the experimental data.

    `gpu_device_id`: The GPU device's ID. This can be  `0,1,2...`

3. To train and evaluate DONUT on a dataset, run the following command:

   ```python
   python train_donut.py <dataset_name> <run_name> --loader <loader> --gpu <gpu_device_id> --seed 42 --eval
   ```

4. The anomaly detection results of the SR are collected from the original [SR](https://dl.acm.org/doi/abs/10.1145/3292500.3330680) article.

5. To train and evaluate AT on a dataset, set the hyperparameters in the file `trainATbatch.py`, and then run the following command:

   ```python
   python trainATbatch.py
   ```

6. To train and evaluate TS2Vec on a dataset, run the following command:

   ```python
   python train_ts2vec.py <dataset_name> <run_name> --loader <loader> --repr-dims 320 --gpu <gpu_device_id> --seed 42 --eval
   ```

7. To train and evaluate TimesNet on a dataset, run the following command:

   ```python
   python train_timesnet.py <dataset_name> <run_name> ...
   ```

8. To train and evaluate GPT4TS on a dataset, run the following command:

   ```python
   python train_gpt4ts.py <dataset_name> <run_name> ...
   ```
   
9. To train and evaluate DCdetector on a dataset, run the following command:

   ```python
   python train_dcdetector.py <dataset_name> <run_name> ...
   ```

================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/dataset_read_test.py
================================================
import datautils
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score


def get_range_proba(predict, label, delay=7):
    """Point-adjust predictions with a detection delay.

    Each contiguous ground-truth anomaly segment counts as fully detected
    (all its points set to 1) iff some positive prediction falls within the
    first ``delay + 1`` points of the segment; otherwise the whole segment
    is cleared to 0. Points outside anomaly segments are left untouched.
    """
    adjusted = np.array(predict)
    # Indices where the label value flips, i.e. segment boundaries.
    boundaries = np.where(label[1:] != label[:-1])[0] + 1
    in_anomaly = label[0] == 1
    start = 0

    # Walk every segment, including the trailing one ending at len(label).
    for end in list(boundaries) + [len(label)]:
        if in_anomaly:
            hit = 1 in predict[start:min(start + delay + 1, end)]
            adjusted[start:end] = 1 if hit else 0
        in_anomaly = not in_anomaly
        start = end

    return adjusted


# set missing = 0
def reconstruct_label(timestamp, label):
    """Rebuild a dense label array on a regular time grid.

    Timestamps are sorted, the sampling interval is taken as the smallest
    gap between consecutive timestamps, and the labels are scattered onto
    that grid; grid positions with no observed timestamp get label 0
    (missing treated as normal).
    """
    timestamp = np.asarray(timestamp, np.int64)
    index = np.argsort(timestamp)

    timestamp_sorted = np.asarray(timestamp[index])
    interval = np.min(np.diff(timestamp_sorted))

    label = np.asarray(label, np.int64)
    label = np.asarray(label[index])

    # Grid slot of every observed timestamp.
    idx = (timestamp_sorted - timestamp_sorted[0]) // interval

    # BUG FIX: `np.int` was removed in NumPy 1.24; use the concrete np.int64
    # (matching the casts above) so this runs on modern NumPy as well.
    new_label = np.zeros(shape=((timestamp_sorted[-1] - timestamp_sorted[0]) // interval + 1,), dtype=np.int64)
    new_label[idx] = label

    return new_label


def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_list, delay):
    """Aggregate per-series predictions and score them.

    Each series is densified onto its regular time grid, point-adjusted with
    the detection ``delay``, then all series are concatenated and scored
    with f1/precision/recall.
    """
    all_labels, all_preds = [], []
    for pred_i, labels_i, ts_i in zip(test_pred_list, test_labels_list, test_timestamps_list):
        assert pred_i.shape == labels_i.shape == ts_i.shape
        dense_labels = reconstruct_label(ts_i, labels_i)
        dense_pred = reconstruct_label(ts_i, pred_i)
        dense_pred = get_range_proba(dense_pred, dense_labels, delay)
        all_labels.append(dense_labels)
        all_preds.append(dense_pred)
    y_true = np.concatenate(all_labels)
    y_pred = np.concatenate(all_preds)
    return {
        'f1': f1_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred)
    }


# Smoke test: load the KPI anomaly dataset and print the types and shapes of
# the first series, so the loader's output structure can be inspected.
dataset = 'kpi' # yahoo, kpi
print('Loading kpi data... ', end='')
all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(dataset)

print("type = ", type(all_train_data), type(all_train_labels), type(all_train_timestamps), type(all_test_data))
print("delay = ", delay)
i = 1
for k in all_test_data:
    print("i = ", i, ", k = ", k)
    print("all_train_data.shape = ", all_train_data[k].shape)
    print("all_train_labels.shape = ", all_train_labels[k].shape)
    print("all_train_timestamps.shape = ", all_train_timestamps[k].shape)
    print("all_test_data.shape = ", all_test_data[k].shape)
    print("all_test_labels.shape = ", all_test_labels[k].shape)
    print("all_test_timestamps.shape = ", all_test_timestamps[k].shape)
    print("all_train_labels[k][:10] = ", all_train_labels[k][:10])
    print("all_test_timestamps[k][:10] = ", all_test_timestamps[k][:10])
    i = i + 1
    break  # only inspect the first series


# dataset = 'yahoo' # yahoo, kpi
# print('Loading yahoo data... ', end='')
# all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay = datautils.load_anomaly(dataset)
#
# print("type = ", type(all_train_data), type(all_train_labels), type(all_train_timestamps), type(all_test_data))
# print("delay = ", delay)
# i = 1
# for k in all_test_data:
#     print("i = ", i, ", k = ", k)
#     print("all_train_data.shape = ", all_train_data[k].shape)
#     print("all_train_labels.shape = ", all_train_labels[k].shape)
#     print("all_train_timestamps.shape = ", all_train_timestamps[k].shape)
#     print("all_test_data.shape = ", all_test_data[k].shape)
#     print("all_test_labels.shape = ", all_test_labels[k].shape)
#     print("all_test_timestamps.shape = ", all_test_timestamps[k].shape)
#     i = i + 1

================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/datautils.py
================================================
import os
import numpy as np
import pandas as pd
import math
import random
from datetime import datetime
import pickle
from utils import pkl_load, pad_nan_to_target
from scipy.io.arff import loadarff
from sklearn.preprocessing import StandardScaler, MinMaxScaler

def load_UCR(dataset):
    """Load a dataset from the UCR archive.

    Returns (train, train_labels, test, test_labels) where the series have
    shape (n_instances, n_timestamps, 1) and labels are remapped onto
    {0, ..., L-1}.
    """
    base = os.path.join('datasets/UCR', dataset)
    train_df = pd.read_csv(os.path.join(base, dataset + "_TRAIN.tsv"), sep='\t', header=None)
    test_df = pd.read_csv(os.path.join(base, dataset + "_TEST.tsv"), sep='\t', header=None)
    train_array = np.array(train_df)
    test_array = np.array(test_df)

    # Move the labels to {0, ..., L-1}.
    transform = {l: i for i, l in enumerate(np.unique(train_array[:, 0]))}

    train = train_array[:, 1:].astype(np.float64)
    train_labels = np.vectorize(transform.get)(train_array[:, 0])
    test = test_array[:, 1:].astype(np.float64)
    test_labels = np.vectorize(transform.get)(test_array[:, 0])

    # These datasets ship non-normalized; everything else in the archive is
    # already z-normalized and is returned untouched.  Normalization uses
    # statistics of the whole train split (not per series) so amplitude
    # information is preserved.
    non_normalized = (
        'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ',
        'BME', 'Chinatown', 'Crop', 'EOGHorizontalSignal', 'EOGVerticalSignal',
        'Fungi', 'GestureMidAirD1', 'GestureMidAirD2', 'GestureMidAirD3',
        'GesturePebbleZ1', 'GesturePebbleZ2', 'GunPointAgeSpan',
        'GunPointMaleVersusFemale', 'GunPointOldVersusYoung', 'HouseTwenty',
        'InsectEPGRegularTrain', 'InsectEPGSmallTrain', 'MelbournePedestrian',
        'PickupGestureWiimoteZ', 'PigAirwayPressure', 'PigArtPressure',
        'PigCVP', 'PLAID', 'PowerCons', 'Rock', 'SemgHandGenderCh2',
        'SemgHandMovementCh2', 'SemgHandSubjectCh2', 'ShakeGestureWiimoteZ',
        'SmoothSubspace', 'UMD',
    )
    if dataset in non_normalized:
        mean = np.nanmean(train)
        std = np.nanstd(train)
        train = (train - mean) / std
        test = (test - mean) / std
    return train[..., np.newaxis], train_labels, test[..., np.newaxis], test_labels

def load_anomaly(name):
    """Load a pickled anomaly-detection dataset and unpack its fields.

    Returns a 7-tuple: (all_train_data, all_train_labels,
    all_train_timestamps, all_test_data, all_test_labels,
    all_test_timestamps, delay).
    """
    res = pkl_load(f'datasets/{name}.pkl')
    fields = ('all_train_data', 'all_train_labels', 'all_train_timestamps',
              'all_test_data', 'all_test_labels', 'all_test_timestamps',
              'delay')
    return tuple(res[f] for f in fields)

def gen_ano_train_data(all_train_data):
    ''' Get the anomaly train data.
    Args:
        all_train_data(dict): all_train_data[k] (numpy.ndarray) with the shape (n_timestamps).
    Returns:
        pretrain_data (numpy.ndarray): padding with 'nan', the shape is (n_instance, n_timestamps, n_features).
    '''
    # Pad every series with NaN to the length of the longest one, then stack.
    target_len = max(len(v) for v in all_train_data.values())
    padded = [pad_nan_to_target(all_train_data[k], target_len, axis=0)
              for k in all_train_data]
    return np.expand_dims(np.stack(padded), 2)

================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/dcdetector_solver.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import time
# from utils.utils import *
from other_anomaly_baselines.models.DCdetector import DCdetector
from other_anomaly_baselines.datasets.data_loader import get_loader_segment
from einops import rearrange
from other_anomaly_baselines.metrics.metrics import *
import warnings
from tadpak import evaluate
from torch.utils.data import TensorDataset, DataLoader

warnings.filterwarnings('ignore')


class UniLoader_train(object):
    """Sliding-window dataset over a univariate training series (no labels).

    Yields float32 windows of length ``win_size`` advanced by ``step``.
    """

    def __init__(self, data_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set

    def __len__(self):
        """Number of full windows obtainable at the configured stride."""
        return (self.train.shape[0] - self.win_size) // self.step + 1

    def __getitem__(self, index):
        start = index * self.step
        return np.float32(self.train[start:start + self.win_size])


class UniLoader_test(object):
    """Sliding-window dataset over a univariate test series with labels.

    Yields (window, labels) float32 pairs of length ``win_size`` advanced
    by ``step``.
    """

    def __init__(self, data_set, label_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set
        self.train_labels = label_set

    def __len__(self):
        """Number of full windows obtainable at the configured stride."""
        return (self.train.shape[0] - self.win_size) // self.step + 1

    def __getitem__(self, index):
        index = index * self.step
        # BUG FIX: labels were previously always taken from [0:win_size],
        # pairing every window with the labels of the *first* window.
        # Slice labels with the same offset as the data so predictions and
        # ground truth stay aligned.
        return (np.float32(self.train[index:index + self.win_size]),
                np.float32(self.train_labels[index:index + self.win_size]))




def my_kl_loss(p, q):
    """Smoothed KL(p || q) summed over the last axis, averaged over dim 1.

    Both arguments get an additive 1e-4 smoothing inside the logs; for
    input of shape [B, L, D] the result has shape [B].
    """
    log_ratio = torch.log(p + 0.0001) - torch.log(q + 0.0001)
    return (p * log_ratio).sum(dim=-1).mean(dim=1)


def adjust_learning_rate(optimizer, epoch, lr_):
    """Set every param group's lr to lr_ halved once per epoch past the first."""
    new_lr = lr_ * (0.5 ** ((epoch - 1) // 1))
    for group in optimizer.param_groups:
        group['lr'] = new_lr


class EarlyStopping:
    """Stop training when neither validation loss improves for ``patience`` calls.

    Each call compares the (negated) losses against the best seen so far; an
    improvement in either saves a checkpoint and resets the counter, otherwise
    the counter increments and ``early_stop`` is set once it reaches
    ``patience``.
    """

    def __init__(self, patience=7, verbose=False, dataset_name='', delta=0, index=0):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.best_score2 = None
        self.early_stop = False
        # np.inf (lowercase): the np.Inf alias was removed in NumPy 2.0.
        self.val_loss_min = np.inf
        self.val_loss2_min = np.inf
        self.delta = delta
        self.dataset = dataset_name
        self.index = index

    def __call__(self, val_loss, val_loss2, model, path):
        score = -val_loss
        score2 = -val_loss2
        if self.best_score is None:
            # First call: initialize the baselines and checkpoint immediately.
            self.best_score = score
            self.best_score2 = score2
            self.save_checkpoint(val_loss, val_loss2, model, path)
        elif score < self.best_score + self.delta or score2 < self.best_score2 + self.delta:
            # Either loss got worse: count toward early stop.
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Both losses improved: record, checkpoint, and reset the counter.
            self.best_score = score
            self.best_score2 = score2
            self.save_checkpoint(val_loss, val_loss2, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, val_loss2, model, path):
        """Save model weights to ``<path>/<dataset><index>_checkpoint.pth``."""
        ckpt_path = os.path.join(path, str(self.dataset) + str(self.index) + '_checkpoint.pth')
        # Bug fix: the previous message printed a path WITHOUT the index while
        # torch.save used a path WITH the index; log the real checkpoint path.
        print("checkpoint path = ", ckpt_path)
        torch.save(model.state_dict(), ckpt_path)
        self.val_loss_min = val_loss
        self.val_loss2_min = val_loss2


class Solver(object):
    DEFAULTS = {}

    def __init__(self, config, multi=True):
        """Build a DCdetector solver from a config dict.

        Every key of ``config`` (merged over ``Solver.DEFAULTS``) becomes an
        attribute. With ``multi=True`` the four segment loaders (train / val /
        test / thre) are created; otherwise they are left as ``None`` and
        must be supplied externally (e.g. the univariate helpers).
        """
        self.__dict__.update(Solver.DEFAULTS, **config)

        if multi:
            # All four loaders read the same dataset directory.
            data_root = self.data_path + self.dataset
            self.train_loader, _ = get_loader_segment(
                self.index, data_root, batch_size=self.batch_size,
                win_size=self.win_size, mode='train', dataset=self.dataset)
            self.vali_loader, _ = get_loader_segment(
                self.index, data_root, batch_size=self.batch_size,
                win_size=self.win_size, mode='val', dataset=self.dataset)
            self.test_loader, _ = get_loader_segment(
                self.index, data_root, batch_size=self.batch_size,
                win_size=self.win_size, mode='test', dataset=self.dataset)
            self.thre_loader, _ = get_loader_segment(
                self.index, data_root, batch_size=self.batch_size,
                win_size=self.win_size, mode='thre', dataset=self.dataset)
        else:
            self.train_loader = None
            self.vali_loader = None
            self.test_loader = None
            self.thre_loader = None

        self.build_model()

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # loss_fuc (sic) selects the reconstruction criterion; any other value
        # leaves self.criterion unset.
        if self.loss_fuc == 'MAE':
            self.criterion = nn.L1Loss()
        elif self.loss_fuc == 'MSE':
            self.criterion = nn.MSELoss()

    def build_model(self):
        """Instantiate the DCdetector network and its Adam optimizer.

        The model is moved to CUDA when available; the input channel count is
        reused for both ``enc_in`` and ``channel``.
        """
        self.model = DCdetector(
            win_size=self.win_size,
            enc_in=self.input_c,
            c_out=self.output_c,
            n_heads=self.n_heads,
            d_model=self.d_model,
            e_layers=self.e_layers,
            patch_size=self.patch_size,
            channel=self.input_c,
        )

        if torch.cuda.is_available():
            self.model.cuda()

        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    def vali(self, vali_loader):
        """Average the training objective (prior_loss - series_loss) over the loader.

        Returns:
            (float, float): mean of per-batch losses and ``np.average(loss_2)``.
            ``loss_2`` is never appended to, so the second value is the average
            of an empty list (NaN, with a RuntimeWarning suppressed by the
            module-level ``filterwarnings``); it is kept only so the caller can
            pass two values to ``EarlyStopping``.
        """
        self.model.eval()
        loss_1 = []
        loss_2 = []  # never filled; see docstring
        for i, (input_data, _) in enumerate(vali_loader):
            input = input_data.float().to(self.device)
            series, prior = self.model(input)
            series_loss = 0.0
            prior_loss = 0.0
            # Accumulate, for every encoder layer u, the two-directional KL
            # between the series branch and the prior branch normalized over
            # its last dimension; .detach() freezes the branch acting as the
            # target so gradients flow only through the other one.
            for u in range(len(prior)):
                series_loss += (torch.mean(my_kl_loss(series[u], (
                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                               self.win_size)).detach())) + torch.mean(
                    my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)).detach(),
                        series[u])))
                prior_loss += (torch.mean(
                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)),
                               series[u].detach())) + torch.mean(
                    my_kl_loss(series[u].detach(),
                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))

            # Average over the number of layers.
            series_loss = series_loss / len(prior)
            prior_loss = prior_loss / len(prior)

            loss_1.append((prior_loss - series_loss).item())

        return np.average(loss_1), np.average(loss_2)

    def train(self):
        """Train DCdetector on ``self.train_loader`` (labeled batches).

        Per batch, two-directional KL divergences between the series branch
        and the normalized prior branch are accumulated over all encoder
        layers; the optimized loss is ``prior_loss - series_loss``.  After
        each epoch the validation loss drives ``EarlyStopping`` (which also
        checkpoints the model) and the learning rate is halved.
        """

        time_now = time.time()
        path = self.model_save_path
        if not os.path.exists(path):
            os.makedirs(path)
        early_stopping = EarlyStopping(patience=5, verbose=True, dataset_name=self.dataset, index=self.index)
        train_steps = len(self.train_loader)

        for epoch in range(self.num_epochs):
            iter_count = 0

            epoch_time = time.time()
            self.model.train()
            # for i, data in enumerate(self.train_loader):
            #     print(data)
            #     break

            # labels are unused during training; only the windows are fed in.
            for i, (input_data, labels) in enumerate(self.train_loader):

                self.optimizer.zero_grad()
                iter_count += 1
                input = input_data.float().to(self.device)


                # print("input = ", type(input), input.shape)
                series, prior = self.model(input)

                series_loss = 0.0
                prior_loss = 0.0

                # Same per-layer two-directional KL accumulation as in vali();
                # .detach() stops gradients through the branch used as target.
                for u in range(len(prior)):
                    series_loss += (torch.mean(my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach())) + torch.mean(
                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                           self.win_size)).detach(),
                                   series[u])))
                    prior_loss += (torch.mean(my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach())) + torch.mean(
                        my_kl_loss(series[u].detach(), (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))

                series_loss = series_loss / len(prior)
                prior_loss = prior_loss / len(prior)

                loss = prior_loss - series_loss

                # Periodic throughput / remaining-time logging.
                if (i + 1) % 100 == 0:
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                self.optimizer.step()

            vali_loss1, vali_loss2 = self.vali(self.vali_loader)

            print(
                "Epoch: {0}, Cost time: {1:.3f}s ".format(
                    epoch + 1, time.time() - epoch_time))
            # EarlyStopping checkpoints on improvement and flags stagnation.
            early_stopping(vali_loss1, vali_loss2, self.model, path)
            if early_stopping.early_stop:
                break
            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)

    def test(self, ucr_index=None):
        """Evaluate the trained model (multivariate path).

        Pipeline: (1) compute anomaly energies over the train set, (2) set the
        threshold at the (100 - anormly_ratio)-th percentile of the combined
        train+test energies, (3) score the test set, apply point adjustment,
        and compute precision / recall / F1 plus auxiliary metrics.

        Args:
            ucr_index: optional UCR dataset index; for indices in a hard-coded
                skip list only the point-adjusted P/R/F1 are computed
                (presumably datasets where the other metrics fail — TODO
                confirm the reason for the list).

        Returns:
            dict: metric name -> value (None where not computed).
        """
        self.model.load_state_dict(
            torch.load(
                os.path.join(str(self.model_save_path), str(self.dataset) + str(self.index) + '_checkpoint.pth')))
        self.model.eval()
        # Scaling factor applied to the KL terms before the softmax energy.
        temperature = 50

        # (1) stastic on the train set
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.train_loader):
            input = input_data.float().to(self.device)
            series, prior = self.model(input)
            series_loss = 0.0
            prior_loss = 0.0
            # One-directional KL terms per layer (unlike training, no sum of
            # both directions per branch), scaled by temperature.
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature

            # Per-point anomaly energy: softmax of the negated combined loss.
            metric = torch.softmax((-series_loss - prior_loss), dim=-1)
            cri = metric.detach().cpu().numpy()
            attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)

        # (2) find the threshold
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.thre_loader):
            input = input_data.float().to(self.device)
            series, prior = self.model(input)
            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature

            metric = torch.softmax((-series_loss - prior_loss), dim=-1)
            cri = metric.detach().cpu().numpy()
            attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        # Threshold: top anormly_ratio percent of the pooled energies.
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)
        print("Threshold :", thresh)

        # (3) evaluation on the test set
        test_labels = []
        attens_energy = []
        for i, (input_data, labels) in enumerate(self.thre_loader):
            input = input_data.float().to(self.device)
            series, prior = self.model(input)
            series_loss = 0.0
            prior_loss = 0.0
            for u in range(len(prior)):
                if u == 0:
                    series_loss = my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss = my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
                else:
                    series_loss += my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach()) * temperature
                    prior_loss += my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach()) * temperature
            metric = torch.softmax((-series_loss - prior_loss), dim=-1)
            cri = metric.detach().cpu().numpy()
            attens_energy.append(cri)
            test_labels.append(labels)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        test_labels = np.array(test_labels)

        pred = (test_energy > thresh).astype(int)
        gt = test_labels.astype(int)

        # labels = np.asarray(labels_log, np.int64)[0]

        # print("test_energy.shape = ", test_energy.shape, test_labels.shape)
        # print("test_energy.shape = ", test_energy[:10])
        # print("test_labels.shape = ", test_labels[:10])
        # UCR indices for which only the point-adjusted P/R/F1 are computed.
        index_list =  [38, 54, 71, 72, 79, 85, 88, 108, 146, 162, 179, 180, 187, 193, 196, 203, 212, 229, 232]
        if ucr_index in index_list:
            eval_res = {
                'f1': None,
                'precision': None,
                'recall': None,
                "Affiliation precision": None,
                "Affiliation recall": None,
                "R_AUC_ROC": None,
                "R_AUC_PR": None,
                "VUS_ROC": None,
                "VUS_PR": None,
                'f1_pa_10': None,
                'f1_pa_50': None,
                'f1_pa_90': None,
            }
        else:

            # F1 under PA%K protocol for K in {10, 50, 90} (tadpak).
            results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)
            results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)
            results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)

            eval_res = {
                'f1': None,
                'precision': None,
                'recall': None,
                "Affiliation precision": None,
                "Affiliation recall": None,
                "R_AUC_ROC": None,
                "R_AUC_PR": None,
                "VUS_ROC": None,
                "VUS_PR": None,
                'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],
                'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],
                'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],
            }

            # Collect the remaining metrics from the shared scorer.
            matrix = [self.index]
            scores_simple = combine_all_evaluation_scores(pred, gt, test_energy)
            for key, value in scores_simple.items():
                matrix.append(value)
                if key == 'Affiliation precision':
                    eval_res["Affiliation precision"] = value
                if key == 'Affiliation recall':
                    eval_res["Affiliation recall"] = value
                if key == 'R_AUC_ROC':
                    eval_res["R_AUC_ROC"] = value
                if key == 'R_AUC_PR':
                    eval_res["R_AUC_PR"] = value
                if key == 'VUS_ROC':
                    eval_res["VUS_ROC"] = value
                if key == 'VUS_PR':
                    eval_res["VUS_PR"] = value

                print('{0:21} : {1:0.4f}'.format(key, value))

        # Point adjustment: once any point inside a ground-truth anomaly
        # segment is predicted anomalous, mark the whole segment (scanning
        # backward and forward from the hit) as detected.
        anomaly_state = False
        for i in range(len(gt)):
            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
                anomaly_state = True
                for j in range(i, 0, -1):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
                for j in range(i, len(gt)):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
            elif gt[i] == 0:
                anomaly_state = False
            if anomaly_state:
                pred[i] = 1

        pred = np.array(pred)
        gt = np.array(gt)

        from sklearn.metrics import precision_recall_fscore_support
        from sklearn.metrics import accuracy_score

        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred, average='binary')
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(accuracy, precision,
                                                                                                   recall, f_score))

        # if self.data_path == 'UCR' or 'UCR_AUG':
        #     import csv
        #     with open('result_dc/' + self.dataset + '.csv', 'a+') as f:
        #         writer = csv.writer(f)
        #         writer.writerow(matrix)

        # Point-adjusted scores fill the headline P/R/F1 slots.
        eval_res['f1'] = f_score
        eval_res['precision'] = precision
        eval_res['recall'] = recall

        return eval_res

    def vali_uni(self, vali_loader):
        """Univariate variant of vali(): the loader yields bare windows.

        Identical objective to ``vali`` but iterates a loader without labels.
        Returns ``(avg_loss1, np.average(loss_2))`` where ``loss_2`` is never
        appended to, so the second value is NaN (RuntimeWarning suppressed by
        the module-level ``filterwarnings``) -- kept for interface parity with
        ``EarlyStopping``.
        """
        self.model.eval()
        loss_1 = []
        loss_2 = []  # never filled; see docstring
        for i, input_data in enumerate(vali_loader):
            input = input_data.float().to(self.device)
            series, prior = self.model(input)
            series_loss = 0.0
            prior_loss = 0.0
            # Two-directional KL between series and normalized prior per layer;
            # .detach() freezes the branch serving as the target.
            for u in range(len(prior)):
                series_loss += (torch.mean(my_kl_loss(series[u], (
                        prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                               self.win_size)).detach())) + torch.mean(
                    my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)).detach(),
                        series[u])))
                prior_loss += (torch.mean(
                    my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)),
                               series[u].detach())) + torch.mean(
                    my_kl_loss(series[u].detach(),
                               (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))

            # Average over the number of layers.
            series_loss = series_loss / len(prior)
            prior_loss = prior_loss / len(prior)

            loss_1.append((prior_loss - series_loss).item())

        return np.average(loss_1), np.average(loss_2)

    def train_uni(self):
        """Univariate variant of train(): loader batches are bare windows.

        Same contrastive KL objective and early-stopping / LR-decay schedule
        as ``train``, but the train loader yields windows without labels and
        validation goes through ``vali_uni``.
        """

        time_now = time.time()
        path = self.model_save_path
        if not os.path.exists(path):
            os.makedirs(path)
        early_stopping = EarlyStopping(patience=5, verbose=True, dataset_name=self.dataset, index=self.index)
        train_steps = len(self.train_loader)

        for epoch in range(self.num_epochs):
            iter_count = 0

            epoch_time = time.time()
            self.model.train()
            # for i, data in enumerate(self.train_loader):
            #     print(data)
            #     break

            for i, input_data in enumerate(self.train_loader):

                self.optimizer.zero_grad()
                iter_count += 1
                input = input_data.float().to(self.device)


                # print("input = ", type(input), input.shape)
                series, prior = self.model(input)

                series_loss = 0.0
                prior_loss = 0.0

                # Per-layer two-directional KL accumulation; .detach() stops
                # gradients through the branch used as the target.
                for u in range(len(prior)):
                    series_loss += (torch.mean(my_kl_loss(series[u], (
                            prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                   self.win_size)).detach())) + torch.mean(
                        my_kl_loss((prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                           self.win_size)).detach(),
                                   series[u])))
                    prior_loss += (torch.mean(my_kl_loss(
                        (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                self.win_size)),
                        series[u].detach())) + torch.mean(
                        my_kl_loss(series[u].detach(), (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)))))

                series_loss = series_loss / len(prior)
                prior_loss = prior_loss / len(prior)

                loss = prior_loss - series_loss

                # Periodic throughput / remaining-time logging.
                if (i + 1) % 100 == 0:
                    speed = (time.time() - time_now) / iter_count
                    left_time = speed * ((self.num_epochs - epoch) * train_steps - i)
                    print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                    iter_count = 0
                    time_now = time.time()

                loss.backward()
                self.optimizer.step()

            vali_loss1, vali_loss2 = self.vali_uni(self.vali_loader)

            print(
                "Epoch: {0}, Cost time: {1:.3f}s ".format(
                    epoch + 1, time.time() - epoch_time))
            # EarlyStopping checkpoints on improvement and flags stagnation.
            early_stopping(vali_loss1, vali_loss2, self.model, path)
            if early_stopping.early_stop:
                break
            adjust_learning_rate(self.optimizer, epoch + 1, self.lr)

    def test_uni(self, all_train_data, all_test_data, all_test_labels, all_test_timestamps, delay, config):
        self.model.load_state_dict(
            torch.load(
                os.path.join(str(self.model_save_path), str(self.dataset) + str(self.index) + '_checkpoint.pth')))
        self.model.eval()
        temperature = 50

        # (1) stastic on the train set
        attens_energy = []

        for k in all_train_data:
            train_data = all_train_data[k]

            train_data = np.array(train_data)

            # train_data =
            train_data = np.expand_dims(train_data, axis=-1)
            train_dataset = UniLoader_train(train_data, config.win_size, 1)

            train_loader = DataLoader(dataset=train_dataset,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=2,
                                      drop_last=True)

            for i, input_data in enumerate(train_loader):
                input = input_data.float().to(self.device)
                series, prior = self.model(input)
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature

                metric = torch.softmax((-series_loss - prior_loss), dim=-1)
                cri = metric.detach().cpu().numpy()
                attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)

        # (2) find the threshold
        attens_energy = []
        for k in all_train_data:
            _test_labels = all_test_labels[k]
            test_data = all_test_data[k]

            test_data = np.array(test_data)

            test_data = np.expand_dims(test_data, axis=-1)

            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)

            test_loader = DataLoader(dataset=test_dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=2,
                                     drop_last=True)

            for i, (input_data, labels) in enumerate(test_loader):
                input = input_data.float().to(self.device)
                series, prior = self.model(input)
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature

                metric = torch.softmax((-series_loss - prior_loss), dim=-1)
                cri = metric.detach().cpu().numpy()
                attens_energy.append(cri)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        thresh = np.percentile(combined_energy, 100 - self.anormly_ratio)
        print("Threshold :", thresh)

        # (3) evaluation on the test set
        test_labels = []
        attens_energy = []
        for k in all_train_data:
            _test_labels = all_test_labels[k]
            test_data = all_test_data[k]

            test_data = np.array(test_data)

            test_data = np.expand_dims(test_data, axis=-1)

            test_dataset = UniLoader_test(test_data, _test_labels, config.win_size, 1)

            test_loader = DataLoader(dataset=test_dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=2,
                                     drop_last=True)
            for i, (input_data, labels) in enumerate(test_loader):
                input = input_data.float().to(self.device)
                series, prior = self.model(input)
                series_loss = 0.0
                prior_loss = 0.0
                for u in range(len(prior)):
                    if u == 0:
                        series_loss = my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss = my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                    else:
                        series_loss += my_kl_loss(series[u], (
                                prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                       self.win_size)).detach()) * temperature
                        prior_loss += my_kl_loss(
                            (prior[u] / torch.unsqueeze(torch.sum(prior[u], dim=-1), dim=-1).repeat(1, 1, 1,
                                                                                                    self.win_size)),
                            series[u].detach()) * temperature
                metric = torch.softmax((-series_loss - prior_loss), dim=-1)
                cri = metric.detach().cpu().numpy()
                attens_energy.append(cri)
                test_labels.append(labels)

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        test_labels = np.array(test_labels)

        pred = (test_energy > thresh).astype(int)
        gt = test_labels.astype(int)

        # labels = np.asarray(labels_log, np.int64)[0]

        # print("test_energy.shape = ", test_energy.shape, test_labels.shape)
        # print("test_energy.shape = ", test_energy[:10])
        # print("test_labels.shape = ", test_labels[:10])

        # results_f1_pa_k_10 = evaluate.evaluate(test_energy, test_labels, k=10)
        # results_f1_pa_k_50 = evaluate.evaluate(test_energy, test_labels, k=50)
        # results_f1_pa_k_90 = evaluate.evaluate(test_energy, test_labels, k=90)

        eval_res = {
            'f1': None,
            'precision': None,
            'recall': None,
            "Affiliation precision": None,
            "Affiliation recall": None,
            "R_AUC_ROC": None,
            "R_AUC_PR": None,
            "VUS_ROC": None,
            "VUS_PR": None,
            # 'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],
            # 'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],
            # 'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],
        }

        # matrix = [self.index]

        min_len = min(min(pred.shape[0], gt.shape[0]), test_energy.shape[0])

        scores_simple = combine_all_evaluation_scores(pred[:min_len], gt[:min_len], test_energy[:min_len])
        for key, value in scores_simple.items():
            # matrix.append(value)
            if key == 'Affiliation precision':
                eval_res["Affiliation precision"] = value
            if key == 'Affiliation recall':
                eval_res["Affiliation recall"] = value
            if key == 'R_AUC_ROC':
                eval_res["R_AUC_ROC"] = value
            if key == 'R_AUC_PR':
                eval_res["R_AUC_PR"] = value
            if key == 'VUS_ROC':
                eval_res["VUS_ROC"] = value
            if key == 'VUS_PR':
                eval_res["VUS_PR"] = value

            print('{0:21} : {1:0.4f}'.format(key, value))

        anomaly_state = False
        for i in range(len(gt)):
            if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
                anomaly_state = True
                for j in range(i, 0, -1):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
                for j in range(i, len(gt)):
                    if gt[j] == 0:
                        break
                    else:
                        if pred[j] == 0:
                            pred[j] = 1
            elif gt[i] == 0:
                anomaly_state = False
            if anomaly_state:
                pred[i] = 1

        pred = np.array(pred)
        gt = np.array(gt)

        from sklearn.metrics import precision_recall_fscore_support
        from sklearn.metrics import accuracy_score

        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt[:min_len], pred[:min_len], average='binary')
        print(
            "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f} ".format(accuracy, precision,
                                                                                                   recall, f_score))

        # if self.data_path == 'UCR' or 'UCR_AUG':
        #     import csv
        #     with open('result_dc/' + self.dataset + '.csv', 'a+') as f:
        #         writer = csv.writer(f)
        #         writer.writerow(matrix)

        eval_res['f1'] = f_score
        eval_res['precision'] = precision
        eval_res['recall'] = recall

        return eval_res


================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/donut.py
================================================
import torch
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from models.donut_model import DONUT_Model
from utils import split_with_nan, centerize_vary_length_series
import math
import time
from tasks.anomaly_detection import eval_ad_result, np_shift
import bottleneck as bn
from sklearn.metrics import f1_score, precision_score, recall_score
from other_anomaly_baselines.metrics.affiliation.metrics import pr_from_events
from other_anomaly_baselines.metrics.vus.metrics import get_range_vus_roc
from other_anomaly_baselines.metrics.affiliation.generics import convert_vector_to_events
from tadpak import evaluate


def adjustment(gt, pred):
    """Apply point-adjustment to predictions against ground-truth anomalies.

    If any point inside a contiguous ground-truth anomaly segment is
    predicted anomalous, the whole segment is marked as detected (the
    standard point-adjust evaluation protocol).

    Args:
        gt: ground-truth labels (0/1), an indexable sequence.
        pred: predicted labels (0/1); modified in place.

    Returns:
        The (gt, pred) pair, with pred point-adjusted.
    """
    anomaly_state = False
    for i in range(len(gt)):
        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
            anomaly_state = True
            # Fix: iterate down to index 0 inclusive (stop=-1); the original
            # range(i, 0, -1) skipped index 0, leaving the first point of a
            # segment that starts at position 0 unadjusted.
            for j in range(i, -1, -1):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
            for j in range(i, len(gt)):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
        elif gt[i] == 0:
            anomaly_state = False
        if anomaly_state:
            pred[i] = 1
    return gt, pred


class DONUT:
    """Donut-style variational-autoencoder (VAE) anomaly detector.

    Wraps a ``DONUT_Model``: ``train`` fits it on NaN-cleaned series,
    ``anomaly_score`` scores a series by its squared reconstruction error,
    and ``evaluate`` thresholds the scores and reports detection metrics.
    """

    def __init__(
        self,
        input_dims,
        latent_dim=100,
        hidden_dim=3,
        device='cuda',
        lr=0.001,
        batch_size=8,
        z_kld_weight=0.1,
        x_kld_weight=0.1,
        max_train_length=None,
        after_iter_callback=None,
        after_epoch_callback=None
    ):
        '''
        Args:
            input_dims (int): number of input channels (features) of the series.
            latent_dim (int): dimensionality of the VAE latent space.
            hidden_dim (int): hidden size of the VAE encoder/decoder.
            device (str): torch device the model is placed on.
            lr (float): AdamW learning rate.
            batch_size (int): training batch size.
            z_kld_weight (float): weight of the latent-space KL term of the loss.
            x_kld_weight (float): weight of the reconstruction KL term of the loss.
            max_train_length (Union[int, NoneType]): if set, longer sequences are
                split for training and randomly cropped for scoring.
            after_iter_callback: optional hook called as (self, loss) after each iteration.
            after_epoch_callback: optional hook called as (self, loss) after each epoch.
        '''
        super().__init__()
        self.device = device
        self.lr = lr
        self.batch_size = batch_size
        self.z_kld_weight = z_kld_weight
        self.x_kld_weight = x_kld_weight
        self.max_train_length = max_train_length
        self.input_dims = input_dims

        self.net = DONUT_Model(in_channel=input_dims, latent_dim=latent_dim, hidden_dim=hidden_dim).to(self.device)

        self.after_iter_callback = after_iter_callback
        self.after_epoch_callback = after_epoch_callback

        # counters persist across repeated train() calls
        self.n_epochs = 0
        self.n_iters = 0

    def train(self, train_data, n_epochs=None, n_iters=None, verbose=False):
        ''' 
        Args:
            train_data (numpy.ndarray): The training data. It should have a shape of (n_instance, n_timestamps, n_features). All missing data should be set to NaN.
            n_epochs (Union[int, NoneType]): The number of epochs. When this reaches, the training stops.
            n_iters (Union[int, NoneType]): The number of iterations. When this reaches, the training stops. If both n_epochs and n_iters are not specified, a default setting would be used that sets n_iters to 200 for a dataset with size <= 100000, 600 otherwise.
            verbose (bool): Whether to print the training loss after each epoch.
            
        Returns:
            loss_log: a list containing the training losses on each epoch.
        '''
        assert train_data.ndim == 3
        
        if n_iters is None and n_epochs is None:
            n_iters = 200 if train_data.size <= 100000 else 600  # default param for n_iters
        
        if self.max_train_length is not None:
            sections = train_data.shape[1] // self.max_train_length
            if sections >= 2:
                train_data = np.concatenate(split_with_nan(train_data, sections, axis=1), axis=0)
                # train_data: (n_instance*sections, max_train_length, n_features)

        temporal_missing = np.isnan(train_data).all(axis=-1).any(axis=0) # (max_train_length)
        if temporal_missing[0] or temporal_missing[-1]: # whether the head or tail exists nan
            train_data = centerize_vary_length_series(train_data)
                
        train_data = train_data[~np.isnan(train_data).all(axis=2).all(axis=1)] 
        # delete the sequence (max_train_length, n_features) contains only nan

        # replace any remaining NaNs with the per-sequence mean
        for i in range(train_data.shape[0]):
            train_data[i][np.isnan(train_data[i])] = np.nanmean(train_data[i])
        
        train_dataset = TensorDataset(torch.from_numpy(train_data).to(torch.float))
        train_loader = DataLoader(train_dataset, batch_size=min(self.batch_size, len(train_dataset)), shuffle=True, drop_last=True)
        
        optimizer = torch.optim.AdamW(self.net.parameters(), lr=self.lr)
        
        loss_log = []
        
        while True:
            if n_epochs is not None and self.n_epochs >= n_epochs:
                break
            
            cum_loss = 0
            n_epoch_iters = 0
            
            interrupted = False
            for batch in train_loader:
                if n_iters is not None and self.n_iters >= n_iters:
                    interrupted = True
                    break
                
                x = batch[0]  #(batch_size, n_timestamps, n_features)
                # print("#####################")
                # raise Exception('my personal exception!')

                # random temporal crop when the sequence exceeds max_train_length
                if self.max_train_length is not None and x.size(1) > self.max_train_length:
                    window_offset = np.random.randint(x.size(1) - self.max_train_length + 1)
                    x = x[:, window_offset : window_offset + self.max_train_length]
                x = x.to(self.device)
                
                optimizer.zero_grad()
                
                outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(x) 
                loss = self.net.loss_function(x, outputs, z_mu, z_log_var, x_mu, x_log_var, self.z_kld_weight, self.x_kld_weight)
                
                loss.backward()
                optimizer.step()
                    
                cum_loss += loss.item()
                n_epoch_iters += 1
                
                self.n_iters += 1
                
                if self.after_iter_callback is not None:
                    self.after_iter_callback(self, loss.item())
            
            if interrupted:
                break
            
            cum_loss /= n_epoch_iters
            loss_log.append(cum_loss)
            if verbose:
                print(f"Epoch #{self.n_epochs}: loss={cum_loss}")
            self.n_epochs += 1
            
            if self.after_epoch_callback is not None:
                self.after_epoch_callback(self, cum_loss)
            
        return loss_log
    

    def anomaly_score(self, model, test_data, is_multi=False):
        ''' Return the per-timestamp squared reconstruction error of test_data.

        Args:
            model: unused; kept for call-site compatibility.
            test_data (numpy.ndarray): raw series, reshaped here to
                (1, -1, input_dims) when is_multi else (1, -1, 1).
            is_multi (bool): whether the series is multivariate.

        Returns:
            torch.Tensor: flattened 1-D reconstruction-error sequence.
        '''
        if is_multi:
            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, self.input_dims))).to(self.device)
        else:
            test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, 1))).to(self.device)
        # test_data = torch.from_numpy(np.float32(test_data.reshape(1, -1, 1))).to(self.device)

        # NOTE(review): this random crop makes the returned score length differ
        # from the input length whenever the series is longer than
        # max_train_length — confirm callers account for that.
        if self.max_train_length is not None and test_data.size(1) > self.max_train_length:
            window_offset = np.random.randint(test_data.size(1) - self.max_train_length + 1)
            test_data = test_data[:, window_offset: window_offset + self.max_train_length]

        # set the scoring batch size
        batch_size = 2

        # create the DataLoader (iterates a raw tensor of shape (1, T, C))
        test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

        self.net.eval()
        with torch.no_grad():
            # list collecting each batch's reconstruction
            outputs_list = []
            # z_mu_list = []
            # z_log_var_list = []
            # x_mu_list = []
            # x_log_var_list = []
            for input_data in test_loader:
                # NOTE(review): test_loader wraps a plain tensor, not a
                # TensorDataset, so [0] drops the batch dimension here —
                # confirm DONUT_Model accepts the resulting input shape.
                input_data = input_data[0]  # take the first element of the batch


                # x = x.to(self.device)

                print("input_data.shape = ", input_data.shape)
                batch_outputs, batch_z_mu, batch_z_log_var, batch_x_mu, batch_x_log_var =  self.net(input_data)

                # keep this batch's reconstruction
                outputs_list.append(batch_outputs)
                # z_mu_list.append(batch_z_mu)
                # z_log_var_list.append(batch_z_log_var)
                # x_mu_list.append(batch_x_mu)
                # x_log_var_list.append(batch_x_log_var)

            # merge the per-batch results
            outputs = torch.cat(outputs_list, dim=0)
            # z_mu = torch.cat(z_mu_list, dim=0)
            # z_log_var = torch.cat(z_log_var_list, dim=0)
            # x_mu = torch.cat(x_mu_list, dim=0)
            # x_log_var = torch.cat(x_log_var_list, dim=0)
            # print("test_data.shape = ", test_data.shape)
            # print("self.net = ", self.net)
            # outputs, z_mu, z_log_var, x_mu, x_log_var = self.net(test_data)

            # rec_error = torch.sum(torch.abs(outputs - test_data), dim=-1)
            # squared reconstruction error per timestamp, flattened to 1-D
            rec_error = torch.sum(torch.square(outputs - test_data), dim=-1)
            rec_error = torch.flatten(rec_error)

        return rec_error
    
    def evaluate(self, model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay, is_multi=False, ucr_index=None):
        ''' Threshold the anomaly scores and compute detection metrics.

        Raw reconstruction errors are normalised by a 21-step moving average
        of the concatenated train+test errors; the detection threshold is
        mean + 4*std of the adjusted training error.

        Args:
            model: unused; forwarded to anomaly_score.
            all_*_data / all_*_labels / all_*_timestamps: dicts keyed by series
                name (univariate path) or plain arrays when is_multi is True.
            delay: a detection within `delay` steps after a previous detection
                is suppressed (set to 0).
            is_multi (bool): use the multivariate evaluation path.
            ucr_index: UCR dataset index; indices 79/108/187/203 skip the
                affiliation/VUS/PA%K metrics.

        Returns:
            (res_log, eval_res): per-series 0/1 predictions and the metric
            dict (including 'infer_time').
        '''
        t = time.time()

        res_log = []
        labels_log = []
        timestamps_log = []
        res_log_socres = []  # raw adjusted scores per series (name typo kept)
        if is_multi:
            train_data = all_train_data

            test_data = all_test_data
            test_labels = all_test_labels

            print("train_data.shape = ", train_data.shape, ", test_data.shape = ", test_data.shape)

            train_err = self.anomaly_score(model, train_data, is_multi=is_multi).detach().cpu().numpy()
            test_err = self.anomaly_score(model, test_data, is_multi=is_multi).detach().cpu().numpy()

            # moving-average normalisation of the errors (window 21, shifted by 1)
            ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)
            train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]
            test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]
            train_err_adj = train_err_adj[22:]  # drop the moving-average warm-up region

            thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)
            test_res = (test_err_adj > thr) * 1
            res_log_socres.append(test_err_adj)

            # suppress detections that follow another detection within `delay` steps
            for i in range(len(test_res)):
                if i >= delay and test_res[i - delay:i].sum() >= 1:
                    test_res[i] = 0

            res_log.append(test_res)
            labels_log.append(test_labels)

        else:
            # univariate path: score each series in the dict independently
            for k in all_test_data:
                train_data = all_train_data[k]
                train_labels = all_train_labels[k]
                train_timestamps = all_train_timestamps[k]

                test_data = all_test_data[k]
                test_labels = all_test_labels[k]
                test_timestamps = all_test_timestamps[k]

                train_err = self.anomaly_score(model, train_data).detach().cpu().numpy()
                test_err = self.anomaly_score(model, test_data).detach().cpu().numpy()

                ma = np_shift(bn.move_mean(np.concatenate([train_err, test_err]), 21), 1)
                train_err_adj = (train_err - ma[:len(train_err)]) / ma[:len(train_err)]
                test_err_adj = (test_err - ma[len(train_err):]) / ma[len(train_err):]
                train_err_adj = train_err_adj[22:]

                thr = np.mean(train_err_adj) + 4 * np.std(train_err_adj)
                test_res = (test_err_adj > thr) * 1
                res_log_socres.append(test_err_adj)

                for i in range(len(test_res)):
                    if i >= delay and test_res[i-delay:i].sum() >= 1:
                        test_res[i] = 0

                res_log.append(test_res)
                labels_log.append(test_labels)
                timestamps_log.append(test_timestamps)
        t = time.time() - t

        if is_multi:
            if ucr_index == 79 or ucr_index == 108 or ucr_index == 187 or ucr_index == 203:
                # these UCR datasets only get point-adjusted P/R/F1
                labels = np.asarray(labels_log, np.int64)[0]
                pred = np.asarray(res_log, np.int64)[0]

                labels, pred = adjustment(labels, pred)

                eval_res = {
                    'f1': f1_score(labels, pred),
                    'precision': precision_score(labels, pred),
                    'recall': recall_score(labels, pred),
                    "Affiliation precision": None,
                    "Affiliation recall": None,
                    "R_AUC_ROC": None,
                    "R_AUC_PR": None,
                    "VUS_ROC": None,
                    "VUS_PR": None,
                    'f1_pa_10': None,
                    'f1_pa_50': None,
                    'f1_pa_90': None,
                }
            else:


                labels = np.asarray(labels_log, np.int64)[0]
                pred = np.asarray(res_log, np.int64)[0]
                # print("labels.shape = ", labels.shape, labels[:5])
                # print("pred.shape = ", pred.shape, pred[:5])

                events_pred = convert_vector_to_events(pred)
                events_gt = convert_vector_to_events(labels)

                Trange = (0, len(labels))
                affiliation = pr_from_events(events_pred, events_gt, Trange)
                vus_results = get_range_vus_roc(labels, pred, 100)  # default slidingWindow = 100

                pred_scores = np.asarray(res_log_socres, np.float64)[0]
                results_f1_pa_k_10 = evaluate.evaluate(pred_scores, labels, k=10)
                results_f1_pa_k_50 = evaluate.evaluate(pred_scores, labels, k=50)
                results_f1_pa_k_90 = evaluate.evaluate(pred_scores, labels, k=90)

                # point-adjust before computing the plain P/R/F1
                labels, pred = adjustment(labels, pred)

                eval_res = {
                    'f1': f1_score(labels, pred),
                    'precision': precision_score(labels, pred),
                    'recall': recall_score(labels, pred),
                    "Affiliation precision": affiliation['precision'],
                    "Affiliation recall": affiliation['recall'],
                    "R_AUC_ROC": vus_results["R_AUC_ROC"],
                    "R_AUC_PR": vus_results["R_AUC_PR"],
                    "VUS_ROC": vus_results["VUS_ROC"],
                    "VUS_PR": vus_results["VUS_PR"],
                    'f1_pa_10': results_f1_pa_k_10['best_f1_w_pa'],
                    'f1_pa_50': results_f1_pa_k_50['best_f1_w_pa'],
                    'f1_pa_90': results_f1_pa_k_90['best_f1_w_pa'],
                }
        else:

            eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay, pred_scores=res_log_socres)
        eval_res['infer_time'] = t
        return res_log, eval_res

    def save(self, fn):
        ''' Save the model to a file.
        
        Args:
            fn (str): filename.
        '''
        torch.save(self.net.state_dict(), fn)
    
    def load(self, fn):
        ''' Load the model from a file.
        
        Args:
            fn (str): filename.
        '''
        state_dict = torch.load(fn, map_location=self.device)
        self.net.load_state_dict(state_dict)

================================================
FILE: ts_anomaly_detection_methods/other_anomaly_baselines/exp_anomaly_detection.py
================================================
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
import torch.multiprocessing
from other_anomaly_baselines.models import TimesNet
from other_anomaly_baselines.models import GPT4TS

torch.multiprocessing.set_sharing_strategy('file_system')
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np
import math
from other_anomaly_baselines.metrics.metrics import *
import warnings
from tadpak import evaluate

from torch.utils.data import TensorDataset, DataLoader


warnings.filterwarnings('ignore')



class UniLoader_train(object):
    """Sliding-window dataset over a time series (training split).

    Yields float32 windows of length ``win_size`` taken every ``step``
    samples along the first axis of ``data_set``.
    """

    def __init__(self, data_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set

    def __len__(self):
        """Number of complete windows that fit in the series."""
        usable = self.train.shape[0] - self.win_size
        return usable // self.step + 1

    def __getitem__(self, index):
        start = index * self.step
        stop = start + self.win_size
        # cast the window to float32 for the model
        return np.float32(self.train[start:stop])


class UniLoader_test(object):
    """Sliding-window dataset that pairs each data window with its labels.

    Yields (window, labels) float32 pairs of length ``win_size`` taken every
    ``step`` samples along the first axis.
    """

    def __init__(self, data_set, label_set, win_size, step, mode="train"):
        self.mode = mode
        self.step = step
        self.win_size = win_size
        self.train = data_set
        self.train_labels = label_set

    def __len__(self):
        """Number of complete windows that fit in the series."""
        return (self.train.shape[0] - self.win_size) // self.step + 1

    def __getitem__(self, index):
        index = index * self.step
        # Bug fix: window the labels with the same offset as the data. The
        # original always returned train_labels[0:win_size], so every window
        # was paired with the labels of the first window.
        return (np.float32(self.train[index:index + self.win_size]),
                np.float32(self.train_labels[index:index + self.win_size]))




def adjustment(gt, pred):
    """Apply point-adjustment to predictions against ground-truth anomalies.

    If any point inside a contiguous ground-truth anomaly segment is
    predicted anomalous, the whole segment is marked as detected (the
    standard point-adjust evaluation protocol).

    Args:
        gt: ground-truth labels (0/1), an indexable sequence.
        pred: predicted labels (0/1); modified in place.

    Returns:
        The (gt, pred) pair, with pred point-adjusted.
    """
    anomaly_state = False
    for i in range(len(gt)):
        if gt[i] == 1 and pred[i] == 1 and not anomaly_state:
            anomaly_state = True
            # Fix: iterate down to index 0 inclusive (stop=-1); the original
            # range(i, 0, -1) skipped index 0, leaving the first point of a
            # segment that starts at position 0 unadjusted.
            for j in range(i, -1, -1):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
            for j in range(i, len(gt)):
                if gt[j] == 0:
                    break
                else:
                    if pred[j] == 0:
                        pred[j] = 1
        elif gt[i] == 0:
            anomaly_state = False
        if anomaly_state:
            pred[i] = 1
    return gt, pred


def adjust_learning_rate(optimizer, epoch, args):
    """Update the optimizer's learning rate according to ``args.lradj``.

    Supported schedules:
      * ``'type1'``  — halve the lr every epoch: lr * 0.5 ** (epoch - 1).
      * ``'type2'``  — fixed lr milestones at selected epochs.
      * ``'cosine'`` — cosine annealing over ``args.train_epochs``.

    An unrecognised ``args.lradj`` leaves the learning rate untouched
    (the original left ``lr_adjust`` unbound and raised NameError).
    """
    # lr = args.learning_rate * (0.2 ** (epoch // 2))
    if args.lradj == 'type1':
        lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}
    elif args.lradj == 'type2':
        lr_adjust = {
            2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,
            10: 5e-7, 15: 1e-7, 20: 5e-8
        }
    elif args.lradj == "cosine":
        lr_adjust = {epoch: args.learning_rate / 2 * (1 + math.cos(epoch / args.train_epochs * math.pi))}
    else:
        # Fix: explicit no-op fallback instead of an unbound lr_adjust.
        lr_adjust = {}
    if epoch in lr_adjust:
        lr = lr_adjust[epoch]
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        print('Updating learning rate to {}'.format(lr))


class EarlyStopping:
    """Stop training when the validation loss stops improving.

    Tracks the best (lowest) validation loss observed; each call without an
    improvement of more than ``delta`` increments a counter, and once the
    counter reaches ``patience`` the ``early_stop`` flag is set. Improving
    calls checkpoint the model to ``path + '/checkpoint.pth'``.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        self.patience = patience      # allowed number of consecutive non-improving calls
        self.verbose = verbose
        self.counter = 0              # consecutive non-improving calls so far
        self.best_score = None        # best (-val_loss) observed so far
        self.early_stop = False
        # np.inf instead of np.Inf: the capitalised alias was removed in NumPy 2.0.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, path):
        """Record a validation loss; checkpoint on improvement, count otherwise."""
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, path):
        """Save the model's state dict into ``path`` and update the best loss."""
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')
        self.val_loss_min = val_loss

class Exp_Basic(object):
    """Base class for experiment runners: device acquisition and model registry.

    Subclasses must override ``_build_model`` (it is called from ``__init__``)
    and may override the ``_get_data``/``vali``/``train``/``test`` hooks.
    """

    def __init__(self, args):
        self.args = args
        # Registry mapping model names to their module; subclasses pick from
        # it in their _build_model() override.
        self.model_dict = {
            'TimesNet': TimesNet,
            'GPT4TS': GPT4TS,
        }
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        # Abstract: subclasses must return an nn.Module.
        # (Removed the unreachable "return None" that followed the raise.)
        raise NotImplementedError

    def _acquire_device(self):
        """Select and return the torch device, honouring the GPU flags in args."""
        if self.args.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(
                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        pass

    def vali(self):
        pass

    def train(self):
        pass

    def test(self):
        pass


class Exp_Anomaly_Detection(Exp_Basic):
    def __init__(self, args, train_set, train_loader, val_set, val_loader, test_set, test_loader):
        super(Exp_Anomaly_Detection, self).__init__(args)
        self.train_set = train_set
        self.train_loader = train_loader
        self.val_set = val_set
        self.val_loader = val_loader
        self.test_set = test_set
        self.test_loader = test_loader

    def _build_model(self):
        model = self.model_dict[self.args.model].Model(self.args).float()

        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        # data_set, data_loader = data_provider(self.args, flag)
        if flag == 'train':
            return self.train_set, self.train_loader

        if flag == 'val':
            return self.val_set, self.val_loader

        if flag == 'test':
            return self.test_set, self.test_loader

        # return self.data_set, self.data_loader

    def _select_optimizer(self):
        model_optim = optim.Adam(self.model.parame
Download .txt
gitextract_owuozh1h/

├── .idea/
│   ├── .gitignore
│   ├── deployment.xml
│   ├── inspectionProfiles/
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── modules.xml
│   ├── time-series-ptms.iml
│   └── vcs.xml
├── README.md
├── ts_anomaly_detection_methods/
│   ├── README.md
│   ├── anomaly_transformer/
│   │   ├── ATmodelbatch.py
│   │   ├── datautils.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── anomaly_transformer_model.py
│   │   │   ├── dilated_conv.py
│   │   │   ├── encoder.py
│   │   │   └── losses.py
│   │   ├── tasks/
│   │   │   ├── __init__.py
│   │   │   └── anomaly_detection.py
│   │   ├── train.py
│   │   ├── trainATbatch.py
│   │   ├── ts2vec.py
│   │   └── utils.py
│   └── other_anomaly_baselines/
│       ├── AT_solver.py
│       ├── ATmodelbatch.py
│       ├── README.md
│       ├── dataset_read_test.py
│       ├── datautils.py
│       ├── dcdetector_solver.py
│       ├── donut.py
│       ├── exp_anomaly_detection.py
│       ├── hello_test_evo.py
│       ├── lstm_vae.py
│       ├── metrics/
│       │   ├── AUC.py
│       │   ├── Matthews_correlation_coefficient.py
│       │   ├── affiliation/
│       │   │   ├── _affiliation_zone.py
│       │   │   ├── _integral_interval.py
│       │   │   ├── _single_ground_truth_event.py
│       │   │   ├── generics.py
│       │   │   └── metrics.py
│       │   ├── combine_all_scores.py
│       │   ├── customizable_f1_score.py
│       │   ├── evaluate_utils.py
│       │   ├── evaluator.py
│       │   ├── f1_score_f1_pa.py
│       │   ├── f1_series.py
│       │   ├── fc_score.py
│       │   ├── metrics.py
│       │   ├── precision_at_k.py
│       │   └── vus/
│       │       ├── analysis/
│       │       │   ├── robustness_eval.py
│       │       │   └── score_computation.py
│       │       ├── metrics.py
│       │       ├── models/
│       │       │   ├── distance.py
│       │       │   └── feature.py
│       │       └── utils/
│       │           ├── metrics.py
│       │           └── slidingWindows.py
│       ├── models/
│       │   ├── AnomalyTransformer.py
│       │   ├── DCdetector.py
│       │   ├── GPT4TS.py
│       │   ├── TimesNet.py
│       │   ├── __init__.py
│       │   ├── dilated_conv.py
│       │   ├── donut_model.py
│       │   ├── encoder.py
│       │   ├── losses.py
│       │   └── lstm_vae_model.py
│       ├── new_dataset_read_test.py
│       ├── scripts/
│       │   ├── at_zeta0.sh
│       │   ├── at_zeta1.sh
│       │   ├── generator_sh.py
│       │   ├── kpi.sh
│       │   ├── multi_at.sh
│       │   ├── ucr_at.sh
│       │   ├── ucr_at_delta_0.sh
│       │   ├── ucr_at_delta_1.sh
│       │   ├── ucr_at_delta_1_2.sh
│       │   ├── ucr_at_zeta0.sh
│       │   ├── uni_at.sh
│       │   └── yahoo.sh
│       ├── spot.py
│       ├── tasks/
│       │   ├── __init__.py
│       │   └── anomaly_detection.py
│       ├── train.py
│       ├── trainATbatch.py
│       ├── train_at_multi.py
│       ├── train_at_uni.py
│       ├── train_dcdetector.py
│       ├── train_dcdetector_nui.py
│       ├── train_donut.py
│       ├── train_donut_multi.py
│       ├── train_dspot.py
│       ├── train_dspot_multi.py
│       ├── train_gpt4ts.py
│       ├── train_gpt4ts_uni.py
│       ├── train_lstm_vae.py
│       ├── train_lstm_vae_multi.py
│       ├── train_spot.py
│       ├── train_spot_multi.py
│       ├── train_timesnet.py
│       ├── train_timesnet_uni.py
│       ├── train_ts2vec.py
│       ├── train_ts2vec_multi.py
│       ├── ts2vec.py
│       └── utils.py
├── ts_classification_methods/
│   ├── .gitignore
│   ├── README.md
│   ├── data/
│   │   ├── __init__.py
│   │   ├── dataloader.py
│   │   └── preprocessing.py
│   ├── environment.yaml
│   ├── gpt4ts/
│   │   ├── __init__.py
│   │   ├── gpt4ts_utils.py
│   │   ├── main_gpt4ts.py
│   │   ├── main_gpt4ts_ucr.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── embed.py
│   │   │   ├── gpt4ts.py
│   │   │   └── loss.py
│   │   └── scripts/
│   │       └── generator_gpt4ts.py
│   ├── model/
│   │   ├── __init__.py
│   │   ├── loss.py
│   │   └── tsm_model.py
│   ├── patchtst/
│   │   ├── __init__.py
│   │   ├── main_patchtst_iota.py
│   │   ├── main_patchtst_ucr.py
│   │   ├── mian_patchtst.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── attention.py
│   │   │   ├── basics.py
│   │   │   ├── heads.py
│   │   │   ├── patchTST.py
│   │   │   ├── pos_encoding.py
│   │   │   └── revin.py
│   │   ├── patch_mask.py
│   │   └── scripts/
│   │       └── generator_patchtst.py
│   ├── result_tsm/
│   │   ├── ChlorineConcentration/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── Crop/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── ECG5000/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── ElectricDevices/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── FordA/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── FordB/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── NonInvasiveFetalECGThorax1/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── NonInvasiveFetalECGThorax2/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── StarLightCurves/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── TwoPatterns/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryAll/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryX/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryY/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   ├── UWaveGestureLibraryZ/
│   │   │   ├── classifier_weights.pt
│   │   │   ├── fcn_reconstruction_pretrain_weights.pt
│   │   │   ├── pretrain_weights.pt
│   │   │   └── rnn_reconstruction_pretrain_weights.pt
│   │   └── Wafer/
│   │       ├── classifier_weights.pt
│   │       ├── fcn_reconstruction_pretrain_weights.pt
│   │       ├── pretrain_weights.pt
│   │       └── rnn_reconstruction_pretrain_weights.pt
│   ├── scripts/
│   │   ├── dilated_single_norm.sh
│   │   ├── fcn_lin_set_norm.sh
│   │   ├── fcn_lin_single_norm.sh
│   │   ├── generator_dilated.py
│   │   ├── generator_fcn.py
│   │   ├── generator_pretrain_cls.py
│   │   └── transfer_pretrain_finetune.sh
│   ├── selftime_cls/
│   │   ├── __init__.py
│   │   ├── config/
│   │   │   ├── CricketX_config.json
│   │   │   ├── DodgerLoopDay_config.json
│   │   │   ├── InsectWingbeatSound_config.json
│   │   │   ├── MFPT_config.json
│   │   │   ├── UWaveGestureLibraryAll_config.json
│   │   │   └── XJTU_config.json
│   │   ├── dataloader/
│   │   │   ├── TSC_data_loader.py
│   │   │   ├── __init__.py
│   │   │   └── ucr2018.py
│   │   ├── dataprepare.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   └── eval_ssl.py
│   │   ├── model/
│   │   │   ├── __init__.py
│   │   │   ├── model_RelationalReasoning.py
│   │   │   └── model_backbone.py
│   │   ├── optim/
│   │   │   ├── __init__.py
│   │   │   ├── pretrain.py
│   │   │   ├── pytorchtools.py
│   │   │   └── train.py
│   │   ├── scripts/
│   │   │   └── ucr.sh
│   │   ├── train_ssl.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── augmentation.py
│   │       ├── datasets.py
│   │       ├── helper.py
│   │       ├── transforms.py
│   │       ├── utils.py
│   │       └── utils_plot.py
│   ├── test/
│   │   ├── __init__.py
│   │   ├── train_uea_test.py
│   │   └── uea_test.py
│   ├── timesnet/
│   │   ├── __init__.py
│   │   ├── main_timesnet.py
│   │   ├── main_timesnet_ucr.py
│   │   ├── models/
│   │   │   ├── Conv_Blocks.py
│   │   │   ├── Embed.py
│   │   │   ├── SelfAttention_Family.py
│   │   │   ├── TimesNet.py
│   │   │   ├── Transformer.py
│   │   │   ├── Transformer_EncDec.py
│   │   │   └── __init__.py
│   │   └── scripts/
│   │       └── generator_timesnet.py
│   ├── tloss_cls/
│   │   ├── default_hyperparameters.json
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   └── triplet_loss.py
│   │   ├── networks/
│   │   │   ├── __init__.py
│   │   │   ├── causal_cnn.py
│   │   │   └── lstm.py
│   │   ├── scikit_wrappers.py
│   │   ├── scripts/
│   │   │   ├── ucr.sh
│   │   │   └── uea.sh
│   │   ├── transfer_ucr.py
│   │   ├── ucr.py
│   │   ├── uea.py
│   │   └── utils.py
│   ├── train.py
│   ├── ts2vec_cls/
│   │   ├── __init__.py
│   │   ├── datautils.py
│   │   ├── models/
│   │   │   ├── __init__.py
│   │   │   ├── dilated_conv.py
│   │   │   ├── encoder.py
│   │   │   └── losses.py
│   │   ├── result/
│   │   │   └── ts2vec_tsm_train_val_b8_single_norm_0409_cls_result.csv
│   │   ├── scripts/
│   │   │   ├── generator_ts2vec.py
│   │   │   ├── generator_ts2vec_uea.py
│   │   │   ├── ts2vec_fcn_set_norm.sh
│   │   │   ├── ts2vec_fcn_single_norm.sh
│   │   │   ├── ts2vec_tsm_set_norm.sh
│   │   │   ├── ts2vec_tsm_single_norm.sh
│   │   │   └── ts2vec_tsm_uea.sh
│   │   ├── tasks/
│   │   │   ├── __init__.py
│   │   │   ├── _eval_protocols.py
│   │   │   └── classification.py
│   │   ├── train.py
│   │   ├── train_fcn.py
│   │   ├── train_tsm.py
│   │   ├── train_tsm_uea.py
│   │   ├── ts2vec.py
│   │   └── utils.py
│   ├── tsm_utils.py
│   ├── tst_cls/
│   │   ├── scripts/
│   │   │   ├── classification.sh
│   │   │   └── pretrain_finetune.sh
│   │   └── src/
│   │       ├── __init__.py
│   │       ├── dataprepare.py
│   │       ├── datasets/
│   │       │   ├── __init__.py
│   │       │   ├── data.py
│   │       │   ├── dataset.py
│   │       │   ├── datasplit.py
│   │       │   └── utils.py
│   │       ├── main.py
│   │       ├── models/
│   │       │   ├── __init__.py
│   │       │   ├── loss.py
│   │       │   └── ts_transformer.py
│   │       ├── optimizers.py
│   │       ├── options.py
│   │       ├── running.py
│   │       └── utils/
│   │           ├── __init__.py
│   │           ├── analysis.py
│   │           └── utils.py
│   ├── tstcc_cls/
│   │   ├── __init__.py
│   │   ├── config_files/
│   │   │   ├── ucr_Configs.py
│   │   │   └── uea_Configs.py
│   │   ├── dataloader/
│   │   │   ├── augmentations.py
│   │   │   └── dataloader.py
│   │   ├── main.py
│   │   ├── main_ucr.py
│   │   ├── main_uea.py
│   │   ├── models/
│   │   │   ├── TC.py
│   │   │   ├── attention.py
│   │   │   ├── loss.py
│   │   │   └── model.py
│   │   ├── result/
│   │   │   └── tstcc_0327_cls_result.csv
│   │   ├── scripts/
│   │   │   ├── fivefold_tstcc_ucr.sh
│   │   │   ├── fivefold_tstcc_uea.sh
│   │   │   ├── generator_ucr.py
│   │   │   ├── generator_uea.py
│   │   │   └── part_uea_tstcc.sh
│   │   ├── trainer/
│   │   │   └── trainer.py
│   │   └── utils.py
│   ├── visualize.py
│   └── visuals/
│       ├── GunPoint/
│       │   ├── classifier_NonInvasiveFetalECGThorax1_linear.pt
│       │   ├── direct_dilated_classifier.pt
│       │   ├── direct_dilated_encoder.pt
│       │   ├── direct_fcn_classifier.pt
│       │   ├── direct_fcn_encoder.pt
│       │   ├── encoder_NonInvasiveFetalECGThorax1_linear.pt
│       │   ├── supervised_classifier_ElectricDevices_linear.pt
│       │   ├── supervised_classifier_UWaveGestureLibraryX_linear.pt
│       │   ├── supervised_encoder_ElectricDevices_linear.pt
│       │   ├── supervised_encoder_UWaveGestureLibraryX_linear.pt
│       │   ├── unsupervised_classifier_UWaveGestureLibraryX_linear.pt
│       │   └── unsupervised_encoder_UWaveGestureLibraryX_linear.pt
│       ├── MixedShapesSmallTrain/
│       │   ├── direct_fcn_linear_encoder_weights.pt
│       │   ├── fcn_linear_encoder_finetune_weights_ElectricDevices.pt
│       │   └── fcn_linear_encoder_finetune_weights_UWaveGestureLibraryZ.pt
│       └── Wine/
│           ├── direct_fcn_encoder.pt
│           ├── direct_fcn_linear_encoder_weights.pt
│           ├── encoder_Crop_linear.pt
│           ├── encoder_NonInvasiveFetalECGThorax1_linear.pt
│           └── encoder_UWaveGestureLibraryZ_linear.pt
└── ts_forecasting_methods/
    ├── CoST/
    │   ├── CODEOWNERS
    │   ├── CODE_OF_CONDUCT.md
    │   ├── LICENSE.txt
    │   ├── README.md
    │   ├── SECURITY.md
    │   ├── cost.py
    │   ├── datasets/
    │   │   ├── PLACE_DATASETS_HERE
    │   │   ├── electricity.py
    │   │   └── m5.py
    │   ├── datautils.py
    │   ├── models/
    │   │   ├── __init__.py
    │   │   ├── dilated_conv.py
    │   │   └── encoder.py
    │   ├── requirements.txt
    │   ├── scripts/
    │   │   ├── ETT_CoST.sh
    │   │   ├── Electricity_CoST.sh
    │   │   ├── M5_CoST.sh
    │   │   └── Weather_CoST.sh
    │   ├── tasks/
    │   │   ├── __init__.py
    │   │   ├── _eval_protocols.py
    │   │   └── forecasting.py
    │   ├── train.py
    │   └── utils.py
    ├── Other_baselines/
    │   ├── README.md
    │   ├── __init__.py
    │   ├── data_config.yml
    │   ├── data_provider/
    │   │   ├── __init__.py
    │   │   ├── data_factory.py
    │   │   ├── data_factory_tempo.py
    │   │   ├── data_loader.py
    │   │   ├── data_loader_tempo.py
    │   │   ├── m4.py
    │   │   └── uea.py
    │   ├── exp/
    │   │   ├── __init__.py
    │   │   ├── exp_basic.py
    │   │   ├── exp_basic_patch.py
    │   │   ├── exp_long_term_forecasting.py
    │   │   ├── exp_main.py
    │   │   └── exp_short_term_forecasting.py
    │   ├── layers/
    │   │   ├── AutoCorrelation.py
    │   │   ├── Autoformer_EncDec.py
    │   │   ├── Conv_Blocks.py
    │   │   ├── Embed.py
    │   │   ├── PatchTST_backbone.py
    │   │   ├── PatchTST_layers.py
    │   │   ├── RevIN.py
    │   │   ├── SelfAttention_Family.py
    │   │   ├── Transformer_EncDec.py
    │   │   └── __init__.py
    │   ├── models/
    │   │   ├── Autoformer.py
    │   │   ├── DLinear.py
    │   │   ├── GPT4TS.py
    │   │   ├── Informer.py
    │   │   ├── LogTrans.py
    │   │   ├── PatchTST.py
    │   │   ├── PatchTST_raw.py
    │   │   ├── TCN.py
    │   │   ├── TEMPO.py
    │   │   ├── TimesNet.py
    │   │   ├── __init__.py
    │   │   └── iTransformer.py
    │   ├── train_autoformer.py
    │   ├── train_cost.py
    │   ├── train_dlinear.py
    │   ├── train_gpt4ts.py
    │   ├── train_informer.py
    │   ├── train_itransformer.py
    │   ├── train_logtrans.py
    │   ├── train_patchtst.py
    │   ├── train_tcn.py
    │   ├── train_tempo.py
    │   ├── train_timesnet.py
    │   ├── train_ts2vec.py
    │   └── utils/
    │       ├── ADFtest.py
    │       ├── __init__.py
    │       ├── augmentation.py
    │       ├── dtw.py
    │       ├── dtw_metric.py
    │       ├── losses.py
    │       ├── m4_summary.py
    │       ├── masking.py
    │       ├── metrics.py
    │       ├── print_args.py
    │       ├── rev_in.py
    │       ├── timefeatures.py
    │       ├── tools.py
    │       └── tools_tempo.py
    ├── README.md
    ├── SupervisedBaselines/
    │   ├── Dockerfile
    │   ├── LICENSE
    │   ├── Makefile
    │   ├── README.md
    │   ├── data_provider/
    │   │   ├── __init__.py
    │   │   ├── data_factory.py
    │   │   └── data_loader.py
    │   ├── environment.yml
    │   ├── exp/
    │   │   ├── __init__.py
    │   │   ├── exp_basic.py
    │   │   ├── exp_informer.py
    │   │   └── exp_main.py
    │   ├── layers/
    │   │   ├── AutoCorrelation.py
    │   │   ├── Autoformer_EncDec.py
    │   │   ├── Embed.py
    │   │   ├── SelfAttention_Family.py
    │   │   ├── Transformer_EncDec.py
    │   │   └── __init__.py
    │   ├── requirements.txt
    │   ├── run.py
    │   └── utils/
    │       ├── __init__.py
    │       ├── download_data.py
    │       ├── masking.py
    │       ├── metrics.py
    │       ├── timefeatures.py
    │       └── tools.py
    └── ts2vec/
        ├── README.md
        ├── __init__.py
        ├── data_provider/
        │   ├── __init__.py
        │   ├── data_factory.py
        │   ├── data_loader.py
        │   ├── m4.py
        │   ├── metrics.py
        │   ├── tools.py
        │   └── uea.py
        ├── datautils.py
        ├── forecasting_datasets_load_test.py
        ├── models/
        │   ├── __init__.py
        │   ├── dilated_conv.py
        │   ├── encoder.py
        │   └── losses.py
        ├── requirements.txt
        ├── scripts/
        │   ├── electricity.sh
        │   ├── ett.sh
        │   ├── kpi.sh
        │   ├── ucr.sh
        │   ├── uea.sh
        │   └── yahoo.sh
        ├── tasks/
        │   ├── __init__.py
        │   ├── _eval_protocols.py
        │   ├── anomaly_detection.py
        │   ├── classification.py
        │   └── forecasting.py
        ├── train.py
        ├── ts2vec.py
        └── utils.py
Download .txt
Showing preview only (200K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (2458 symbols across 249 files)

FILE: ts_anomaly_detection_methods/anomaly_transformer/ATmodelbatch.py
  class AnomalyAttention (line 17) | class AnomalyAttention(nn.Module):
    method __init__ (line 18) | def __init__(self, N, d_model):
    method forward (line 31) | def forward(self, x):
    method initialize (line 39) | def initialize(self, x):
    method gaussian_kernel (line 46) | def gaussian_kernel(mean, sigma):
    method prior_association (line 50) | def prior_association(self):
    method series_association (line 65) | def series_association(self):
    method reconstruction (line 71) | def reconstruction(self):
  class AnomalyTransformerBlock (line 74) | class AnomalyTransformerBlock(nn.Module):
    method __init__ (line 75) | def __init__(self, N, d_model):
    method forward (line 84) | def forward(self, x):
  class AnomalyTransformer (line 96) | class AnomalyTransformer(nn.Module):
    method __init__ (line 97) | def __init__(self,batch_size, N, in_channel, d_model, layers, lambda_):
    method to_string (line 114) | def to_string(self):
    method forward (line 117) | def forward(self, x):
    method rowwise_kl (line 159) | def rowwise_kl(self, row, Pl, Sl, eps=1e-4):
    method layer_association_discrepancy (line 169) | def layer_association_discrepancy(self, Pl, Sl, x):
    method association_discrepancy (line 175) | def association_discrepancy(self, P_list, S_list, x):
    method loss_function (line 186) | def loss_function(self, x_hat, P_list, S_list, lambda_, x):
    method min_loss (line 196) | def min_loss(self, x):
    method max_loss (line 204) | def max_loss(self, x):
    method anomaly_score_whole (line 211) | def anomaly_score_whole(self, x):
    method anomaly_score (line 229) | def anomaly_score(self, x):

FILE: ts_anomaly_detection_methods/anomaly_transformer/datautils.py
  function load_UCR (line 12) | def load_UCR(dataset):
  function load_anomaly (line 78) | def load_anomaly(name):
  function gen_ano_train_data (line 85) | def gen_ano_train_data(all_train_data):

FILE: ts_anomaly_detection_methods/anomaly_transformer/models/anomaly_transformer_model.py
  class AnomalyAttention (line 7) | class AnomalyAttention(nn.Module):
    method __init__ (line 8) | def __init__(self, N, d_model):
    method forward (line 26) | def forward(self, x):
    method initialize (line 36) | def initialize(self, x):
    method gaussian_kernel (line 43) | def gaussian_kernel(mean, sigma):
    method prior_association (line 47) | def prior_association(self):
    method series_association (line 56) | def series_association(self):
    method reconstruction (line 59) | def reconstruction(self):
  class AnomalyTransformerBlock (line 63) | class AnomalyTransformerBlock(nn.Module):
    method __init__ (line 64) | def __init__(self, N, d_model):
    method forward (line 73) | def forward(self, x):
  class AnomalyTransformer (line 85) | class AnomalyTransformer(nn.Module):
    method __init__ (line 86) | def __init__(self, N, in_channel, d_model, layers, lambda_):
    method forward (line 102) | def forward(self, x):
    method layer_association_discrepancy (line 113) | def layer_association_discrepancy(self, Pl, Sl, x):
    method association_discrepancy (line 122) | def association_discrepancy(self, P_list, S_list, x):
    method loss_function (line 131) | def loss_function(self, x_hat, P_list, S_list, lambda_, x):
    method min_loss (line 138) | def min_loss(self, x):
    method max_loss (line 144) | def max_loss(self, x):
    method anomaly_score (line 150) | def anomaly_score(self, x):

FILE: ts_anomaly_detection_methods/anomaly_transformer/models/dilated_conv.py
  class SamePadConv (line 6) | class SamePadConv(nn.Module):
    method __init__ (line 7) | def __init__(self, in_channels, out_channels, kernel_size, dilation=1,...
    method forward (line 19) | def forward(self, x):
  class ConvBlock (line 25) | class ConvBlock(nn.Module):
    method __init__ (line 26) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 32) | def forward(self, x):
  class DilatedConvEncoder (line 40) | class DilatedConvEncoder(nn.Module):
    method __init__ (line 41) | def __init__(self, in_channels, channels, kernel_size):
    method forward (line 54) | def forward(self, x):

FILE: ts_anomaly_detection_methods/anomaly_transformer/models/encoder.py
  function generate_continuous_mask (line 7) | def generate_continuous_mask(B, T, n=5, l=0.1):
  function generate_binomial_mask (line 23) | def generate_binomial_mask(B, T, p=0.5):
  class TSEncoder (line 26) | class TSEncoder(nn.Module):
    method __init__ (line 27) | def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, ...
    method forward (line 41) | def forward(self, x, mask=None):  # x: B x T x input_dims

FILE: ts_anomaly_detection_methods/anomaly_transformer/models/losses.py
  function hierarchical_contrastive_loss (line 5) | def hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):
  function instance_contrastive_loss (line 23) | def instance_contrastive_loss(z1, z2):
  function temporal_contrastive_loss (line 38) | def temporal_contrastive_loss(z1, z2):

FILE: ts_anomaly_detection_methods/anomaly_transformer/tasks/anomaly_detection.py
  function get_range_proba (line 8) | def get_range_proba(predict, label, delay=7):
  function reconstruct_label (line 34) | def reconstruct_label(timestamp, label):
  function eval_ad_result (line 52) | def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_lis...
  function np_shift (line 71) | def np_shift(arr, num, fill_value=np.nan):
  function eval_anomaly_detection (line 84) | def eval_anomaly_detection(model, all_train_data, all_train_labels, all_...
  function eval_anomaly_detection_coldstart (line 162) | def eval_anomaly_detection_coldstart(model, all_train_data, all_train_la...

FILE: ts_anomaly_detection_methods/anomaly_transformer/train.py
  function save_checkpoint_callback (line 14) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/anomaly_transformer/trainATbatch.py
  class Config (line 27) | class Config:
  function train (line 50) | def train(config,model,all_train_data, all_train_labels, all_train_times...
  function evaluate (line 101) | def evaluate(config,cur_epoch,model,all_train_data, all_train_labels, al...
  function main (line 156) | def main(config):

FILE: ts_anomaly_detection_methods/anomaly_transformer/ts2vec.py
  class TS2Vec (line 11) | class TS2Vec:
    method __init__ (line 14) | def __init__(
    method fit (line 61) | def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method _eval_with_pooling (line 167) | def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_wind...
    method encode (line 211) | def encode(self, data, mask=None, encoding_window=None, casual=False, ...
    method save (line 308) | def save(self, fn):
    method load (line 316) | def load(self, fn):

FILE: ts_anomaly_detection_methods/anomaly_transformer/utils.py
  function pkl_save (line 8) | def pkl_save(name, var):
  function pkl_load (line 12) | def pkl_load(name):
  function split_N_pad (line 16) | def split_N_pad(series,window_size):
  function data_slice (line 32) | def data_slice(data,window_size):
  function torch_pad_nan (line 45) | def torch_pad_nan(arr, left=0, right=0, dim=0):
  function pad_nan_to_target (line 56) | def pad_nan_to_target(array, target_length, axis=0, both_side=False):
  function pad_zero_to_target (line 68) | def pad_zero_to_target(array, target_length, axis=0, both_side=False):
  function split_with_nan (line 80) | def split_with_nan(x, sections, axis=0):
  function take_per_row (line 88) | def take_per_row(A, indx, num_elem):
  function centerize_vary_length_series (line 92) | def centerize_vary_length_series(x):
  function data_dropout (line 101) | def data_dropout(arr, p):
  function name_with_datetime (line 114) | def name_with_datetime(prefix='default'):
  function init_dl_program (line 118) | def init_dl_program(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/AT_solver.py
  class UniLoader_train (line 29) | class UniLoader_train(object):
    method __init__ (line 30) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 38) | def __len__(self):
    method __getitem__ (line 46) | def __getitem__(self, index):
  class UniLoader_test (line 52) | class UniLoader_test(object):
    method __init__ (line 53) | def __init__(self, data_set, label_set, win_size, step, mode="train"):
    method __len__ (line 62) | def __len__(self):
    method __getitem__ (line 70) | def __getitem__(self, index):
  function split_N_pad (line 76) | def split_N_pad(series,window_size):
  function mkdir (line 91) | def mkdir(directory):
  function my_kl_loss (line 96) | def my_kl_loss(p, q):
  function adjust_learning_rate (line 101) | def adjust_learning_rate(optimizer, epoch, lr_):
  class EarlyStopping (line 110) | class EarlyStopping:
    method __init__ (line 111) | def __init__(self, patience=7, verbose=False, dataset_name='', delta=0):
    method __call__ (line 123) | def __call__(self, val_loss, val_loss2, model, path):
    method save_checkpoint (line 141) | def save_checkpoint(self, val_loss, val_loss2, model, path):
  class Solver (line 149) | class Solver(object):
    method __init__ (line 152) | def __init__(self, config, train_set, train_loader, val_set, val_loade...
    method build_model (line 165) | def build_model(self):
    method vali (line 172) | def vali(self, vali_loader):
    method train (line 206) | def train(self):
    method test (line 283) | def test(self, ucr_index=None):
    method train_uni (line 519) | def train_uni(self):
    method test_uni (line 599) | def test_uni(self, all_train_data, all_test_data, all_test_labels, all...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/ATmodelbatch.py
  class AnomalyAttention (line 18) | class AnomalyAttention(nn.Module):
    method __init__ (line 19) | def __init__(self, N, d_model):
    method forward (line 32) | def forward(self, x):
    method initialize (line 40) | def initialize(self, x):
    method gaussian_kernel (line 47) | def gaussian_kernel(mean, sigma):
    method prior_association (line 51) | def prior_association(self):
    method series_association (line 66) | def series_association(self):
    method reconstruction (line 72) | def reconstruction(self):
  class AnomalyTransformerBlock (line 76) | class AnomalyTransformerBlock(nn.Module):
    method __init__ (line 77) | def __init__(self, N, d_model):
    method forward (line 86) | def forward(self, x):
  class AnomalyTransformer (line 99) | class AnomalyTransformer(nn.Module):
    method __init__ (line 100) | def __init__(self, batch_size, N, in_channel, d_model, layers, lambda_):
    method to_string (line 118) | def to_string(self):
    method forward (line 121) | def forward(self, x):
    method rowwise_kl (line 163) | def rowwise_kl(self, row, Pl, Sl, eps=1e-4):
    method layer_association_discrepancy (line 174) | def layer_association_discrepancy(self, Pl, Sl, x):
    method association_discrepancy (line 180) | def association_discrepancy(self, P_list, S_list, x):
    method loss_function (line 191) | def loss_function(self, x_hat, P_list, S_list, lambda_, x):
    method min_loss (line 201) | def min_loss(self, x):
    method max_loss (line 209) | def max_loss(self, x):
    method anomaly_score_whole (line 216) | def anomaly_score_whole(self, x):
    method anomaly_score (line 232) | def anomaly_score(self, x):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/dataset_read_test.py
  function get_range_proba (line 6) | def get_range_proba(predict, label, delay=7):
  function reconstruct_label (line 32) | def reconstruct_label(timestamp, label):
  function eval_ad_result (line 50) | def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_lis...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/datautils.py
  function load_UCR (line 12) | def load_UCR(dataset):
  function load_anomaly (line 78) | def load_anomaly(name):
  function gen_ano_train_data (line 84) | def gen_ano_train_data(all_train_data):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/dcdetector_solver.py
  class UniLoader_train (line 19) | class UniLoader_train(object):
    method __init__ (line 20) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 28) | def __len__(self):
    method __getitem__ (line 36) | def __getitem__(self, index):
  class UniLoader_test (line 42) | class UniLoader_test(object):
    method __init__ (line 43) | def __init__(self, data_set, label_set, win_size, step, mode="train"):
    method __len__ (line 52) | def __len__(self):
    method __getitem__ (line 60) | def __getitem__(self, index):
  function my_kl_loss (line 68) | def my_kl_loss(p, q):
  function adjust_learning_rate (line 73) | def adjust_learning_rate(optimizer, epoch, lr_):
  class EarlyStopping (line 81) | class EarlyStopping:
    method __init__ (line 82) | def __init__(self, patience=7, verbose=False, dataset_name='', delta=0...
    method __call__ (line 95) | def __call__(self, val_loss, val_loss2, model, path):
    method save_checkpoint (line 112) | def save_checkpoint(self, val_loss, val_loss2, model, path):
  class Solver (line 119) | class Solver(object):
    method __init__ (line 122) | def __init__(self, config, multi=True):
    method build_model (line 150) | def build_model(self):
    method vali (line 160) | def vali(self, vali_loader):
    method train (line 192) | def train(self):
    method test (line 263) | def test(self, ucr_index=None):
    method vali_uni (line 481) | def vali_uni(self, vali_loader):
    method train_uni (line 513) | def train_uni(self):
    method test_uni (line 584) | def test_uni(self, all_train_data, all_test_data, all_test_labels, all...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/donut.py
  function adjustment (line 18) | def adjustment(gt, pred):
  class DONUT (line 42) | class DONUT:
    method __init__ (line 44) | def __init__(
    method train (line 76) | def train(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method anomaly_score (line 168) | def anomaly_score(self, model, test_data, is_multi=False):
    method evaluate (line 225) | def evaluate(self, model, all_train_data, all_train_labels, all_train_...
    method save (line 353) | def save(self, fn):
    method load (line 361) | def load(self, fn):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/exp_anomaly_detection.py
  class UniLoader_train (line 27) | class UniLoader_train(object):
    method __init__ (line 28) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 36) | def __len__(self):
    method __getitem__ (line 44) | def __getitem__(self, index):
  class UniLoader_test (line 50) | class UniLoader_test(object):
    method __init__ (line 51) | def __init__(self, data_set, label_set, win_size, step, mode="train"):
    method __len__ (line 60) | def __len__(self):
    method __getitem__ (line 68) | def __getitem__(self, index):
  function adjustment (line 76) | def adjustment(gt, pred):
  function adjust_learning_rate (line 100) | def adjust_learning_rate(optimizer, epoch, args):
  class EarlyStopping (line 118) | class EarlyStopping:
    method __init__ (line 119) | def __init__(self, patience=7, verbose=False, delta=0):
    method __call__ (line 128) | def __call__(self, val_loss, model, path):
    method save_checkpoint (line 143) | def save_checkpoint(self, val_loss, model, path):
  class Exp_Basic (line 149) | class Exp_Basic(object):
    method __init__ (line 150) | def __init__(self, args):
    method _build_model (line 159) | def _build_model(self):
    method _acquire_device (line 163) | def _acquire_device(self):
    method _get_data (line 174) | def _get_data(self):
    method vali (line 177) | def vali(self):
    method train (line 180) | def train(self):
    method test (line 183) | def test(self):
  class Exp_Anomaly_Detection (line 187) | class Exp_Anomaly_Detection(Exp_Basic):
    method __init__ (line 188) | def __init__(self, args, train_set, train_loader, val_set, val_loader,...
    method _build_model (line 197) | def _build_model(self):
    method _get_data (line 204) | def _get_data(self, flag):
    method _select_optimizer (line 217) | def _select_optimizer(self):
    method _select_criterion (line 221) | def _select_criterion(self):
    method vali (line 225) | def vali(self, vali_data, vali_loader, criterion):
    method vali_uni (line 245) | def vali_uni(self, vali_data, vali_loader, criterion):
    method train (line 265) | def train(self, setting):
    method train_uni (line 333) | def train_uni(self, setting):
    method test (line 403) | def test(self, setting, test=0, dataset=None, ucr_index=None):
    method test_uni (line 551) | def test_uni(self, setting, all_train_data, all_test_data, all_test_la...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/lstm_vae.py
  function adjustment (line 18) | def adjustment(gt, pred):
  class LSTM_VAE (line 42) | class LSTM_VAE:
    method __init__ (line 44) | def __init__(
    method train (line 76) | def train(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method anomaly_score (line 168) | def anomaly_score(self, model, test_data, is_multi=False):
    method evaluate (line 188) | def evaluate(self, model, all_train_data, all_train_labels, all_train_...
    method save (line 315) | def save(self, fn):
    method load (line 323) | def load(self, fn):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/AUC.py
  function extend_postive_range (line 8) | def extend_postive_range(x, window=16):
  function extend_postive_range_individual (line 28) | def extend_postive_range_individual(x, percentage=0.2):
  function TPR_FPR_RangeAUC (line 48) | def TPR_FPR_RangeAUC(labels, pred, P, L):
  function Range_AUC (line 84) | def Range_AUC(score_t_test, y_test,  window=5, percentage=0, plot_ROC=Fa...
  function point_wise_AUC (line 134) | def point_wise_AUC(score_t_test, y_test,  plot_ROC=False):
  function main (line 149) | def main():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/Matthews_correlation_coefficient.py
  function MCC (line 5) | def MCC(y_test, pred_labels):
  function main (line 12) | def main():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_affiliation_zone.py
  function t_start (line 5) | def t_start(j, Js = [(1,2),(3,4),(5,6)], Trange = (1,10)):
  function t_stop (line 22) | def t_stop(j, Js = [(1,2),(3,4),(5,6)], Trange = (1,10)):
  function E_gt_func (line 38) | def E_gt_func(j, Js, Trange):
  function get_all_E_gt_func (line 53) | def get_all_E_gt_func(Js, Trange):
  function affiliation_partition (line 66) | def affiliation_partition(Is = [(1,1.5),(2,5),(5,6),(8,9)], E_gt = [(1,2...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_integral_interval.py
  function interval_length (line 14) | def interval_length(J = (1,2)):
  function sum_interval_lengths (line 25) | def sum_interval_lengths(Is = [(1,2),(3,4),(5,6)]):
  function interval_intersection (line 34) | def interval_intersection(I = (1, 3), J = (2, 4)):
  function interval_subset (line 54) | def interval_subset(I = (1, 3), J = (0, 6)):
  function cut_into_three_func (line 67) | def cut_into_three_func(I, J):
  function get_pivot_j (line 104) | def get_pivot_j(I, J):
  function integral_mini_interval (line 125) | def integral_mini_interval(I, J):
  function integral_interval_distance (line 144) | def integral_interval_distance(I, J):
  function integral_mini_interval_P_CDFmethod__min_piece (line 177) | def integral_mini_interval_P_CDFmethod__min_piece(I, J, E):
  function integral_mini_interval_Pprecision_CDFmethod (line 213) | def integral_mini_interval_Pprecision_CDFmethod(I, J, E):
  function integral_interval_probaCDF_precision (line 244) | def integral_interval_probaCDF_precision(I, J, E):
  function cut_J_based_on_mean_func (line 281) | def cut_J_based_on_mean_func(J, e_mean):
  function integral_mini_interval_Precall_CDFmethod (line 306) | def integral_mini_interval_Precall_CDFmethod(I, J, E):
  function integral_interval_probaCDF_recall (line 422) | def integral_interval_probaCDF_recall(I, J, E):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_single_ground_truth_event.py
  function affiliation_precision_distance (line 14) | def affiliation_precision_distance(Is = [(1,2),(3,4),(5,6)], J = (2,5.5)):
  function affiliation_precision_proba (line 26) | def affiliation_precision_proba(Is = [(1,2),(3,4),(5,6)], J = (2,5.5), E...
  function affiliation_recall_distance (line 39) | def affiliation_recall_distance(Is = [(1,2),(3,4),(5,6)], J = (2,5.5)):
  function affiliation_recall_proba (line 54) | def affiliation_recall_proba(Is = [(1,2),(3,4),(5,6)], J = (2,5.5), E = ...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/generics.py
  function convert_vector_to_events (line 10) | def convert_vector_to_events(vector = [0, 1, 1, 0, 0, 1, 0]):
  function infer_Trange (line 34) | def infer_Trange(events_pred, events_gt):
  function has_point_anomalies (line 59) | def has_point_anomalies(events):
  function _sum_wo_nan (line 71) | def _sum_wo_nan(vec):
  function _len_wo_nan (line 81) | def _len_wo_nan(vec):
  function read_gz_data (line 91) | def read_gz_data(filename = 'data/machinetemp_groundtruth.gz'):
  function read_all_as_events (line 104) | def read_all_as_events():
  function f1_func (line 129) | def f1_func(p, r):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/metrics.py
  function test_events (line 18) | def test_events(events):
  function pr_from_events (line 35) | def pr_from_events(events_pred, events_gt, Trange):
  function produce_all_results (line 98) | def produce_all_results():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/combine_all_scores.py
  function combine_all_evaluation_scores (line 14) | def combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores):
  function main (line 55) | def main():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/customizable_f1_score.py
  function b (line 7) | def b(bias, i, length):
  function w (line 21) | def w(AnomalyRange, p):
  function Cardinality_factor (line 36) | def Cardinality_factor(Anomolyrange, Prange):
  function existence_reward (line 55) | def existence_reward(labels, preds):
  function range_recall_new (line 68) | def range_recall_new(labels, preds, alpha):
  function customizable_f1_score (line 88) | def customizable_f1_score(y_test, pred_labels,  alpha=0.2):
  function main (line 101) | def main():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluate_utils.py
  function get_composite_fscore_from_scores (line 6) | def get_composite_fscore_from_scores(score_t_test, thres, true_events, p...
  class NptConfig (line 19) | class NptConfig:
    method __init__ (line 20) | def __init__(self, config_dict):
  function find_length (line 24) | def find_length(data):
  function range_convers_new (line 42) | def range_convers_new(label):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluator.py
  function evaluate (line 20) | def evaluate(saved_model_root, logger, thres_methods=["top_k_time", "bes...
  function analyse_from_pkls (line 317) | def analyse_from_pkls(results_root:str, thres_methods=["best_f1_test"], ...
  function repredict_from_saved_model (line 427) | def repredict_from_saved_model(model_root, algo_class, entity, logger):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_score_f1_pa.py
  function get_point_adjust_scores (line 7) | def get_point_adjust_scores(y_test, pred_labels, true_events, thereshold...
  function get_adjust_F1PA (line 27) | def get_adjust_F1PA(pred, gt):
  function get_prec_rec_fscore (line 59) | def get_prec_rec_fscore(tp, fp, fn):
  function get_f_score (line 70) | def get_f_score(prec, rec):
  function get_accuracy_precision_recall_fscore (line 79) | def get_accuracy_precision_recall_fscore(y_true: list, y_pred: list):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_series.py
  function threshold_and_predict (line 19) | def threshold_and_predict(score_t_test, y_test, true_events, logger, tes...
  function evaluate_predicted_labels (line 92) | def evaluate_predicted_labels(pred_labels, y_test, true_events, logger, ...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/fc_score.py
  function get_events (line 5) | def get_events(y_test, outlier=1, normal=0):
  function get_composite_fscore_raw (line 27) | def get_composite_fscore_raw(y_test, pred_labels,  true_events, return_p...
  function main (line 40) | def main():

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/metrics.py
  function combine_all_evaluation_scores (line 13) | def combine_all_evaluation_scores(y_test, pred_labels, anomaly_scores):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/precision_at_k.py
  function precision_at_k (line 6) | def precision_at_k(y_test, score_t_test, pred_labels):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/robustness_eval.py
  function generate_new_label (line 26) | def generate_new_label(label,lag):
  function compute_anomaly_acc_lag (line 34) | def compute_anomaly_acc_lag(methods_scores,label,slidingWindow,methods_k...
  function compute_anomaly_acc_percentage (line 87) | def compute_anomaly_acc_percentage(methods_scores,label,slidingWindow,me...
  function compute_anomaly_acc_noise (line 150) | def compute_anomaly_acc_noise(methods_scores,label,slidingWindow,methods...
  function compute_anomaly_acc_pairwise (line 208) | def compute_anomaly_acc_pairwise(methods_scores,label,slidingWindow,meth...
  function normalize_dict_exp (line 270) | def normalize_dict_exp(methods_acc_lag,methods_keys):
  function group_dict (line 296) | def group_dict(methods_acc_lag,methods_keys):
  function generate_curve (line 322) | def generate_curve(label,score,slidingWindow):
  function box_plot (line 334) | def box_plot(data, edge_color, fill_color):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/score_computation.py
  function find_section_length (line 35) | def find_section_length(label,length):
  function generate_data (line 67) | def generate_data(filepath,init_pos,max_length):
  function compute_score (line 92) | def compute_score(methods,slidingWindow,data,X_data,data_train,data_test...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/metrics.py
  function get_range_vus_roc (line 5) | def get_range_vus_roc(score, labels, slidingWindow):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/distance.py
  class Euclidean (line 20) | class Euclidean:
    method __init__ (line 40) | def __init__(self, power = 1, neighborhood = 100, window = 20, norm = ...
    method measure (line 48) | def measure(self, X, Y, index):
    method set_param (line 100) | def set_param(self):
  class Mahalanobis (line 111) | class Mahalanobis:
    method __init__ (line 129) | def __init__(self, probability = False):
    method set_param (line 135) | def set_param(self):
    method norm_pdf_multivariate (line 154) | def norm_pdf_multivariate(self, x):
    method normpdf (line 175) | def normpdf(self, x):
    method measure (line 185) | def measure(self, X, Y, index):
  class Garch (line 226) | class Garch:
    method __init__ (line 245) | def __init__(self, p = 1, q = 1, mean = 'zero', vol = 'garch'):
    method set_param (line 252) | def set_param(self):
    method measure (line 273) | def measure(self, X, Y, index):
  class SSA_DISTANCE (line 302) | class SSA_DISTANCE:
    method __init__ (line 320) | def __init__(self, method ='linear', e = 1):
    method Linearization (line 324) | def Linearization(self, X2):
    method set_param (line 363) | def set_param(self):
    method measure (line 371) | def measure(self, X2, X3, start_index):
  class Fourier (line 428) | class Fourier:
    method __init__ (line 444) | def __init__(self, power = 2):
    method set_param (line 447) | def set_param(self):
    method measure (line 455) | def measure(self, X2, X3, start_index):
  class DTW (line 483) | class DTW:
    method __init__ (line 500) | def __init__(self, method = 'L2'):
    method set_param (line 510) | def set_param(self):
    method measure (line 518) | def measure(self, X1, X2, start_index):
  class EDRS (line 589) | class EDRS:
    method __init__ (line 611) | def __init__(self, method = 'L1', ep = False, vol = False):
    method set_param (line 621) | def set_param(self):
    method measure (line 647) | def measure(self, X1, X2, start_index):
  class TWED (line 733) | class TWED:
    method __init__ (line 754) | def __init__(self, gamma = 0.1, v = 0.1):
    method set_param (line 759) | def set_param(self):
    method measure (line 763) | def measure(self, A, B, start_index):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/feature.py
  class Window (line 43) | class Window:
    method __init__ (line 49) | def __init__(self,  window = 100):
    method convert (line 52) | def convert(self, X):
  class tf_Stat (line 65) | class tf_Stat:
    method __init__ (line 74) | def __init__(self,  window = 100, step = 25):
    method convert (line 78) | def convert(self, X):
  class Stat (line 108) | class Stat:
    method __init__ (line 114) | def __init__(self,  window = 100, data_step = 10, param = [{"coeff": 0...
    method convert (line 125) | def convert(self, X):
    method ar_coefficient (line 186) | def ar_coefficient(self, x):
    method autocorrelation (line 241) | def autocorrelation(self, x):
    method _into_subchunks (line 283) | def _into_subchunks(self, x, subchunk_length, every_n=1):
    method sample_entropy (line 307) | def sample_entropy(self, x):
    method hurst_f (line 357) | def hurst_f(self, x):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/metrics.py
  class metricor (line 6) | class metricor:
    method __init__ (line 7) | def __init__(self, a = 1, probability = True, bias = 'flat', ):
    method detect_model (line 12) | def detect_model(self, model, label, contamination = 0.1, window = 100...
    method labels_conv (line 28) | def labels_conv(self, preds):
    method labels_conv_binary (line 36) | def labels_conv_binary(self, preds):
    method w (line 45) | def w(self, AnomalyRange, p):
    method Cardinality_factor (line 57) | def Cardinality_factor(self, Anomolyrange, Prange):
    method b (line 75) | def b(self, i, length):
    method scale_threshold (line 90) | def scale_threshold(self, score, score_mu, score_sigma):
    method metric_new (line 94) | def metric_new(self, label, score, plot_ROC=False, alpha=0.2,coeff=3):
    method metric_PR (line 161) | def metric_PR(self, label, score):
    method range_recall_new (line 170) | def range_recall_new(self, labels, preds, alpha):
    method range_convers_new (line 193) | def range_convers_new(self, label):
    method existence_reward (line 225) | def existence_reward(self, labels, preds):
    method num_nonzero_segments (line 237) | def num_nonzero_segments(self, x):
    method extend_postive_range (line 246) | def extend_postive_range(self, x, window=5):
    method extend_postive_range_individual (line 264) | def extend_postive_range_individual(self, x, percentage=0.2):
    method TPR_FPR_RangeAUC (line 283) | def TPR_FPR_RangeAUC(self, labels, pred, P, L):
    method RangeAUC (line 322) | def RangeAUC(self, labels, score, window=0, percentage=0, plot_ROC=Fal...
    method RangeAUC_volume (line 371) | def RangeAUC_volume(self, labels_original, score, windowSize):
  function generate_curve (line 431) | def generate_curve(label,score,slidingWindow):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/slidingWindows.py
  function find_length (line 8) | def find_length(data):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/AnomalyTransformer.py
  class PositionalEmbedding (line 12) | class PositionalEmbedding(nn.Module):
    method __init__ (line 13) | def __init__(self, d_model, max_len=5000):
    method forward (line 28) | def forward(self, x):
  class TokenEmbedding (line 32) | class TokenEmbedding(nn.Module):
    method __init__ (line 33) | def __init__(self, c_in, d_model):
    method forward (line 42) | def forward(self, x):
  class DataEmbedding (line 47) | class DataEmbedding(nn.Module):
    method __init__ (line 48) | def __init__(self, c_in, d_model, dropout=0.0):
    method forward (line 56) | def forward(self, x):
  class TriangularCausalMask (line 62) | class TriangularCausalMask():
    method __init__ (line 63) | def __init__(self, B, L, device="cpu"):
    method mask (line 69) | def mask(self):
  class AnomalyAttention (line 73) | class AnomalyAttention(nn.Module):
    method __init__ (line 74) | def __init__(self, win_size, mask_flag=True, scale=None, attention_dro...
    method forward (line 87) | def forward(self, queries, keys, values, sigma, attn_mask):
  class AttentionLayer (line 116) | class AttentionLayer(nn.Module):
    method __init__ (line 117) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method forward (line 137) | def forward(self, queries, keys, values, attn_mask):
  class EncoderLayer (line 160) | class EncoderLayer(nn.Module):
    method __init__ (line 161) | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activat...
    method forward (line 172) | def forward(self, x, attn_mask=None):
  class Encoder (line 185) | class Encoder(nn.Module):
    method __init__ (line 186) | def __init__(self, attn_layers, norm_layer=None):
    method forward (line 191) | def forward(self, x, attn_mask=None):
  class AnomalyTransformer (line 208) | class AnomalyTransformer(nn.Module):
    method __init__ (line 209) | def __init__(self, win_size, enc_in, c_out, d_model=512, n_heads=8, e_...
    method forward (line 235) | def forward(self, x):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/DCdetector.py
  class DAC_structure (line 21) | class DAC_structure(nn.Module):
    method __init__ (line 22) | def __init__(self, win_size, patch_size, channel, mask_flag=True, scal...
    method forward (line 33) | def forward(self, queries_patch_size, queries_patch_num, keys_patch_si...
  class AttentionLayer (line 66) | class AttentionLayer(nn.Module):
    method __init__ (line 67) | def __init__(self, attention, d_model, patch_size, channel, n_heads, w...
    method forward (line 84) | def forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_...
  class PositionalEmbedding (line 112) | class PositionalEmbedding(nn.Module):
    method __init__ (line 113) | def __init__(self, d_model, max_len=5000):
    method forward (line 128) | def forward(self, x):
  class TokenEmbedding (line 132) | class TokenEmbedding(nn.Module):
    method __init__ (line 133) | def __init__(self, c_in, d_model):
    method forward (line 142) | def forward(self, x):
  class DataEmbedding (line 147) | class DataEmbedding(nn.Module):
    method __init__ (line 148) | def __init__(self, c_in, d_model, dropout=0.05):
    method forward (line 156) | def forward(self, x):
  class RevIN (line 161) | class RevIN(nn.Module):
    method __init__ (line 162) | def __init__(self, num_features: int, eps=1e-5, affine=True):
    method forward (line 175) | def forward(self, x, mode: str):
    method _init_params (line 185) | def _init_params(self):
    method _get_statistics (line 193) | def _get_statistics(self, x):
    method _normalize (line 198) | def _normalize(self, x):
    method _denormalize (line 206) | def _denormalize(self, x):
  class Encoder (line 215) | class Encoder(nn.Module):
    method __init__ (line 216) | def __init__(self, attn_layers, norm_layer=None):
    method forward (line 221) | def forward(self, x_patch_size, x_patch_num, x_ori, patch_index, attn_...
  class DCdetector (line 231) | class DCdetector(nn.Module):
    method __init__ (line 232) | def __init__(self, win_size, enc_in, c_out, n_heads=1, d_model=256, e_...
    method forward (line 262) | def forward(self, x):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/GPT4TS.py
  class PositionalEmbedding (line 11) | class PositionalEmbedding(nn.Module):
    method __init__ (line 12) | def __init__(self, d_model, max_len=5000):
    method forward (line 28) | def forward(self, x):
  class TokenEmbedding (line 32) | class TokenEmbedding(nn.Module):
    method __init__ (line 33) | def __init__(self, c_in, d_model):
    method forward (line 43) | def forward(self, x):
  class FixedEmbedding (line 48) | class FixedEmbedding(nn.Module):
    method __init__ (line 49) | def __init__(self, c_in, d_model):
    method forward (line 65) | def forward(self, x):
  class TemporalEmbedding (line 69) | class TemporalEmbedding(nn.Module):
    method __init__ (line 70) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 87) | def forward(self, x):
  class TimeFeatureEmbedding (line 99) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 100) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 108) | def forward(self, x):
  class DataEmbedding (line 112) | class DataEmbedding(nn.Module):
    method __init__ (line 113) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 123) | def forward(self, x, x_mark):
  class DataEmbedding_wo_pos (line 132) | class DataEmbedding_wo_pos(nn.Module):
    method __init__ (line 133) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 143) | def forward(self, x, x_mark):
  class Model (line 150) | class Model(nn.Module):
    method __init__ (line 152) | def __init__(self, configs):
    method forward (line 217) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
    method imputation (line 233) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method forecast (line 261) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method anomaly_detection (line 302) | def anomaly_detection(self, x_enc):
    method classification (line 350) | def classification(self, x_enc, x_mark_enc):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/TimesNet.py
  class Inception_Block_V1 (line 8) | class Inception_Block_V1(nn.Module):
    method __init__ (line 9) | def __init__(self, in_channels, out_channels, num_kernels=6, init_weig...
    method _initialize_weights (line 21) | def _initialize_weights(self):
    method forward (line 28) | def forward(self, x):
  class PositionalEmbedding (line 36) | class PositionalEmbedding(nn.Module):
    method __init__ (line 37) | def __init__(self, d_model, max_len=5000):
    method forward (line 53) | def forward(self, x):
  class TokenEmbedding (line 57) | class TokenEmbedding(nn.Module):
    method __init__ (line 58) | def __init__(self, c_in, d_model):
    method forward (line 68) | def forward(self, x):
  class FixedEmbedding (line 73) | class FixedEmbedding(nn.Module):
    method __init__ (line 74) | def __init__(self, c_in, d_model):
    method forward (line 90) | def forward(self, x):
  class TemporalEmbedding (line 94) | class TemporalEmbedding(nn.Module):
    method __init__ (line 95) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 112) | def forward(self, x):
  class TimeFeatureEmbedding (line 124) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 125) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 133) | def forward(self, x):
  class DataEmbedding (line 137) | class DataEmbedding(nn.Module):
    method __init__ (line 138) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 148) | def forward(self, x, x_mark):
  function FFT_for_Period (line 158) | def FFT_for_Period(x, k=2):
  class TimesBlock (line 170) | class TimesBlock(nn.Module):
    method __init__ (line 171) | def __init__(self, configs):
    method forward (line 185) | def forward(self, x):
  class Model (line 220) | class Model(nn.Module):
    method __init__ (line 225) | def __init__(self, configs):
    method forecast (line 252) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 279) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 307) | def anomaly_detection(self, x_enc):
    method classification (line 332) | def classification(self, x_enc, x_mark_enc):
    method forward (line 350) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/dilated_conv.py
  class SamePadConv (line 6) | class SamePadConv(nn.Module):
    method __init__ (line 7) | def __init__(self, in_channels, out_channels, kernel_size, dilation=1,...
    method forward (line 19) | def forward(self, x):
  class ConvBlock (line 25) | class ConvBlock(nn.Module):
    method __init__ (line 26) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 32) | def forward(self, x):
  class DilatedConvEncoder (line 40) | class DilatedConvEncoder(nn.Module):
    method __init__ (line 41) | def __init__(self, in_channels, channels, kernel_size):
    method forward (line 54) | def forward(self, x):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/donut_model.py
  class VariationalNet (line 6) | class VariationalNet(nn.Module):
    method __init__ (line 11) | def __init__(self, in_channel, latent_dim=100, hidden_dim=3):
    method forward (line 30) | def forward(self, inputs):
  class GenerativeNet (line 45) | class GenerativeNet(nn.Module):
    method __init__ (line 50) | def __init__(self, in_channel, latent_dim=100, hidden_dim=3):
    method forward (line 69) | def forward(self, z):
  class DONUT_Model (line 84) | class DONUT_Model(nn.Module):
    method __init__ (line 86) | def __init__(self, in_channel, latent_dim=100, hidden_dim=3):
    method reparameterize (line 97) | def reparameterize(self, mu, logvar):
    method forward (line 109) | def forward(self, inputs):
    method loss_function (line 126) | def loss_function(self, inputs, outputs, z_mu, z_log_var, x_mu, x_log_...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/encoder.py
  function generate_continuous_mask (line 7) | def generate_continuous_mask(B, T, n=5, l=0.1):
  function generate_binomial_mask (line 23) | def generate_binomial_mask(B, T, p=0.5):
  class TSEncoder (line 26) | class TSEncoder(nn.Module):
    method __init__ (line 27) | def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, ...
    method forward (line 41) | def forward(self, x, mask=None):  # x: B x T x input_dims

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/losses.py
  function hierarchical_contrastive_loss (line 5) | def hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):
  function instance_contrastive_loss (line 23) | def instance_contrastive_loss(z1, z2):
  function temporal_contrastive_loss (line 38) | def temporal_contrastive_loss(z1, z2):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/models/lstm_vae_model.py
  class LSTM_Encoder (line 6) | class LSTM_Encoder(nn.Module):
    method __init__ (line 11) | def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):
    method forward (line 26) | def forward(self, inputs):
  class LSTM_Decoder (line 46) | class LSTM_Decoder(nn.Module):
    method __init__ (line 51) | def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):
    method forward (line 66) | def forward(self, z):
  class LSTM_VAE_Model (line 86) | class LSTM_VAE_Model(nn.Module):
    method __init__ (line 88) | def __init__(self, device, in_channel, hidden_size=16, hidden_dim=3):
    method reparameterize (line 100) | def reparameterize(self, mu, logvar):
    method forward (line 112) | def forward(self, inputs):
    method loss_function (line 129) | def loss_function(self, inputs, outputs, z_mu, z_log_var, x_mu, x_log_...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/spot.py
  class SPOT (line 27) | class SPOT:
    method __init__ (line 58) | def __init__(self, q = 1e-4):
    method __str__ (line 80) | def __str__(self):
    method fit (line 109) | def fit(self,init_data,data):
    method add (line 150) | def add(self,data):
    method initialize (line 172) | def initialize(self, level = 0.98, verbose = True):
    method _rootsFinder (line 215) | def _rootsFinder(fun,jac,bounds,npoints,method):
    method _log_likelihood (line 272) | def _log_likelihood(Y,gamma,sigma):
    method _grimshaw (line 299) | def _grimshaw(self,epsilon = 1e-8, n_points = 10):
    method _quantile (line 382) | def _quantile(self,gamma,sigma):
    method run (line 405) | def run(self, with_alarm = True):
    method plot (line 472) | def plot(self,run_results,with_alarm = True):
  function backMean (line 517) | def backMean(X,d):
  class dSPOT (line 528) | class dSPOT:
    method __init__ (line 561) | def __init__(self, q, depth):
    method __str__ (line 572) | def __str__(self):
    method fit (line 602) | def fit(self,init_data,data):
    method add (line 642) | def add(self,data):
    method initialize (line 664) | def initialize(self, verbose = True):
    method _rootsFinder (line 706) | def _rootsFinder(fun,jac,bounds,npoints,method):
    method _log_likelihood (line 754) | def _log_likelihood(Y,gamma,sigma):
    method _grimshaw (line 781) | def _grimshaw(self,epsilon = 1e-8, n_points = 10):
    method _quantile (line 864) | def _quantile(self,gamma,sigma):
    method run (line 887) | def run(self, with_alarm = True):
    method plot (line 960) | def plot(self,run_results, with_alarm = True):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/tasks/anomaly_detection.py
  function get_range_proba (line 17) | def get_range_proba(predict, label, delay=7):
  function get_range_proba (line 43) | def get_range_proba(predict, label, delay=7):
  function reconstruct_label (line 69) | def reconstruct_label(timestamp, label):
  function eval_ad_result (line 87) | def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_lis...
  function np_shift (line 158) | def np_shift(arr, num, fill_value=np.nan):
  function adjustment (line 171) | def adjustment(gt, pred):
  function eval_anomaly_detection (line 195) | def eval_anomaly_detection(model, all_train_data, all_train_labels, all_...
  function eval_anomaly_detection_coldstart (line 426) | def eval_anomaly_detection_coldstart(model, all_train_data, all_train_la...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train.py
  function save_checkpoint_callback (line 13) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/trainATbatch.py
  function get_range_proba (line 24) | def get_range_proba(predict, label, delay=7):
  class Config (line 56) | class Config:
  function train (line 78) | def train(config, model, all_train_data, all_train_labels, all_train_tim...
  function np_shift (line 132) | def np_shift(arr, num, fill_value=np.nan):
  function reconstruct_label (line 146) | def reconstruct_label(timestamp, label):
  function eval_ad_result (line 164) | def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_lis...
  function evaluate (line 183) | def evaluate(config, cur_epoch, model, all_train_data, all_train_labels,...
  function main (line 243) | def main(config):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_at_multi.py
  function str2bool (line 22) | def str2bool(v):
  function main (line 26) | def main(config, train_set, train_loader, val_set, val_loader, test_set,...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_at_uni.py
  class UniLoader (line 24) | class UniLoader(object):
    method __init__ (line 25) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 33) | def __len__(self):
    method __getitem__ (line 41) | def __getitem__(self, index):
  function str2bool (line 47) | def str2bool(v):
  function main (line 51) | def main(config, train_set, train_loader, val_set, val_loader, test_set,...

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector.py
  function to_var (line 23) | def to_var(x, volatile=False):
  function mkdir (line 29) | def mkdir(directory):
  class Logger (line 35) | class Logger(object):
    method __init__ (line 36) | def __init__(self, filename='default.log', add_flag=True, stream=sys.s...
    method write (line 41) | def write(self, message):
    method flush (line 51) | def flush(self):
  function str2bool (line 55) | def str2bool(v):
  function find_nearest (line 59) | def find_nearest(array, value):
  function main (line 65) | def main(config):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector_nui.py
  class UniLoader (line 31) | class UniLoader(object):
    method __init__ (line 32) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 40) | def __len__(self):
    method __getitem__ (line 48) | def __getitem__(self, index):
  function to_var (line 55) | def to_var(x, volatile=False):
  function mkdir (line 61) | def mkdir(directory):
  class Logger (line 67) | class Logger(object):
    method __init__ (line 68) | def __init__(self, filename='default.log', add_flag=True, stream=sys.s...
    method write (line 73) | def write(self, message):
    method flush (line 83) | def flush(self):
  function str2bool (line 87) | def str2bool(v):
  function find_nearest (line 91) | def find_nearest(array, value):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_donut.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_donut_multi.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot.py
  function adjustment (line 25) | def adjustment(gt, pred):
  function get_range_proba (line 49) | def get_range_proba(predict, label, delay=7):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot_multi.py
  function adjustment (line 30) | def adjustment(gt, pred):
  function get_range_proba (line 54) | def get_range_proba(predict, label, delay=7):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_gpt4ts_uni.py
  class UniLoader (line 20) | class UniLoader(object):
    method __init__ (line 21) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 29) | def __len__(self):
    method __getitem__ (line 37) | def __getitem__(self, index):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae_multi.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_spot.py
  function adjustment (line 25) | def adjustment(gt, pred):
  function get_range_proba (line 49) | def get_range_proba(predict, label, delay=7):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_spot_multi.py
  function adjustment (line 25) | def adjustment(gt, pred):
  function get_range_proba (line 49) | def get_range_proba(predict, label, delay=7):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_timesnet_uni.py
  class UniLoader (line 19) | class UniLoader(object):
    method __init__ (line 20) | def __init__(self, data_set, win_size, step, mode="train"):
    method __len__ (line 28) | def __len__(self):
    method __getitem__ (line 36) | def __getitem__(self, index):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec_multi.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/ts2vec.py
  class TS2Vec (line 10) | class TS2Vec:
    method __init__ (line 13) | def __init__(
    method fit (line 60) | def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method _eval_with_pooling (line 167) | def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_wind...
    method encode (line 211) | def encode(self, data, mask=None, encoding_window=None, casual=False, ...
    method save (line 310) | def save(self, fn):
    method load (line 318) | def load(self, fn):

FILE: ts_anomaly_detection_methods/other_anomaly_baselines/utils.py
  function pkl_save (line 8) | def pkl_save(name, var):
  function pkl_load (line 12) | def pkl_load(name):
  function torch_pad_nan (line 16) | def torch_pad_nan(arr, left=0, right=0, dim=0):
  function pad_nan_to_target (line 30) | def pad_nan_to_target(array, target_length, axis=0, both_side=False):
  function split_with_nan (line 44) | def split_with_nan(x, sections, axis=0):
  function take_per_row (line 52) | def take_per_row(A, indx, num_elem):
  function centerize_vary_length_series (line 56) | def centerize_vary_length_series(x):
  function data_dropout (line 65) | def data_dropout(arr, p):
  function name_with_datetime (line 78) | def name_with_datetime(prefix='default'):
  function init_dl_program (line 82) | def init_dl_program(
  function split_N_pad (line 134) | def split_N_pad(series,window_size):
  function data_slice (line 150) | def data_slice(data,window_size):

FILE: ts_classification_methods/data/dataloader.py
  class UCRDataset (line 6) | class UCRDataset(data.Dataset):
    method __init__ (line 7) | def __init__(self, dataset, target):
    method __getitem__ (line 14) | def __getitem__(self, index):
    method __len__ (line 17) | def __len__(self):
  class UEADataset (line 21) | class UEADataset(data.Dataset):
    method __init__ (line 22) | def __init__(self, dataset, target):
    method __getitem__ (line 26) | def __getitem__(self, index):
    method __len__ (line 29) | def __len__(self):

FILE: ts_classification_methods/data/preprocessing.py
  function load_data (line 10) | def load_data(dataroot, dataset):
  function load_UEA (line 29) | def load_UEA(dataroot, dataset):
  function transfer_labels (line 60) | def transfer_labels(labels):
  function k_fold (line 71) | def k_fold(data, target):
  function normalize_per_series (line 101) | def normalize_per_series(data):
  function normalize_train_val_test (line 107) | def normalize_train_val_test(train_set, val_set, test_set):
  function normalize_uea_set (line 113) | def normalize_uea_set(data_set):
  function fill_nan_value (line 120) | def fill_nan_value(train_set, val_set, test_set):

FILE: ts_classification_methods/gpt4ts/gpt4ts_utils.py
  function build_dataset (line 14) | def build_dataset(args):
  function load_data (line 21) | def load_data(dataroot, dataset):
  function normalize_per_series (line 38) | def normalize_per_series(data):
  function load_UEA (line 45) | def load_UEA(dataroot, dataset):
  function transfer_labels (line 76) | def transfer_labels(labels):
  function k_fold (line 87) | def k_fold(data_set, target):
  function normalize_uea_set (line 117) | def normalize_uea_set(data_set):
  function fill_nan_value (line 124) | def fill_nan_value(train_set, val_set, test_set):
  class UEADataset (line 139) | class UEADataset(data.Dataset):
    method __init__ (line 140) | def __init__(self, dataset, target):
    method __getitem__ (line 144) | def __getitem__(self, index):
    method __len__ (line 147) | def __len__(self):
  function save_cls_new_result (line 151) | def save_cls_new_result(args, mean_accu, max_acc, min_acc, std_acc, trai...
  function set_seed (line 169) | def set_seed(args):
  function get_all_datasets (line 177) | def get_all_datasets(data_set, target):
  function cross_entropy (line 182) | def cross_entropy():
  function reconstruction_loss (line 187) | def reconstruction_loss():
  function build_loss (line 192) | def build_loss(args):

FILE: ts_classification_methods/gpt4ts/main_gpt4ts.py
  function evaluate_gpt4ts (line 22) | def evaluate_gpt4ts(val_loader, model, loss):

FILE: ts_classification_methods/gpt4ts/main_gpt4ts_ucr.py
  function evaluate_gpt4ts (line 22) | def evaluate_gpt4ts(val_loader, model, loss):

FILE: ts_classification_methods/gpt4ts/models/embed.py
  class PositionalEmbedding (line 6) | class PositionalEmbedding(nn.Module):
    method __init__ (line 7) | def __init__(self, d_model, max_len=25000):
    method forward (line 23) | def forward(self, x):
  class TokenEmbedding (line 27) | class TokenEmbedding(nn.Module):
    method __init__ (line 28) | def __init__(self, c_in, d_model):
    method forward (line 38) | def forward(self, x):
  class FixedEmbedding (line 43) | class FixedEmbedding(nn.Module):
    method __init__ (line 44) | def __init__(self, c_in, d_model):
    method forward (line 60) | def forward(self, x):
  class TemporalEmbedding (line 64) | class TemporalEmbedding(nn.Module):
    method __init__ (line 65) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 82) | def forward(self, x):
  class TimeFeatureEmbedding (line 94) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 95) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 103) | def forward(self, x):
  class DataEmbedding (line 107) | class DataEmbedding(nn.Module):
    method __init__ (line 108) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 118) | def forward(self, x, x_mark):
  class DataEmbedding_wo_pos (line 127) | class DataEmbedding_wo_pos(nn.Module):
    method __init__ (line 128) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 138) | def forward(self, x, x_mark):
  class PatchEmbedding (line 146) | class PatchEmbedding(nn.Module):
    method __init__ (line 147) | def __init__(self, d_model, patch_len, stride, dropout):
    method forward (line 163) | def forward(self, x):
  class DataEmbedding_wo_time (line 173) | class DataEmbedding_wo_time(nn.Module):
    method __init__ (line 174) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 181) | def forward(self, x):

FILE: ts_classification_methods/gpt4ts/models/gpt4ts.py
  class gpt4ts (line 10) | class gpt4ts(nn.Module):
    method __init__ (line 12) | def __init__(self, max_seq_len, num_classes, var_len, d_model=768, pat...
    method forward (line 52) | def forward(self, x_enc, x_mark_enc=None):

FILE: ts_classification_methods/gpt4ts/models/loss.py
  function get_loss_module (line 6) | def get_loss_module(config):
  function l2_reg_loss (line 23) | def l2_reg_loss(model):
  class NoFussCrossEntropyLoss (line 31) | class NoFussCrossEntropyLoss(nn.CrossEntropyLoss):
    method forward (line 37) | def forward(self, inp, target):
  class MaskedMSELoss (line 42) | class MaskedMSELoss(nn.Module):
    method __init__ (line 46) | def __init__(self, reduction: str = 'mean'):
    method forward (line 53) | def forward(self,

FILE: ts_classification_methods/model/loss.py
  function cross_entropy (line 4) | def cross_entropy():
  function reconstruction_loss (line 9) | def reconstruction_loss():

FILE: ts_classification_methods/model/tsm_model.py
  class Chomp1d (line 7) | class Chomp1d(nn.Module):
    method __init__ (line 8) | def __init__(self, chomp_size):
    method forward (line 12) | def forward(self, x):
  class SqueezeChannels (line 16) | class SqueezeChannels(nn.Module):
    method __init__ (line 17) | def __init__(self):
    method forward (line 20) | def forward(self, x):
  class FCN (line 24) | class FCN(nn.Module):
    method __init__ (line 25) | def __init__(self, num_classes, input_size=1):
    method forward (line 59) | def forward(self, x, vis=False):
  class DilatedBlock (line 69) | class DilatedBlock(nn.Module):
    method __init__ (line 71) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 97) | def forward(self, x):
  class DilatedConvolution (line 109) | class DilatedConvolution(nn.Module):
    method __init__ (line 110) | def __init__(self, in_channels, embedding_channels, out_channels, dept...
    method forward (line 136) | def forward(self, x, vis=False):
  class DilatedConvolutionVis (line 143) | class DilatedConvolutionVis(nn.Module):
    method __init__ (line 144) | def __init__(self, in_channels, embedding_channels, out_channels, dept...
    method forward (line 170) | def forward(self, x, vis=False):
  class Classifier (line 177) | class Classifier(nn.Module):
    method __init__ (line 178) | def __init__(self, input_dims, output_dims) -> None:
    method forward (line 184) | def forward(self, x):
  class NonLinearClassifier (line 188) | class NonLinearClassifier(nn.Module):
    method __init__ (line 189) | def __init__(self, input_dim, embedding_dim, output_dim, dropout=0.2) ...
    method forward (line 201) | def forward(self, x):
  class NonLinearClassifierVis (line 205) | class NonLinearClassifierVis(nn.Module):
    method __init__ (line 206) | def __init__(self, input_dim, embedding_dim, output_dim, dropout=0.2) ...
    method forward (line 224) | def forward(self, x, vis=False):
  class RNNDecoder (line 236) | class RNNDecoder(nn.Module):
    method __init__ (line 237) | def __init__(self, input_dim=1, embedding_dim=128) -> None:
    method forward (line 250) | def forward(self, h1, h2, h3, x):
  function conv_out_len (line 260) | def conv_out_len(seq_len, ker_size, stride, dilation, stack):
  class FCNDecoder (line 269) | class FCNDecoder(nn.Module):
    method __init__ (line 272) | def __init__(self, num_classes, seq_len=None, input_size=None):
    method forward (line 310) | def forward(self, x):

FILE: ts_classification_methods/patchtst/main_patchtst_iota.py
  function create_patch (line 25) | def create_patch(xb, patch_len, stride):
  function evaluate_gpt4ts (line 39) | def evaluate_gpt4ts(args, val_loader, model, loss):

FILE: ts_classification_methods/patchtst/main_patchtst_ucr.py
  function create_patch (line 25) | def create_patch(xb, patch_len, stride):
  function evaluate_gpt4ts (line 39) | def evaluate_gpt4ts(args, val_loader, model, loss):

FILE: ts_classification_methods/patchtst/mian_patchtst.py
  function create_patch (line 25) | def create_patch(xb, patch_len, stride):
  function evaluate_gpt4ts (line 39) | def evaluate_gpt4ts(args, val_loader, model, loss):

FILE: ts_classification_methods/patchtst/models/attention.py
  class MultiheadAttention (line 8) | class MultiheadAttention(nn.Module):
    method __init__ (line 9) | def __init__(self, d_model, n_heads, d_k=None, d_v=None, res_attention...
    method forward (line 35) | def forward(self, Q: Tensor, K: Optional[Tensor] = None, V: Optional[T...
  class ScaledDotProductAttention (line 68) | class ScaledDotProductAttention(nn.Module):
    method __init__ (line 73) | def __init__(self, d_model, n_heads, attn_dropout=0., res_attention=Fa...
    method forward (line 81) | def forward(self, q: Tensor, k: Tensor, v: Tensor, prev: Optional[Tens...

FILE: ts_classification_methods/patchtst/models/basics.py
  class Transpose (line 7) | class Transpose(nn.Module):
    method __init__ (line 8) | def __init__(self, *dims, contiguous=False):
    method forward (line 12) | def forward(self, x):
  class SigmoidRange (line 19) | class SigmoidRange(nn.Module):
    method __init__ (line 20) | def __init__(self, low, high):
    method forward (line 25) | def forward(self, x):
  class LinBnDrop (line 30) | class LinBnDrop(nn.Sequential):
    method __init__ (line 33) | def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=Fal...
  function sigmoid_range (line 42) | def sigmoid_range(x, low, high):
  function get_activation_fn (line 47) | def get_activation_fn(activation):

FILE: ts_classification_methods/patchtst/models/heads.py
  class LinearRegressionHead (line 5) | class LinearRegressionHead(nn.Module):
    method __init__ (line 6) | def __init__(self, n_vars, d_model, output_dim, head_dropout, y_range=...
    method forward (line 13) | def forward(self, x):
  class LinearClassificationHead (line 26) | class LinearClassificationHead(nn.Module):
    method __init__ (line 27) | def __init__(self, n_vars, d_model, n_classes, head_dropout):
    method forward (line 33) | def forward(self, x):
  class LinearPredictionHead (line 45) | class LinearPredictionHead(nn.Module):
    method __init__ (line 46) | def __init__(self, individual, n_vars, d_model, num_patch, forecast_le...
    method forward (line 67) | def forward(self, x):
  class LinearPretrainHead (line 87) | class LinearPretrainHead(nn.Module):
    method __init__ (line 88) | def __init__(self, d_model, patch_len, dropout):
    method forward (line 93) | def forward(self, x):

FILE: ts_classification_methods/patchtst/models/patchTST.py
  class PatchTST (line 9) | class PatchTST(nn.Module):
    method __init__ (line 18) | def __init__(self, c_in: int, target_dim: int, patch_len: int, stride:...
    method forward (line 52) | def forward(self, z):
  class RegressionHead (line 68) | class RegressionHead(nn.Module):
    method __init__ (line 69) | def __init__(self, n_vars, d_model, output_dim, head_dropout, y_range=...
    method forward (line 76) | def forward(self, x):
  class ClassificationHead (line 89) | class ClassificationHead(nn.Module):
    method __init__ (line 90) | def __init__(self, n_vars, d_model, n_classes, head_dropout):
    method forward (line 96) | def forward(self, x):
  class PredictionHead (line 111) | class PredictionHead(nn.Module):
    method __init__ (line 112) | def __init__(self, individual, n_vars, d_model, num_patch, forecast_le...
    method forward (line 133) | def forward(self, x):
  class PretrainHead (line 153) | class PretrainHead(nn.Module):
    method __init__ (line 154) | def __init__(self, d_model, patch_len, dropout):
    method forward (line 159) | def forward(self, x):
  class PatchTSTEncoder (line 171) | class PatchTSTEncoder(nn.Module):
    method __init__ (line 172) | def __init__(self, c_in, num_patch, patch_len,
    method forward (line 203) | def forward(self, x) -> Tensor:
  class TSTEncoder (line 233) | class TSTEncoder(nn.Module):
    method __init__ (line 234) | def __init__(self, d_model, n_heads, d_ff=None,
    method forward (line 246) | def forward(self, src: Tensor):
  class TSTEncoderLayer (line 260) | class TSTEncoderLayer(nn.Module):
    method __init__ (line 261) | def __init__(self, d_model, n_heads, d_ff=256, store_attn=False,
    method forward (line 297) | def forward(self, src: Tensor, prev: Optional[Tensor] = None):

FILE: ts_classification_methods/patchtst/models/pos_encoding.py
  function PositionalEncoding (line 10) | def PositionalEncoding(q_len, d_model, normalize=True):
  function positional_encoding (line 24) | def positional_encoding(pe, learn_pe, q_len, d_model):

FILE: ts_classification_methods/patchtst/models/revin.py
  class RevIN (line 4) | class RevIN(nn.Module):
    method __init__ (line 5) | def __init__(self, num_features: int, eps=1e-5, affine=True):
    method forward (line 18) | def forward(self, x, mode:str):
    method _init_params (line 27) | def _init_params(self):
    method _get_statistics (line 32) | def _get_statistics(self, x):
    method _normalize (line 37) | def _normalize(self, x):
    method _denormalize (line 45) | def _denormalize(self, x):

FILE: ts_classification_methods/patchtst/patch_mask.py
  class GetAttr (line 9) | class GetAttr:
    method _component_attr_filter (line 13) | def _component_attr_filter(self, k):
    method _dir (line 18) | def _dir(self):
    method __getattr__ (line 21) | def __getattr__(self, k):
    method __dir__ (line 27) | def __dir__(self):
    method __setstate__ (line 31) | def __setstate__(self, data):
  function get_device (line 35) | def get_device(use_cuda=True, device_id=None, usage=5):
  function set_device (line 47) | def set_device(usage=5):
  function default_device (line 53) | def default_device(use_cuda=True):
  function get_available_cuda (line 60) | def get_available_cuda(usage=10):
  function to_device (line 69) | def to_device(b, device=None, non_blocking=False):
  function to_numpy (line 86) | def to_numpy(b):
  class Callback (line 99) | class Callback(GetAttr):
  class SetupLearnerCB (line 103) | class SetupLearnerCB(Callback):
    method __init__ (line 104) | def __init__(self):
    method before_batch_train (line 107) | def before_batch_train(self):
    method before_batch_valid (line 110) | def before_batch_valid(self):
    method before_batch_predict (line 113) | def before_batch_predict(self):
    method before_batch_test (line 116) | def before_batch_test(self):
    method _to_device (line 119) | def _to_device(self):
    method before_fit (line 127) | def before_fit(self):
  class GetPredictionsCB (line 133) | class GetPredictionsCB(Callback):
    method __init__ (line 134) | def __init__(self):
    method before_predict (line 137) | def before_predict(self):
    method after_batch_predict (line 140) | def after_batch_predict(self):
    method after_predict (line 144) | def after_predict(self):
  class GetTestCB (line 148) | class GetTestCB(Callback):
    method __init__ (line 149) | def __init__(self):
    method before_test (line 152) | def before_test(self):
    method after_batch_test (line 155) | def after_batch_test(self):
    method after_test (line 160) | def after_test(self):
  class PatchCB (line 166) | class PatchCB(Callback):
    method __init__ (line 168) | def __init__(self, patch_len, stride):
    method before_forward (line 178) | def before_forward(self): self.set_patch()
    method set_patch (line 180) | def set_patch(self):
  class PatchMaskCB (line 189) | class PatchMaskCB(Callback):
    method __init__ (line 190) | def __init__(self, patch_len, stride, mask_ratio,
    method before_fit (line 203) | def before_fit(self):
    method before_forward (line 208) | def before_forward(self): self.patch_masking()
    method patch_masking (line 210) | def patch_masking(self):
    method _loss (line 222) | def _loss(self, preds, target):
  function create_patch (line 233) | def create_patch(xb, patch_len, stride):
  class Patch (line 247) | class Patch(nn.Module):
    method __init__ (line 248) | def __init__(self, seq_len, patch_len, stride):
    method forward (line 257) | def forward(self, x):
  function random_masking (line 266) | def random_masking(xb, mask_ratio):
  function random_masking_3D (line 301) | def random_masking_3D(xb, mask_ratio):

FILE: ts_classification_methods/selftime_cls/dataloader/TSC_data_loader.py
  function set_nan_to_zero (line 6) | def set_nan_to_zero(a):
  function TSC_data_loader (line 12) | def TSC_data_loader(dataset_path,dataset_name):

FILE: ts_classification_methods/selftime_cls/dataloader/ucr2018.py
  class UCR2018 (line 17) | class UCR2018(data.Dataset):
    method __init__ (line 19) | def __init__(self, data, targets, transform):
    method __getitem__ (line 24) | def __getitem__(self, index):
    method __len__ (line 33) | def __len__(self):
  class MultiUCR2018_Intra (line 37) | class MultiUCR2018_Intra(data.Dataset):
    method __init__ (line 39) | def __init__(self, data, targets, K, transform, transform_cut, totenso...
    method __getitem__ (line 47) | def __getitem__(self, index):
    method __len__ (line 65) | def __len__(self):
  class MultiUCR2018_InterIntra (line 69) | class MultiUCR2018_InterIntra(data.Dataset):
    method __init__ (line 71) | def __init__(self, data, targets, K, transform, transform_cut, totenso...
    method __getitem__ (line 79) | def __getitem__(self, index):
    method __len__ (line 99) | def __len__(self):
  class MultiUCR2018 (line 103) | class MultiUCR2018(data.Dataset):
    method __init__ (line 105) | def __init__(self, data, targets, K, transform):
    method __getitem__ (line 111) | def __getitem__(self, index):
    method __len__ (line 124) | def __len__(self):
  function load_ucr2018 (line 128) | def load_ucr2018(dataset_path, dataset_name):

FILE: ts_classification_methods/selftime_cls/dataprepare.py
  function load_data (line 7) | def load_data(dataroot, dataset):
  function transfer_labels (line 29) | def transfer_labels(labels):
  function k_fold (line 39) | def k_fold(data, target):
  function normalize_per_series (line 69) | def normalize_per_series(data):
  function fill_nan_value (line 74) | def fill_nan_value(train_set, val_set, test_set):

FILE: ts_classification_methods/selftime_cls/evaluation/eval_ssl.py
  function evaluation (line 11) | def evaluation(x_train, y_train, x_val, y_val, x_test, y_test, nb_class,...

FILE: ts_classification_methods/selftime_cls/model/model_RelationalReasoning.py
  class RelationalReasoning (line 8) | class RelationalReasoning(torch.nn.Module):
    method __init__ (line 10) | def __init__(self, backbone, feature_size=64):
    method aggregate (line 19) | def aggregate(self, features, K):
    method train (line 50) | def train(self, tot_epochs, train_loader, opt):
  class RelationalReasoning_Intra (line 113) | class RelationalReasoning_Intra(torch.nn.Module):
    method __init__ (line 115) | def __init__(self, backbone, feature_size=64, nb_class=3):
    method run_test (line 127) | def run_test(self, predict, labels):
    method train (line 133) | def train(self, tot_epochs, train_loader, opt):
  class RelationalReasoning_InterIntra (line 204) | class RelationalReasoning_InterIntra(torch.nn.Module):
    method __init__ (line 205) | def __init__(self, backbone, feature_size=64, nb_class=3):
    method aggregate (line 223) | def aggregate(self, features, K):
    method run_test (line 255) | def run_test(self, predict, labels):
    method train (line 261) | def train(self, tot_epochs, train_loader, opt):

FILE: ts_classification_methods/selftime_cls/model/model_backbone.py
  class SimConv4 (line 9) | class SimConv4(torch.nn.Module):
    method __init__ (line 10) | def __init__(self, in_channel=1, feature_size=64):
    method forward (line 60) | def forward(self, x):

FILE: ts_classification_methods/selftime_cls/optim/pretrain.py
  function pretrain_IntraSampleRel (line 10) | def pretrain_IntraSampleRel(x_train, y_train, opt):
  function pretrain_InterSampleRel (line 88) | def pretrain_InterSampleRel(x_train, y_train, opt):
  function pretrain_SelfTime (line 138) | def pretrain_SelfTime(x_train, y_train, opt, in_channel=1):

FILE: ts_classification_methods/selftime_cls/optim/pytorchtools.py
  class EarlyStopping (line 6) | class EarlyStopping:
    method __init__ (line 8) | def __init__(self, patience=50, verbose=False, delta=0, checkpoint_pth...
    method __call__ (line 27) | def __call__(self, val_loss, model):
    method save_checkpoint (line 44) | def save_checkpoint(self, val_loss, model, checkpoint_pth):

FILE: ts_classification_methods/selftime_cls/optim/train.py
  function supervised_train (line 13) | def supervised_train(x_train, y_train, x_val, y_val, x_test, y_test, nb_...

FILE: ts_classification_methods/selftime_cls/train_ssl.py
  function parse_option (line 17) | def parse_option():

FILE: ts_classification_methods/selftime_cls/utils/augmentation.py
  function slidewindow (line 7) | def slidewindow(ts, horizon=.2, stride=0.2):
  function cutout (line 24) | def cutout(ts, perc=.1):
  function cut_piece2C (line 38) | def cut_piece2C(ts, perc=.1):
  function cut_piece3C (line 59) | def cut_piece3C(ts, perc=.1):
  function cut_piece4C (line 83) | def cut_piece4C(ts, perc=.1):
  function cut_piece5C (line 109) | def cut_piece5C(ts, perc=.1):
  function cut_piece6C (line 137) | def cut_piece6C(ts, perc=.1):
  function cut_piece7C (line 167) | def cut_piece7C(ts, perc=.1):
  function cut_piece8C (line 199) | def cut_piece8C(ts, perc=.1):
  function jitter (line 233) | def jitter(x, sigma=0.03):
  function scaling (line 237) | def scaling(x, sigma=0.1):
  function rotation (line 242) | def rotation(x):
  function scaling_s (line 248) | def scaling_s(x, sigma=0.1, plot=False):
  function rotation_s (line 258) | def rotation_s(x, plot=False):
  function rotation2d (line 267) | def rotation2d(x, sigma=0.2):
  function permutation (line 278) | def permutation(x, max_segments=5, seg_mode="equal"):
  function magnitude_warp (line 298) | def magnitude_warp(x, sigma=0.2, knot=4):
  function magnitude_warp_s (line 318) | def magnitude_warp_s(x, sigma=0.2, knot=4, plot=False):
  function time_warp (line 337) | def time_warp(x, sigma=0.2, knot=4):
  function time_warp_s (line 353) | def time_warp_s(x, sigma=0.2, knot=4, plot=False):
  function window_slice (line 372) | def window_slice(x, reduce_ratio=0.9):
  function window_slice_s (line 387) | def window_slice_s(x, reduce_ratio=0.9):
  function window_warp (line 402) | def window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):
  function window_warp_s (line 422) | def window_warp_s(x, window_ratio=0.1, scales=[0.5, 2.]):
  function spawner (line 444) | def spawner(x, labels, sigma=0.05, verbose=0):
  function wdba (line 481) | def wdba(x, labels, batch_size=6, slope_constraint="symmetric", use_wind...
  function random_guided_warp (line 539) | def random_guided_warp(x, labels, slope_constraint="symmetric", use_wind...
  function discriminative_guided_warp (line 573) | def discriminative_guided_warp(x, labels, batch_size=6, slope_constraint...

FILE: ts_classification_methods/selftime_cls/utils/datasets.py
  function nb_dims (line 1) | def nb_dims(dataset):
  function nb_classes (line 6) | def nb_classes(dataset):

FILE: ts_classification_methods/selftime_cls/utils/helper.py
  function plot2d (line 3) | def plot2d(x, y, x2=None, y2=None, x3=None, y3=None, xlim=(-1, 1), ylim=...
  function plot1d (line 21) | def plot1d(x, x2=None, x3=None, ylim=(-1, 1), save_file=""):

FILE: ts_classification_methods/selftime_cls/utils/transforms.py
  class Raw (line 6) | class Raw:
    method __init__ (line 7) | def __init__(self):
    method __call__ (line 10) | def __call__(self, data):
  class CutPiece2C (line 14) | class CutPiece2C:
    method __init__ (line 15) | def __init__(self, sigma):
    method __call__ (line 18) | def __call__(self, data):
    method forward (line 21) | def forward(self, data):
  class CutPiece3C (line 26) | class CutPiece3C:
    method __init__ (line 27) | def __init__(self, sigma):
    method __call__ (line 30) | def __call__(self, data):
    method forward (line 33) | def forward(self, data):
  class CutPiece4C (line 38) | class CutPiece4C:
    method __init__ (line 39) | def __init__(self, sigma):
    method __call__ (line 42) | def __call__(self, data):
    method forward (line 45) | def forward(self, data):
  class CutPiece5C (line 50) | class CutPiece5C:
    method __init__ (line 51) | def __init__(self, sigma):
    method __call__ (line 54) | def __call__(self, data):
    method forward (line 57) | def forward(self, data):
  class CutPiece6C (line 62) | class CutPiece6C:
    method __init__ (line 63) | def __init__(self, sigma):
    method __call__ (line 66) | def __call__(self, data):
    method forward (line 69) | def forward(self, data):
  class CutPiece7C (line 74) | class CutPiece7C:
    method __init__ (line 75) | def __init__(self, sigma):
    method __call__ (line 78) | def __call__(self, data):
    method forward (line 81) | def forward(self, data):
  class CutPiece8C (line 86) | class CutPiece8C:
    method __init__ (line 87) | def __init__(self, sigma):
    method __call__ (line 90) | def __call__(self, data):
    method forward (line 93) | def forward(self, data):
  class Jitter (line 98) | class Jitter:
    method __init__ (line 99) | def __init__(self, sigma, p):
    method __call__ (line 103) | def __call__(self, data):
    method forward (line 110) | def forward(self, data):
  class Scaling (line 115) | class Scaling:
    method __init__ (line 116) | def __init__(self, sigma, p):
    method __call__ (line 120) | def __call__(self, data):
    method forward (line 128) | def forward(self, data):
  class Cutout (line 132) | class Cutout:
    method __init__ (line 133) | def __init__(self, sigma, p):
    method __call__ (line 137) | def __call__(self, data):
    method forward (line 144) | def forward(self, data):
  class MagnitudeWrap (line 148) | class MagnitudeWrap:
    method __init__ (line 149) | def __init__(self, sigma, knot, p):
    method __call__ (line 154) | def __call__(self, data):
    method forward (line 162) | def forward(self, data):
  class TimeWarp (line 166) | class TimeWarp:
    method __init__ (line 167) | def __init__(self, sigma, knot, p):
    method __call__ (line 172) | def __call__(self, data):
    method forward (line 178) | def forward(self, data):
  class WindowSlice (line 182) | class WindowSlice:
    method __init__ (line 183) | def __init__(self, reduce_ratio, p):
    method __call__ (line 187) | def __call__(self, data):
    method forward (line 193) | def forward(self, data):
  class WindowWarp (line 197) | class WindowWarp:
    method __init__ (line 198) | def __init__(self, window_ratio, scales, p):
    method __call__ (line 203) | def __call__(self, data):
    method forward (line 209) | def forward(self, data):
  class ToTensor (line 213) | class ToTensor:
    method __init__ (line 224) | def __init__(self, basic=False):
    method __call__ (line 227) | def __call__(self, img):
    method forward (line 230) | def forward(self, img):
  class Compose (line 245) | class Compose:
    method __init__ (line 246) | def __init__(self, transforms):
    method __call__ (line 249) | def __call__(self, img):
    method forward (line 252) | def forward(self, img):

FILE: ts_classification_methods/selftime_cls/utils/utils.py
  function get_config_from_json (line 5) | def get_config_from_json(json_file):

FILE: ts_classification_methods/selftime_cls/utils/utils_plot.py
  function show_samples (line 7) | def show_samples(X_train, y_train, dataset_name, figname='', num_shown=5):

FILE: ts_classification_methods/timesnet/main_timesnet.py
  function collate_fn (line 23) | def collate_fn(data, device, max_len=None):
  function padding_mask (line 61) | def padding_mask(lengths, max_len=None):
  function evaluate_gpt4ts (line 75) | def evaluate_gpt4ts(args, val_loader, model, loss):

FILE: ts_classification_methods/timesnet/main_timesnet_ucr.py
  function collate_fn (line 23) | def collate_fn(data, device, max_len=None):
  function padding_mask (line 61) | def padding_mask(lengths, max_len=None):
  function evaluate_gpt4ts (line 75) | def evaluate_gpt4ts(args, val_loader, model, loss):

FILE: ts_classification_methods/timesnet/models/Conv_Blocks.py
  class Inception_Block_V1 (line 5) | class Inception_Block_V1(nn.Module):
    method __init__ (line 6) | def __init__(self, in_channels, out_channels, num_kernels=6, init_weig...
    method _initialize_weights (line 18) | def _initialize_weights(self):
    method forward (line 25) | def forward(self, x):
  class Inception_Block_V2 (line 33) | class Inception_Block_V2(nn.Module):
    method __init__ (line 34) | def __init__(self, in_channels, out_channels, num_kernels=6, init_weig...
    method _initialize_weights (line 48) | def _initialize_weights(self):
    method forward (line 55) | def forward(self, x):

FILE: ts_classification_methods/timesnet/models/Embed.py
  class PositionalEmbedding (line 6) | class PositionalEmbedding(nn.Module):
    method __init__ (line 7) | def __init__(self, d_model, max_len=25000):
    method forward (line 23) | def forward(self, x):
  class TokenEmbedding (line 27) | class TokenEmbedding(nn.Module):
    method __init__ (line 28) | def __init__(self, c_in, d_model):
    method forward (line 38) | def forward(self, x):
  class FixedEmbedding (line 43) | class FixedEmbedding(nn.Module):
    method __init__ (line 44) | def __init__(self, c_in, d_model):
    method forward (line 60) | def forward(self, x):
  class TemporalEmbedding (line 64) | class TemporalEmbedding(nn.Module):
    method __init__ (line 65) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 82) | def forward(self, x):
  class TimeFeatureEmbedding (line 94) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 95) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 103) | def forward(self, x):
  class DataEmbedding (line 107) | class DataEmbedding(nn.Module):
    method __init__ (line 108) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 118) | def forward(self, x, x_mark):
  class DataEmbedding_inverted (line 127) | class DataEmbedding_inverted(nn.Module):
    method __init__ (line 128) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 133) | def forward(self, x, x_mark):
  class DataEmbedding_wo_pos (line 144) | class DataEmbedding_wo_pos(nn.Module):
    method __init__ (line 145) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 155) | def forward(self, x, x_mark):
  class PatchEmbedding (line 163) | class PatchEmbedding(nn.Module):
    method __init__ (line 164) | def __init__(self, d_model, patch_len, stride, padding, dropout):
    method forward (line 180) | def forward(self, x):

FILE: ts_classification_methods/timesnet/models/SelfAttention_Family.py
  class TriangularCausalMask (line 8) | class TriangularCausalMask():
    method __init__ (line 9) | def __init__(self, B, L, device="cpu"):
    method mask (line 15) | def mask(self):
  class ProbMask (line 19) | class ProbMask():
    method __init__ (line 20) | def __init__(self, B, H, L, index, scores, device="cpu"):
    method mask (line 29) | def mask(self):
  class DSAttention (line 34) | class DSAttention(nn.Module):
    method __init__ (line 37) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 44) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class FullAttention (line 72) | class FullAttention(nn.Module):
    method __init__ (line 73) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 80) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class ProbAttention (line 102) | class ProbAttention(nn.Module):
    method __init__ (line 103) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method _prob_QK (line 111) | def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
    method _get_initial_context (line 137) | def _get_initial_context(self, V, L_Q):
    method _update_context (line 150) | def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
    method forward (line 171) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class AttentionLayer (line 203) | class AttentionLayer(nn.Module):
    method __init__ (line 204) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method forward (line 218) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class TwoStageAttentionLayer (line 270) | class TwoStageAttentionLayer(nn.Module):
    method __init__ (line 276) | def __init__(self, configs,
    method forward (line 302) | def forward(self, x, attn_mask=None, tau=None, delta=None):

FILE: ts_classification_methods/timesnet/models/TimesNet.py
  function FFT_for_Period (line 9) | def FFT_for_Period(x, k=2):
  class TimesBlock (line 23) | class TimesBlock(nn.Module):
    method __init__ (line 24) | def __init__(self, configs):
    method forward (line 38) | def forward(self, x):
  class Model (line 85) | class Model(nn.Module):
    method __init__ (line 90) | def __init__(self, configs):
    method forecast (line 117) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 144) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 172) | def anomaly_detection(self, x_enc):
    method classification (line 197) | def classification(self, x_enc, x_mark_enc):
    method forward (line 215) | def forward(self, x_enc, x_mark_enc, x_dec=None, x_mark_dec=None, mask...

FILE: ts_classification_methods/timesnet/models/Transformer.py
  class Model (line 10) | class Model(nn.Module):
    method __init__ (line 17) | def __init__(self, configs):
    method forecast (line 74) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 83) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 91) | def anomaly_detection(self, x_enc):
    method classification (line 99) | def classification(self, x_enc, x_mark_enc):
    method forward (line 131) | def forward(self, x_enc, x_mark_enc, x_dec=None, x_mark_dec=None, mask...

FILE: ts_classification_methods/timesnet/models/Transformer_EncDec.py
  class ConvLayer (line 6) | class ConvLayer(nn.Module):
    method __init__ (line 7) | def __init__(self, c_in):
    method forward (line 18) | def forward(self, x):
  class EncoderLayer (line 27) | class EncoderLayer(nn.Module):
    method __init__ (line 28) | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activat...
    method forward (line 39) | def forward(self, x, attn_mask=None, tau=None, delta=None):
  class Encoder (line 54) | class Encoder(nn.Module):
    method __init__ (line 55) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 61) | def forward(self, x, attn_mask=None, tau=None, delta=None):
  class DecoderLayer (line 83) | class DecoderLayer(nn.Module):
    method __init__ (line 84) | def __init__(self, self_attention, cross_attention, d_model, d_ff=None,
    method forward (line 98) | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, de...
  class Decoder (line 119) | class Decoder(nn.Module):
    method __init__ (line 120) | def __init__(self, layers, norm_layer=None, projection=None):
    method forward (line 126) | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, de...

FILE: ts_classification_methods/tloss_cls/losses/triplet_loss.py
  class TripletLoss (line 23) | class TripletLoss(torch.nn.modules.loss._Loss):
    method __init__ (line 51) | def __init__(self, compared_length, nb_random_samples, negative_penalty):
    method forward (line 59) | def forward(self, batch, encoder, train, save_memory=False):
  class TripletLossVaryingLength (line 160) | class TripletLossVaryingLength(torch.nn.modules.loss._Loss):
    method __init__ (line 189) | def __init__(self, compared_length, nb_random_samples, negative_penalty):
    method forward (line 197) | def forward(self, batch, encoder, train, save_memory=False):

FILE: ts_classification_methods/tloss_cls/networks/causal_cnn.py
  class Chomp1d (line 49) | class Chomp1d(torch.nn.Module):
    method __init__ (line 60) | def __init__(self, chomp_size):
    method forward (line 64) | def forward(self, x):
  class SqueezeChannels (line 68) | class SqueezeChannels(torch.nn.Module):
    method __init__ (line 72) | def __init__(self):
    method forward (line 75) | def forward(self, x):
  class CausalConvolutionBlock (line 79) | class CausalConvolutionBlock(torch.nn.Module):
    method __init__ (line 94) | def __init__(self, in_channels, out_channels, kernel_size, dilation,
    method forward (line 131) | def forward(self, x):
  class CausalCNN (line 140) | class CausalCNN(torch.nn.Module):
    method __init__ (line 155) | def __init__(self, in_channels, channels, depth, out_channels,
    method forward (line 176) | def forward(self, x):
  class CausalCNNEncoder (line 180) | class CausalCNNEncoder(torch.nn.Module):
    method __init__ (line 199) | def __init__(self, in_channels, channels, depth, reduced_size,
    method forward (line 212) | def forward(self, x):

FILE: ts_classification_methods/tloss_cls/networks/lstm.py
  class LSTMEncoder (line 22) | class LSTMEncoder(torch.nn.Module):
    method __init__ (line 33) | def __init__(self):
    method forward (line 40) | def forward(self, x):

FILE: ts_classification_methods/tloss_cls/scikit_wrappers.py
  class TimeSeriesEncoderClassifier (line 34) | class TimeSeriesEncoderClassifier(sklearn.base.BaseEstimator,
    method __init__ (line 69) | def __init__(self, compared_length, nb_random_samples, negative_penalty,
    method save_encoder (line 94) | def save_encoder(self, prefix_file):
    method save (line 106) | def save(self, prefix_file):
    method load_encoder (line 123) | def load_encoder(self, prefix_file):
    method load (line 141) | def load(self, prefix_file):
    method fit_classifier (line 154) | def fit_classifier(self, features, y):
    method fit_encoder (line 215) | def fit_encoder(self, X, y=None, save_memory=False, verbose=True):
    method fit (line 308) | def fit(self, X, y, save_memory=False, verbose=False):
    method encode (line 332) | def encode(self, X, batch_size=50):
    method encode_window (line 376) | def encode_window(self, X, window, batch_size=50, window_batch_size=10...
    method predict (line 417) | def predict(self, X, batch_size=50):
    method score (line 429) | def score(self, X, y, batch_size=50):
  class CausalCNNEncoderClassifier (line 443) | class CausalCNNEncoderClassifier(TimeSeriesEncoderClassifier):
    method __init__ (line 479) | def __init__(self, compared_length=50, nb_random_samples=10,
    method __create_encoder (line 499) | def __create_encoder(self, in_channels, channels, depth, reduced_size,
    method __encoder_params (line 510) | def __encoder_params(self, in_channels, channels, depth, reduced_size,
    method encode_sequence (line 521) | def encode_sequence(self, X, batch_size=50):
    method get_params (line 615) | def get_params(self, deep=True):
    method set_params (line 635) | def set_params(self, compared_length, nb_random_samples, negative_pena...
  class LSTMEncoderClassifier (line 647) | class LSTMEncoderClassifier(TimeSeriesEncoderClassifier):
    method __init__ (line 676) | def __init__(self, compared_length=50, nb_random_samples=10,
    method __create_encoder (line 688) | def __create_encoder(self, cuda, gpu):
    method get_params (line 695) | def get_params(self, deep=True):
    method set_params (line 710) | def set_params(self, compared_length, nb_random_samples, negative_pena...

FILE: ts_classification_methods/tloss_cls/transfer_ucr.py
  function parse_arguments (line 29) | def parse_arguments():

FILE: ts_classification_methods/tloss_cls/ucr.py
  function load_UCR_dataset (line 36) | def load_UCR_dataset(path, dataset):
  function fit_hyperparameters (line 115) | def fit_hyperparameters(file, train, train_labels, cuda, gpu,
  function parse_arguments (line 146) | def parse_arguments():

FILE: ts_classification_methods/tloss_cls/uea.py
  function fit_hyperparameters (line 38) | def fit_hyperparameters(file, train, train_labels, cuda, gpu,
  function parse_arguments (line 68) | def parse_arguments():

FILE: ts_classification_methods/tloss_cls/utils.py
  class Dataset (line 23) | class Dataset(torch.utils.data.Dataset):
    method __init__ (line 29) | def __init__(self, dataset):
    method __len__ (line 32) | def __len__(self):
    method __getitem__ (line 35) | def __getitem__(self, index):
  class LabelledDataset (line 39) | class LabelledDataset(torch.utils.data.Dataset):
    method __init__ (line 47) | def __init__(self, dataset, labels):
    method __len__ (line 51) | def __len__(self):
    method __getitem__ (line 54) | def __getitem__(self, index):

FILE: ts_classification_methods/ts2vec_cls/datautils.py
  function load_UCR (line 14) | def load_UCR(dataset):
  function load_UEA (line 83) | def load_UEA(dataset):
  function load_forecast_npy (line 112) | def load_forecast_npy(name, univar=False):
  function _get_time_features (line 129) | def _get_time_features(dt):
  function load_forecast_csv (line 141) | def load_forecast_csv(name, univar=False):
  function load_anomaly (line 188) | def load_anomaly(name):
  function gen_ano_train_data (line 195) | def gen_ano_train_data(all_train_data):

FILE: ts_classification_methods/ts2vec_cls/models/dilated_conv.py
  class SamePadConv (line 6) | class SamePadConv(nn.Module):
    method __init__ (line 7) | def __init__(self, in_channels, out_channels, kernel_size, dilation=1,...
    method forward (line 19) | def forward(self, x):
  class ConvBlock (line 25) | class ConvBlock(nn.Module):
    method __init__ (line 26) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 32) | def forward(self, x):
  class DilatedConvEncoder (line 40) | class DilatedConvEncoder(nn.Module):
    method __init__ (line 41) | def __init__(self, in_channels, channels, kernel_size):
    method forward (line 54) | def forward(self, x):

FILE: ts_classification_methods/ts2vec_cls/models/encoder.py
  function generate_continuous_mask (line 7) | def generate_continuous_mask(B, T, n=5, l=0.1):
  function generate_binomial_mask (line 23) | def generate_binomial_mask(B, T, p=0.5):
  class TSEncoder (line 26) | class TSEncoder(nn.Module):
    method __init__ (line 27) | def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, ...
    method forward (line 41) | def forward(self, x, mask=None):  # x: B x T x input_dims

FILE: ts_classification_methods/ts2vec_cls/models/losses.py
  function hierarchical_contrastive_loss (line 5) | def hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):
  function instance_contrastive_loss (line 23) | def instance_contrastive_loss(z1, z2):
  function temporal_contrastive_loss (line 38) | def temporal_contrastive_loss(z1, z2):

FILE: ts_classification_methods/ts2vec_cls/tasks/_eval_protocols.py
  function fit_svm (line 10) | def fit_svm(features, y, MAX_SAMPLES=10000):
  function fit_lr (line 52) | def fit_lr(features, y, MAX_SAMPLES=100000):
  function fit_knn (line 73) | def fit_knn(features, y):
  function fit_ridge (line 81) | def fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMP...

FILE: ts_classification_methods/ts2vec_cls/tasks/classification.py
  function eval_classification (line 6) | def eval_classification(model, train_data, train_labels, test_data, test...

FILE: ts_classification_methods/ts2vec_cls/train.py
  function save_checkpoint_callback (line 12) | def save_checkpoint_callback(

FILE: ts_classification_methods/ts2vec_cls/train_fcn.py
  function save_checkpoint_callback (line 22) | def save_checkpoint_callback(

FILE: ts_classification_methods/ts2vec_cls/train_tsm.py
  function save_checkpoint_callback (line 21) | def save_checkpoint_callback(

FILE: ts_classification_methods/ts2vec_cls/train_tsm_uea.py
  function save_checkpoint_callback (line 22) | def save_checkpoint_callback(

FILE: ts_classification_methods/ts2vec_cls/ts2vec.py
  class TS2Vec (line 10) | class TS2Vec:
    method __init__ (line 13) | def __init__(
    method fit (line 60) | def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method _eval_with_pooling (line 164) | def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_wind...
    method encode (line 208) | def encode(self, data, mask=None, encoding_window=None, casual=False, ...
    method save (line 305) | def save(self, fn):
    method load (line 313) | def load(self, fn):

FILE: ts_classification_methods/ts2vec_cls/utils.py
  function pkl_save (line 8) | def pkl_save(name, var):
  function pkl_load (line 12) | def pkl_load(name):
  function torch_pad_nan (line 16) | def torch_pad_nan(arr, left=0, right=0, dim=0):
  function pad_nan_to_target (line 27) | def pad_nan_to_target(array, target_length, axis=0, both_side=False):
  function split_with_nan (line 39) | def split_with_nan(x, sections, axis=0):
  function take_per_row (line 47) | def take_per_row(A, indx, num_elem):
  function centerize_vary_length_series (line 51) | def centerize_vary_length_series(x):
  function data_dropout (line 60) | def data_dropout(arr, p):
  function name_with_datetime (line 73) | def name_with_datetime(prefix='default'):
  function init_dl_program (line 77) | def init_dl_program(

FILE: ts_classification_methods/tsm_utils.py
  function set_seed (line 14) | def set_seed(args):
  function build_model (line 22) | def build_model(args):
  function build_dataset (line 44) | def build_dataset(args):
  function build_loss (line 51) | def build_loss(args):
  function build_optimizer (line 58) | def build_optimizer(args):
  function evaluate (line 65) | def evaluate(val_loader, model, classifier, loss, device):
  function save_finetune_result (line 86) | def save_finetune_result(args, accu, std):
  function save_cls_result (line 102) | def save_cls_result(args, test_accu, test_std, train_time, end_val_epoch...
  function get_all_datasets (line 120) | def get_all_datasets(data, target):

FILE: ts_classification_methods/tst_cls/src/dataprepare.py
  function load_data (line 16) | def load_data(dataroot, dataset):
  function transfer_labels (line 38) | def transfer_labels(labels):
  function k_fold (line 49) | def k_fold(data, target):
  function normalize_per_series (line 80) | def normalize_per_series(data):
  function fill_nan_value (line 86) | def fill_nan_value(train_set, val_set, test_set):
  function fill_nan_and_normalize (line 105) | def fill_nan_and_normalize(train_data, val_data, test_data, train_indice...

FILE: ts_classification_methods/tst_cls/src/datasets/data.py
  class Normalizer (line 19) | class Normalizer(object):
    method __init__ (line 24) | def __init__(self, norm_type, mean=None, std=None, min_val=None, max_v...
    method normalize (line 39) | def normalize(self, df):
  function interpolate_missing (line 72) | def interpolate_missing(y):
  function subsample (line 81) | def subsample(y, limit=256, factor=2):
  class BaseData (line 90) | class BaseData(object):
    method set_num_processes (line 92) | def set_num_processes(self, n_proc):
  class HDD_data (line 100) | class HDD_data(BaseData):
    method __init__ (line 109) | def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, l...
    method load_all (line 125) | def load_all(self, dir_path):
    method load_single (line 152) | def load_single(filepath):
    method read_data (line 159) | def read_data(filepath):
    method select_columns (line 167) | def select_columns(df):
    method process_columns (line 176) | def process_columns(df):
  class WeldData (line 186) | class WeldData(BaseData):
    method __init__ (line 199) | def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, l...
    method load_all (line 225) | def load_all(self, root_dir, file_list=None, pattern=None):
    method load_single (line 276) | def load_single(filepath):
    method read_data (line 288) | def read_data(filepath):
    method select_columns (line 295) | def select_columns(df):
  class TSRegressionArchive (line 311) | class TSRegressionArchive(BaseData):
    method __init__ (line 327) | def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, l...
    method load_all (line 350) | def load_all(self, root_dir, file_list=None, pattern=None):
    method load_single (line 408) | def load_single(self, filepath):
  class SemicondTraceData (line 475) | class SemicondTraceData(BaseData):
    method __init__ (line 553) | def __init__(self, root_dir, file_list=None, pattern=None, n_proc=8, l...
    method make_pjid (line 612) | def make_pjid(self, toolID, pjID):
    method convert_tracefilename (line 616) | def convert_tracefilename(self, filepath):
    method get_measurements (line 625) | def get_measurements(self, wafer_measurements_path):
    method get_metadata (line 685) | def get_metadata(self, catalog_path, measurements_df):
    method load_all (line 706) | def load_all(self, root_dir, file_list=None, pattern=None, mode=None):
    method load_single (line 767) | def load_single(filepath):
    method read_data (line 786) | def read_data(filepath):
    method select_columns (line 793) | def select_columns(df):
  class PMUData (line 810) | class PMUData(BaseData):
    method __init__ (line 823) | def __init__(self, root_dir, file_list=None, pattern=None, n_proc=1, l...
    method load_all (line 860) | def load_all(self, root_dir, file_list=None, pattern=None):
    method load_single (line 910) | def load_single(filepath):
    method read_data (line 922) | def read_data(filepath):

FILE: ts_classification_methods/tst_cls/src/datasets/dataset.py
  class ImputationDataset (line 6) | class ImputationDataset(Dataset):
    method __init__ (line 9) | def __init__(self, data, indices, mean_mask_length=3, masking_ratio=0.15,
    method __getitem__ (line 27) | def __getitem__(self, ind):
    method update (line 45) | def update(self):
    method __len__ (line 49) | def __len__(self):
  class TransductionDataset (line 53) | class TransductionDataset(Dataset):
    method __init__ (line 55) | def __init__(self, data, indices, mask_feats, start_hint=0.0, end_hint...
    method __getitem__ (line 69) | def __getitem__(self, ind):
    method update (line 87) | def update(self):
    method __len__ (line 91) | def __len__(self):
  function collate_superv (line 95) | def collate_superv(data, max_len=None, device=None):
  class ClassiregressionDataset (line 134) | class ClassiregressionDataset(Dataset):
    method __init__ (line 136) | def __init__(self, data, indices, device=None, feature_df=None):
    method __getitem__ (line 154) | def __getitem__(self, ind):
    method __len__ (line 173) | def __len__(self):
  function transduct_mask (line 177) | def transduct_mask(X, mask_feats, start_hint=0.0, end_hint=0.0):
  function compensate_masking (line 198) | def compensate_masking(X, mask):
  function collate_unsuperv (line 218) | def collate_unsuperv(data, max_len=None, mask_compensation=False):
  function noise_mask (line 263) | def noise_mask(X, masking_ratio, lm=3, mode='separate', distribution='ge...
  function geom_noise_mask_single (line 306) | def geom_noise_mask_single(L, lm, masking_ratio):
  function padding_mask (line 337) | def padding_mask(lengths, max_len=None):

FILE: ts_classification_methods/tst_cls/src/datasets/datasplit.py
  function split_dataset (line 5) | def split_dataset(data_indices, validation_method, n_splits, validation_...
  class DataSplitter (line 50) | class DataSplitter(object):
    method __init__ (line 53) | def __init__(self, data_indices, data_labels=None, ith=None):
    method factory (line 68) | def factory(split_type, *args, **kwargs):
    method split_testset (line 79) | def split_testset(self, test_ratio, random_state=1337):
    method split_validation (line 91) | def split_validation(self):
  class StratifiedKFoldSplitter (line 114) | class StratifiedKFoldSplitter(DataSplitter):
    method split_testset (line 115) | def split_testset(self, test_ratio, random_state=42):
    method split_validation (line 139) | def split_validation(self, n_splits, validation_ratio, random_state=42):
  class StratifiedShuffleSplitter (line 161) | class StratifiedShuffleSplitter(DataSplitter):
    method split_testset (line 168) | def split_testset(self, test_ratio, random_state=1337):
    method split_validation (line 191) | def split_validation(self, n_splits, validation_ratio, random_state=13...
  class ShuffleSplitter (line 218) | class ShuffleSplitter(DataSplitter):
    method split_testset (line 225) | def split_testset(self, test_ratio, random_state=1337):
    method split_validation (line 249) | def split_validation(self, n_splits, validation_ratio, random_state=13...

FILE: ts_classification_methods/tst_cls/src/datasets/utils.py
  function uniform_scaling (line 32) | def uniform_scaling(data, max_len):
  class TsFileParseException (line 46) | class TsFileParseException(Exception):
  function load_from_tsfile_to_dataframe (line 53) | def load_from_tsfile_to_dataframe(full_file_path_and_name, return_separa...
  function process_data (line 562) | def process_data(X, min_len, normalise=None):

FILE: ts_classification_methods/tst_cls/src/main.py
  function main (line 43) | def main(config):

FILE: ts_classification_methods/tst_cls/src/models/loss.py
  function get_loss_module (line 6) | def get_loss_module(config):
  function l2_reg_loss (line 23) | def l2_reg_loss(model):
  class NoFussCrossEntropyLoss (line 31) | class NoFussCrossEntropyLoss(nn.CrossEntropyLoss):
    method forward (line 37) | def forward(self, inp, target):
  class MaskedMSELoss (line 42) | class MaskedMSELoss(nn.Module):
    method __init__ (line 46) | def __init__(self, reduction: str = 'mean'):
    method forward (line 53) | def forward(self,

FILE: ts_classification_methods/tst_cls/src/models/ts_transformer.py
  function model_factory (line 10) | def model_factory(config, data, labels=None):
  function _get_activation_fn (line 77) | def _get_activation_fn(activation):
  class FixedPositionalEncoding (line 87) | class FixedPositionalEncoding(nn.Module):
    method __init__ (line 102) | def __init__(self, d_model, dropout=0.1, max_len=1024, scale_factor=1.0):
    method forward (line 116) | def forward(self, x):
  class LearnablePositionalEncoding (line 129) | class LearnablePositionalEncoding(nn.Module):
    method __init__ (line 131) | def __init__(self, d_model, dropout=0.1, max_len=1024):
    method forward (line 140) | def forward(self, x):
  function get_pos_encoder (line 153) | def get_pos_encoder(pos_encoding):
  class TransformerBatchNormEncoderLayer (line 163) | class TransformerBatchNormEncoderLayer(nn.modules.Module):
    method __init__ (line 176) | def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, ...
    method __setstate__ (line 192) | def __setstate__(self, state):
    method forward (line 197) | def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
  class TSTransformerEncoder (line 224) | class TSTransformerEncoder(nn.Module):
    method __init__ (line 226) | def __init__(self, feat_dim, max_len, d_model, n_heads, num_layers, di...
    method forward (line 256) | def forward(self, X, padding_masks):
  class TSTransformerEncoderClassiregressor (line 285) | class TSTransformerEncoderClassiregressor(nn.Module):
    method __init__ (line 291) | def __init__(self, feat_dim, max_len, d_model, n_heads, num_layers, di...
    method build_output_module (line 322) | def build_output_module(self, d_model, max_len, num_classes, nonlinear...
    method forward (line 340) | def forward(self, X, padding_masks):

FILE: ts_classification_methods/tst_cls/src/optimizers.py
  function get_optimizer (line 6) | def get_optimizer(name):
  class RAdam (line 15) | class RAdam(Optimizer):
    method __init__ (line 17) | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weig...
    method __setstate__ (line 36) | def __setstate__(self, state):
    method step (line 39) | def step(self, closure=None):
  class PlainRAdam (line 110) | class PlainRAdam(Optimizer):
    method __init__ (line 112) | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weig...
    method __setstate__ (line 127) | def __setstate__(self, state):
    method step (line 130) | def step(self, closure=None):
  class AdamW (line 188) | class AdamW(Optimizer):
    method __init__ (line 190) | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weig...
    method __setstate__ (line 204) | def __setstate__(self, state):
    method step (line 207) | def step(self, closure=None):

FILE: ts_classification_methods/tst_cls/src/options.py
  class Options (line 4) | class Options(object):
    method __init__ (line 6) | def __init__(self):
    method parse (line 179) | def parse(self):

FILE: ts_classification_methods/tst_cls/src/running.py
  function pipeline_factory (line 32) | def pipeline_factory(config, device):
  function setup (line 56) | def setup(args):
  function fold_evaluate (line 104) | def fold_evaluate(dataset, model, device, loss_module, target_feats, con...
  function convert_metrics_per_batch_to_per_sample (line 149) | def convert_metrics_per_batch_to_per_sample(metrics, target_masks):
  function evaluate (line 171) | def evaluate(evaluator):
  function validate (line 189) | def validate(val_evaluator, tensorboard_writer, config, best_metrics, be...
  function check_progress (line 213) | def check_progress(epoch):
  class BaseRunner (line 221) | class BaseRunner(object):
    method __init__ (line 223) | def __init__(self, model, dataloader, device, loss_module, optimizer=N...
    method train_epoch (line 236) | def train_epoch(self, epoch_num=None):
    method evaluate (line 239) | def evaluate(self, epoch_num=None, keep_all=True):
    method print_callback (line 242) | def print_callback(self, i_batch, metrics, prefix=''):
  class UnsupervisedRunner (line 257) | class UnsupervisedRunner(BaseRunner):
    method train_epoch (line 259) | def train_epoch(self, epoch_num=None):
    method evaluate (line 306) | def evaluate(self, epoch_num=None, keep_all=False):
  class SupervisedRunner (line 363) | class SupervisedRunner(BaseRunner):
    method __init__ (line 365) | def __init__(self, *args, **kwargs):
    method train_epoch (line 375) | def train_epoch(self, epoch_num=None):
    method evaluate (line 429) | def evaluate(self, epoch_num=None, keep_all=True):

FILE: ts_classification_methods/tst_cls/src/utils/analysis.py
  function acc_top_k (line 18) | def acc_top_k(predictions, y_true):
  function accuracy (line 44) | def accuracy(y_pred, y_true, excluded_labels=None):
  function precision (line 60) | def precision(y_true, y_pred, label):
  function recall (line 70) | def recall(y_true, y_pred, label):
  function limiter (line 80) | def limiter(metric_functions, y_true, y_pred, y_scores, score_thr, label):
  function prec_rec_parametrized_by_thr (line 93) | def prec_rec_parametrized_by_thr(y_true, y_pred, y_scores, label, Npoint...
  function plot_prec_vs_rec (line 121) | def plot_prec_vs_rec(score_grid, rec, prec, prec_requirement=None, thr_o...
  function plot_confusion_matrix (line 177) | def plot_confusion_matrix(ConfMat, label_strings=None, title='Confusion ...
  function print_confusion_matrix (line 191) | def print_confusion_matrix(ConfMat, label_strings=None, title='Confusion...
  class Analyzer (line 206) | class Analyzer(object):
    method __init__ (line 208) | def __init__(self, maxcharlength=35, plot=False, print_conf_mat=False,...
    method show_acc_top_k_improvement (line 234) | def show_acc_top_k_improvement(self, y_pred, y_true, k=5, inp='scores'):
    method generate_classification_report (line 274) | def generate_classification_report(self, digits=3, number_of_thieves=2...
    method get_avg_prec_recall (line 340) | def get_avg_prec_recall(self, ConfMatrix, existing_class_names, exclud...
    method prec_rec_histogram (line 366) | def prec_rec_histogram(self, precision, recall, binedges=None):
    method analyze_classification (line 409) | def analyze_classification(self, y_pred, y_true, class_names, excluded...

FILE: ts_classification_methods/tst_cls/src/utils/utils.py
  function timer (line 21) | def timer(func):
  function save_model (line 34) | def save_model(path, epoch, model, optimizer=None):
  function load_model (line 46) | def load_model(model, model_path, optimizer=None, resume=False, change_o...
  function load_config (line 78) | def load_config(config_filepath):
  function create_dirs (line 90) | def create_dirs(dirs):
  function export_performance_metrics (line 107) | def export_performance_metrics(filepath, metrics_table, header, book=Non...
  function write_row (line 121) | def write_row(sheet, row_ind, data_list):
  function write_table_to_sheet (line 130) | def write_table_to_sheet(table, work_book, sheet_name=None):
  function export_record (line 141) | def export_record(filepath, values):
  function register_record (line 154) | def register_record(filepath, timestamp, experiment_name, best_metrics, ...
  class Printer (line 195) | class Printer(object):
    method __init__ (line 198) | def __init__(self, console=True):
    method dyn_print (line 206) | def dyn_print(data):
  function readable_time (line 212) | def readable_time(time_difference):
  function check_model (line 243) | def check_model(model, verbose=False, zero_thresh=1e-8, inf_thresh=1e6, ...
  function check_tensor (line 265) | def check_tensor(X, verbose=True, zero_thresh=1e-8, inf_thresh=1e6):
  function count_parameters (line 298) | def count_parameters(model, trainable=False):
  function recursively_hook (line 305) | def recursively_hook(model, hook_fn):
  function compute_loss (line 314) | def compute_loss(net: torch.nn.Module,

FILE: ts_classification_methods/tstcc_cls/config_files/ucr_Configs.py
  class Config (line 1) | class Config(object):
    method __init__ (line 2) | def __init__(self):
  class augmentations (line 30) | class augmentations(object):
    method __init__ (line 31) | def __init__(self):
  class Context_Cont_configs (line 37) | class Context_Cont_configs(object):
    method __init__ (line 38) | def __init__(self):
  class TC (line 43) | class TC(object):
    method __init__ (line 44) | def __init__(self):

FILE: ts_classification_methods/tstcc_cls/config_files/uea_Configs.py
  class Config (line 1) | class Config(object):
    method __init__ (line 2) | def __init__(self):
  class augmentations (line 30) | class augmentations(object):
    method __init__ (line 31) | def __init__(self):
  class Context_Cont_configs (line 37) | class Context_Cont_configs(object):
    method __init__ (line 38) | def __init__(self):
  class TC (line 43) | class TC(object):
    method __init__ (line 44) | def __init__(self):

FILE: ts_classification_methods/tstcc_cls/dataloader/augmentations.py
  function DataTransform (line 5) | def DataTransform(sample, config):
  function jitter (line 13) | def jitter(x, sigma=0.8):
  function scaling (line 18) | def scaling(x, sigma=1.1):
  function permutation (line 28) | def permutation(x, max_segments=5, seg_mode="random"):

FILE: ts_classification_methods/tstcc_cls/dataloader/dataloader.py
  class Load_Dataset (line 11) | class Load_Dataset(Dataset):
    method __init__ (line 13) | def __init__(self, dataset, config, training_mode):
    method __getitem__ (line 41) | def __getitem__(self, index):
    method __len__ (line 47) | def __len__(self):
  function data_generator (line 51) | def data_generator(data_path, configs, training_mode):

FILE: ts_classification_methods/tstcc_cls/models/TC.py
  class TC (line 8) | class TC(nn.Module):
    method __init__ (line 9) | def __init__(self, configs, device):
    method forward (line 26) | def forward(self, features_aug1, features_aug2):

FILE: ts_classification_methods/tstcc_cls/models/attention.py
  class Residual (line 9) | class Residual(nn.Module):
    method __init__ (line 10) | def __init__(self, fn):
    method forward (line 14) | def forward(self, x, **kwargs):
  class PreNorm (line 18) | class PreNorm(nn.Module):
    method __init__ (line 19) | def __init__(self, dim, fn):
    method forward (line 24) | def forward(self, x, **kwargs):
  class FeedForward (line 28) | class FeedForward(nn.Module):
    method __init__ (line 29) | def __init__(self, dim, hidden_dim, dropout=0.):
    method forward (line 39) | def forward(self, x):
  class Attention (line 43) | class Attention(nn.Module):
    method __init__ (line 44) | def __init__(self, dim, heads=8, dropout=0.):
    method forward (line 55) | def forward(self, x, mask=None):
  class Transformer (line 77) | class Transformer(nn.Module):
    method __init__ (line 78) | def __init__(self, dim, depth, heads, mlp_dim, dropout):
    method forward (line 87) | def forward(self, x, mask=None):
  class Seq_Transformer (line 94) | class Seq_Transformer(nn.Module):
    method __init__ (line 95) | def __init__(self, *, patch_size, dim, depth, heads, mlp_dim, channels...
    method forward (line 104) | def forward(self, forward_seq):

FILE: ts_classification_methods/tstcc_cls/models/loss.py
  class NTXentLoss (line 4) | class NTXentLoss(torch.nn.Module):
    method __init__ (line 6) | def __init__(self, device, batch_size, temperature, use_cosine_similar...
    method _get_similarity_function (line 16) | def _get_similarity_function(self, use_cosine_similarity):
    method _get_correlated_mask (line 23) | def _get_correlated_mask(self):
    method _dot_simililarity (line 32) | def _dot_simililarity(x, y):
    method _cosine_simililarity (line 39) | def _cosine_simililarity(self, x, y):
    method forward (line 46) | def forward(self, zis, zjs):

FILE: ts_classification_methods/tstcc_cls/models/model.py
  class base_Model (line 3) | class base_Model(nn.Module):
    method __init__ (line 4) | def __init__(self, configs):
    method forward (line 33) | def forward(self, x_in):

FILE: ts_classification_methods/tstcc_cls/trainer/trainer.py
  function Trainer (line 14) | def Trainer(model, temporal_contr_model, model_optimizer, temp_cont_opti...
  function Trainer_cls (line 46) | def Trainer_cls(model, temporal_contr_model, model_optimizer, temp_cont_...
  function model_train (line 89) | def model_train(model, temporal_contr_model, model_optimizer, temp_cont_...
  function model_evaluate (line 149) | def model_evaluate(model, temporal_contr_model, test_dl, device, trainin...

FILE: ts_classification_methods/tstcc_cls/utils.py
  function generator_ucr_config (line 15) | def generator_ucr_config(data, label, configs):
  function generator_ucr (line 35) | def generator_ucr(data, label, configs, training_mode, drop_last=True):
  function generator_uea_config (line 53) | def generator_uea_config(data, label, configs):
  function generator_uea (line 72) | def generator_uea(data, label, configs, training_mode, drop_last=True):
  function set_requires_grad (line 87) | def set_requires_grad(model, dict_, requires_grad=True):
  function fix_randomness (line 93) | def fix_randomness(SEED):
  function epoch_time (line 101) | def epoch_time(start_time, end_time):
  function _calc_metrics (line 108) | def _calc_metrics(pred_labels, true_labels, log_dir, home_path):
  function _logger (line 138) | def _logger(logger_name, level=logging.DEBUG):
  function copy_Files (line 159) | def copy_Files(destination, data_type):

FILE: ts_classification_methods/visualize.py
  function heatmap (line 18) | def heatmap(xs, ys, dataset_name='MixedShapesSmallTrain', num_class=5, c...
  function multi_cam (line 83) | def multi_cam(xs, ys):

FILE: ts_forecasting_methods/CoST/cost.py
  class PretrainDataset (line 17) | class PretrainDataset(Dataset):
    method __init__ (line 19) | def __init__(self,
    method __getitem__ (line 31) | def __getitem__(self, item):
    method __len__ (line 35) | def __len__(self):
    method transform (line 38) | def transform(self, x):
    method jitter (line 41) | def jitter(self, x):
    method scale (line 46) | def scale(self, x):
    method shift (line 51) | def shift(self, x):
  class CoSTModel (line 57) | class CoSTModel(nn.Module):
    method __init__ (line 58) | def __init__(self,
    method compute_loss (line 104) | def compute_loss(self, q, k, k_negs):
    method convert_coeff (line 123) | def convert_coeff(self, x, eps=1e-6):
    method instance_contrastive_loss (line 128) | def instance_contrastive_loss(self, z1, z2):
    method forward (line 141) | def forward(self, x_q, x_k):
    method _momentum_update_key_encoder (line 177) | def _momentum_update_key_encoder(self):
    method _dequeue_and_enqueue (line 187) | def _dequeue_and_enqueue(self, keys):
  class CoST (line 200) | class CoST:
    method __init__ (line 201) | def __init__(self,
    method fit (line 250) | def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method _eval_with_pooling (line 334) | def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_wind...
    method encode (line 339) | def encode(self, data, mode, mask=None, encoding_window=None, casual=F...
    method save (line 427) | def save(self, fn):
    method load (line 435) | def load(self, fn):
  function adjust_learning_rate (line 445) | def adjust_learning_rate(optimizer, lr, epoch, epochs):

FILE: ts_forecasting_methods/CoST/datautils.py
  function load_forecast_npy (line 6) | def load_forecast_npy(name, univar=False):
  function _get_time_features (line 22) | def _get_time_features(dt):
  function load_forecast_csv (line 33) | def load_forecast_csv(name, univar=False):

FILE: ts_forecasting_methods/CoST/models/dilated_conv.py
  class SamePadConv (line 6) | class SamePadConv(nn.Module):
    method __init__ (line 7) | def __init__(self, in_channels, out_channels, kernel_size, dilation=1,...
    method forward (line 19) | def forward(self, x):
  class ConvBlock (line 26) | class ConvBlock(nn.Module):
    method __init__ (line 27) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 33) | def forward(self, x):
  class DilatedConvEncoder (line 42) | class DilatedConvEncoder(nn.Module):
    method __init__ (line 43) | def __init__(self, in_channels, channels, kernel_size, extract_layers=...
    method forward (line 61) | def forward(self, x):

FILE: ts_forecasting_methods/CoST/models/encoder.py
  function generate_continuous_mask (line 15) | def generate_continuous_mask(B, T, n=5, l=0.1):
  function generate_binomial_mask (line 32) | def generate_binomial_mask(B, T, p=0.5):
  class BandedFourierLayer (line 36) | class BandedFourierLayer(nn.Module):
    method __init__ (line 37) | def __init__(self, in_channels, out_channels, band, num_bands, length=...
    method forward (line 60) | def forward(self, input):
    method _forward (line 68) | def _forward(self, input):
    method reset_parameters (line 72) | def reset_parameters(self) -> None:
  class CoSTEncoder (line 79) | class CoSTEncoder(nn.Module):
    method __init__ (line 80) | def __init__(self, input_dims, output_dims,
    method forward (line 114) | def forward(self, x, tcn_output=False, mask='all_true'):  # x: B x T x...

FILE: ts_forecasting_methods/CoST/tasks/_eval_protocols.py
  function fit_ridge (line 6) | def fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMP...

FILE: ts_forecasting_methods/CoST/tasks/forecasting.py
  function generate_pred_samples (line 6) | def generate_pred_samples(features, data, pred_len, drop=0):
  function cal_metrics (line 16) | def cal_metrics(pred, target):
  function eval_forecasting (line 23) | def eval_forecasting(model, data, train_slice, valid_slice, test_slice, ...

FILE: ts_forecasting_methods/CoST/train.py
  function save_checkpoint_callback (line 15) | def save_checkpoint_callback(

FILE: ts_forecasting_methods/CoST/utils.py
  function pkl_save (line 10) | def pkl_save(name, var):
  function pkl_load (line 14) | def pkl_load(name):
  function torch_pad_nan (line 18) | def torch_pad_nan(arr, left=0, right=0, dim=0):
  function pad_nan_to_target (line 29) | def pad_nan_to_target(array, target_length, axis=0, both_side=False):
  function split_with_nan (line 41) | def split_with_nan(x, sections, axis=0):
  function take_per_row (line 49) | def take_per_row(A, indx, num_elem):
  function centerize_vary_length_series (line 53) | def centerize_vary_length_series(x):
  function data_dropout (line 62) | def data_dropout(arr, p):
  function name_with_datetime (line 75) | def name_with_datetime(prefix='default'):
  function init_dl_program (line 79) | def init_dl_program(

FILE: ts_forecasting_methods/Other_baselines/data_provider/data_factory.py
  function data_provider (line 22) | def data_provider(args, flag):

FILE: ts_forecasting_methods/Other_baselines/data_provider/data_factory_tempo.py
  function data_provider (line 12) | def data_provider(args, flag, drop_last_test=True, train_all=False):

FILE: ts_forecasting_methods/Other_baselines/data_provider/data_loader.py
  class Dataset_ETT_hour (line 19) | class Dataset_ETT_hour(Dataset):
    method __init__ (line 20) | def __init__(self, args, root_path, flag='train', size=None,
    method __read_data__ (line 49) | def __read_data__(self):
    method __getitem__ (line 97) | def __getitem__(self, index):
    method __len__ (line 110) | def __len__(self):
    method inverse_transform (line 113) | def inverse_transform(self, data):
  class Dataset_ETT_minute (line 117) | class Dataset_ETT_minute(Dataset):
    method __init__ (line 118) | def __init__(self, args, root_path, flag='train', size=None,
    method __read_data__ (line 147) | def __read_data__(self):
    method __getitem__ (line 193) | def __getitem__(self, index):
    method __len__ (line 206) | def __len__(self):
    method inverse_transform (line 209) | def inverse_transform(self, data):
  class Dataset_Custom (line 213) | class Dataset_Custom(Dataset):
    method __init__ (line 214) | def __init__(self, args, root_path, flag='train', size=None,
    method __read_data__ (line 243) | def __read_data__(self):
    method __getitem__ (line 310) | def __getitem__(self, index):
    method __len__ (line 323) | def __len__(self):
    method inverse_transform (line 326) | def inverse_transform(self, data):
  class Dataset_M4 (line 330) | class Dataset_M4(Dataset):
    method __init__ (line 331) | def __init__(self, args, root_path, flag='pred', size=None,
    method __read_data__ (line 355) | def __read_data__(self):
    method __getitem__ (line 367) | def __getitem__(self, index):
    method __len__ (line 387) | def __len__(self):
    method inverse_transform (line 390) | def inverse_transform(self, data):
    method last_insample_window (line 393) | def last_insample_window(self):
  class PSMSegLoader (line 409) | class PSMSegLoader(Dataset):
    method __init__ (line 410) | def __init__(self, args, root_path, win_size, step=1, flag="train"):
    method __len__ (line 431) | def __len__(self):
    method __getitem__ (line 441) | def __getitem__(self, index):
  class MSLSegLoader (line 456) | class MSLSegLoader(Dataset):
    method __init__ (line 457) | def __init__(self, args, root_path, win_size, step=1, flag="train"):
    method __len__ (line 474) | def __len__(self):
    method __getitem__ (line 484) | def __getitem__(self, index):
  class SMAPSegLoader (line 499) | class SMAPSegLoader(Dataset):
    method __init__ (line 500) | def __init__(self, args, root_path, win_size, step=1, flag="train"):
    method __len__ (line 517) | def __len__(self):
    method __getitem__ (line 528) | def __getitem__(self, index):
  class SMDSegLoader (line 543) | class SMDSegLoader(Dataset):
    method __init__ (line 544) | def __init__(self, args, root_path, win_size, step=100, flag="train"):
    method __len__ (line 559) | def __len__(self):
    method __getitem__ (line 569) | def __getitem__(self, index):
  class SWATSegLoader (line 584) | class SWATSegLoader(Dataset):
    method __init__ (line 585) | def __init__(self, args, root_path, win_size, step=1, flag="train"):
    method __len__ (line 608) | def __len__(self):
    method __getitem__ (line 621) | def __getitem__(self, index):
  class UEAloader (line 636) | class UEAloader(Dataset):
    method __init__ (line 653) | def __init__(self, args, root_path, file_list=None, limit_size=None, f...
    method load_all (line 677) | def load_all(self, root_path, file_list=None, flag=None):
    method load_single (line 706) | def load_single(self, filepath):
    method instance_norm (line 742) | def instance_norm(self, case):
    method __getitem__ (line 752) | def __getitem__(self, ind):
    method __len__ (line 767) | def __len__(self):

FILE: ts_forecasting_methods/Other_baselines/data_provider/data_loader_tempo.py
  class Dataset_ETT_hour (line 18) | class Dataset_ETT_hour(Dataset):
    method __init__ (line 19) | def __init__(self, root_path, flag='train', size=None,
    method stl_resolve (line 55) | def stl_resolve(self, data_raw, data_name):
    method __read_data__ (line 110) | def __read_data__(self):
    method __getitem__ (line 165) | def __getitem__(self, index):
    method __len__ (line 182) | def __len__(self):
    method inverse_transform (line 185) | def inverse_transform(self, data):
  class Dataset_ETT_minute (line 189) | class Dataset_ETT_minute(Dataset):
    method __init__ (line 190) | def __init__(self, root_path, flag='train', size=None,
    method stl_resolve (line 224) | def stl_resolve(self, data_raw, data_name):
    method __read_data__ (line 281) | def __read_data__(self):
    method __getitem__ (line 334) | def __getitem__(self, index):
    method __len__ (line 351) | def __len__(self):
    method inverse_transform (line 354) | def inverse_transform(self, data):
  class Dataset_Custom (line 358) | class Dataset_Custom(Dataset):
    method __init__ (line 359) | def __init__(self, root_path, flag='train', size=None,
    method stl_resolve (line 394) | def stl_resolve(self, data_raw):
    method __read_data__ (line 453) | def __read_data__(self):
    method __getitem__ (line 528) | def __getitem__(self, index):
    method __len__ (line 549) | def __len__(self):
    method inverse_transform (line 553) | def inverse_transform(self, data):
  class Dataset_Pred (line 557) | class Dataset_Pred(Dataset):
    method __init__ (line 558) | def __init__(self, root_path, flag='pred', size=None,
    method stl_resolve (line 588) | def stl_resolve(self, data_raw, period=24):
    method __read_data__ (line 647) | def __read_data__(self):
    method __getitem__ (line 710) | def __getitem__(self, index):
    method __len__ (line 731) | def __len__(self):
    method inverse_transform (line 734) | def inverse_transform(self, data):
  class Dataset_TSF (line 738) | class Dataset_TSF(Dataset):
    method __init__ (line 739) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 760) | def __read_data__(self):
    method __getitem__ (line 802) | def __getitem__(self, index):
    method __len__ (line 833) | def __len__(self):

FILE: ts_forecasting_methods/Other_baselines/data_provider/m4.py
  function url_file_name (line 35) | def url_file_name(url: str) -> str:
  function download (line 45) | def download(url: str, file_path: str) -> None:
  class M4Dataset (line 74) | class M4Dataset:
    method load (line 82) | def load(training: bool = True, dataset_file: str = '../dataset/m4') -...
  class M4Meta (line 102) | class M4Meta:
  function load_m4_info (line 132) | def load_m4_info() -> pd.DataFrame:

FILE: ts_forecasting_methods/Other_baselines/data_provider/uea.py
  function collate_fn (line 7) | def collate_fn(data, max_len=None):
  function padding_mask (line 45) | def padding_mask(lengths, max_len=None):
  class Normalizer (line 58) | class Normalizer(object):
    method __init__ (line 63) | def __init__(self, norm_type='standardization', mean=None, std=None, m...
    method normalize (line 78) | def normalize(self, df):
  function interpolate_missing (line 110) | def interpolate_missing(y):
  function subsample (line 119) | def subsample(y, limit=256, factor=2):

FILE: ts_forecasting_methods/Other_baselines/exp/exp_basic.py
  class Exp_Basic (line 6) | class Exp_Basic(object):
    method __init__ (line 7) | def __init__(self, args):
    method _build_model (line 23) | def _build_model(self):
    method _acquire_device (line 27) | def _acquire_device(self):
    method _get_data (line 38) | def _get_data(self):
    method vali (line 41) | def vali(self):
    method train (line 44) | def train(self):
    method test (line 47) | def test(self):

FILE: ts_forecasting_methods/Other_baselines/exp/exp_basic_patch.py
  class Exp_Basic (line 6) | class Exp_Basic(object):
    method __init__ (line 7) | def __init__(self, args):
    method _build_model (line 12) | def _build_model(self):
    method _acquire_device (line 16) | def _acquire_device(self):
    method _get_data (line 27) | def _get_data(self):
    method vali (line 30) | def vali(self):
    method train (line 33) | def train(self):
    method test (line 36) | def test(self):

FILE: ts_forecasting_methods/Other_baselines/exp/exp_long_term_forecasting.py
  class Exp_Long_Term_Forecast (line 17) | class Exp_Long_Term_Forecast(Exp_Basic):
    method __init__ (line 18) | def __init__(self, args):
    method _build_model (line 21) | def _build_model(self):
    method _get_data (line 28) | def _get_data(self, flag):
    method _select_optimizer (line 32) | def _select_optimizer(self):
    method _select_criterion (line 36) | def _select_criterion(self):
    method vali (line 40) | def vali(self, vali_data, vali_loader, criterion):
    method train (line 80) | def train(self, setting):
    method test (line 181) | def test(self, setting, test=0):

FILE: ts_forecasting_methods/Other_baselines/exp/exp_main.py
  function adjust_learning_rate (line 23) | def adjust_learning_rate(optimizer, scheduler, epoch, args, printout=True):
  class Exp_Main (line 54) | class Exp_Main(Exp_Basic):
    method __init__ (line 55) | def __init__(self, args):
    method _build_model (line 58) | def _build_model(self):
    method _get_data (line 68) | def _get_data(self, flag):
    method _select_optimizer (line 72) | def _select_optimizer(self):
    method _select_criterion (line 76) | def _select_criterion(self):
    method vali (line 80) | def vali(self, vali_data, vali_loader, criterion):
    method train (line 126) | def train(self, setting):
    method test (line 245) | def test(self, setting, test=0):
    method predict (line 354) | def predict(self, setting, load=False):

FILE: ts_forecasting_methods/Other_baselines/exp/exp_short_term_forecasting.py
  class Exp_Short_Term_Forecast (line 19) | class Exp_Short_Term_Forecast(Exp_Basic):
    method __init__ (line 20) | def __init__(self, args):
    method _build_model (line 23) | def _build_model(self):
    method _get_data (line 35) | def _get_data(self, flag):
    method _select_optimizer (line 39) | def _select_optimizer(self):
    method _select_criterion (line 43) | def _select_criterion(self, loss_name='MSE'):
    method train (line 53) | def train(self, setting):
    method vali (line 129) | def vali(self, train_loader, vali_loader, criterion):
    method test (line 160) | def test(self, setting, test=0):

FILE: ts_forecasting_methods/Other_baselines/layers/AutoCorrelation.py
  class AutoCorrelation (line 11) | class AutoCorrelation(nn.Module):
    method __init__ (line 19) | def __init__(self, mask_flag=True, factor=1, scale=None, attention_dro...
    method time_delay_agg_training (line 27) | def time_delay_agg_training(self, values, corr):
    method time_delay_agg_inference (line 51) | def time_delay_agg_inference(self, values, corr):
    method time_delay_agg_full (line 78) | def time_delay_agg_full(self, values, corr):
    method forward (line 102) | def forward(self, queries, keys, values, attn_mask):
  class AutoCorrelationLayer (line 131) | class AutoCorrelationLayer(nn.Module):
    method __init__ (line 132) | def __init__(self, correlation, d_model, n_heads, d_keys=None,
    method forward (line 146) | def forward(self, queries, keys, values, attn_mask):

FILE: ts_forecasting_methods/Other_baselines/layers/Autoformer_EncDec.py
  class my_Layernorm (line 6) | class my_Layernorm(nn.Module):
    method __init__ (line 11) | def __init__(self, channels):
    method forward (line 15) | def forward(self, x):
  class moving_avg (line 21) | class moving_avg(nn.Module):
    method __init__ (line 26) | def __init__(self, kernel_size, stride):
    method forward (line 31) | def forward(self, x):
  class series_decomp (line 41) | class series_decomp(nn.Module):
    method __init__ (line 46) | def __init__(self, kernel_size):
    method forward (line 50) | def forward(self, x):
  class series_decomp_multi (line 56) | class series_decomp_multi(nn.Module):
    method __init__ (line 61) | def __init__(self, kernel_size):
    method forward (line 66) | def forward(self, x):
  class EncoderLayer (line 79) | class EncoderLayer(nn.Module):
    method __init__ (line 84) | def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropo...
    method forward (line 95) | def forward(self, x, attn_mask=None):
  class Encoder (line 109) | class Encoder(nn.Module):
    method __init__ (line 114) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 120) | def forward(self, x, attn_mask=None):
  class DecoderLayer (line 140) | class DecoderLayer(nn.Module):
    method __init__ (line 145) | def __init__(self, self_attention, cross_attention, d_model, c_out, d_...
    method forward (line 161) | def forward(self, x, cross, x_mask=None, cross_mask=None):
  class Decoder (line 182) | class Decoder(nn.Module):
    method __init__ (line 187) | def __init__(self, layers, norm_layer=None, projection=None):
    method forward (line 193) | def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):

FILE: ts_forecasting_methods/Other_baselines/layers/Conv_Blocks.py
  class Inception_Block_V1 (line 5) | class Inception_Block_V1(nn.Module):
    method __init__ (line 6) | def __init__(self, in_channels, out_channels, num_kernels=6, init_weig...
    method _initialize_weights (line 18) | def _initialize_weights(self):
    method forward (line 25) | def forward(self, x):
  class Inception_Block_V2 (line 33) | class Inception_Block_V2(nn.Module):
    method __init__ (line 34) | def __init__(self, in_channels, out_channels, num_kernels=6, init_weig...
    method _initialize_weights (line 48) | def _initialize_weights(self):
    method forward (line 55) | def forward(self, x):

FILE: ts_forecasting_methods/Other_baselines/layers/Embed.py
  class PositionalEmbedding (line 8) | class PositionalEmbedding(nn.Module):
    method __init__ (line 9) | def __init__(self, d_model, max_len=5000):
    method forward (line 25) | def forward(self, x):
  class TokenEmbedding (line 29) | class TokenEmbedding(nn.Module):
    method __init__ (line 30) | def __init__(self, c_in, d_model):
    method forward (line 40) | def forward(self, x):
  class FixedEmbedding (line 45) | class FixedEmbedding(nn.Module):
    method __init__ (line 46) | def __init__(self, c_in, d_model):
    method forward (line 62) | def forward(self, x):
  class TemporalEmbedding (line 66) | class TemporalEmbedding(nn.Module):
    method __init__ (line 67) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 84) | def forward(self, x):
  class TimeFeatureEmbedding (line 96) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 97) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 105) | def forward(self, x):
  class DataEmbedding (line 109) | class DataEmbedding(nn.Module):
    method __init__ (line 110) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 120) | def forward(self, x, x_mark):
  class DataEmbedding_inverted (line 129) | class DataEmbedding_inverted(nn.Module):
    method __init__ (line 130) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 135) | def forward(self, x, x_mark):
  class DataEmbedding_wo_pos (line 146) | class DataEmbedding_wo_pos(nn.Module):
    method __init__ (line 147) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 157) | def forward(self, x, x_mark):
  class PatchEmbedding (line 165) | class PatchEmbedding(nn.Module):
    method __init__ (line 166) | def __init__(self, d_model, patch_len, stride, padding, dropout):
    method forward (line 182) | def forward(self, x):
  class DataEmbedding_wo_time (line 193) | class DataEmbedding_wo_time(nn.Module):
    method __init__ (line 194) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 201) | def forward(self, x):

FILE: ts_forecasting_methods/Other_baselines/layers/PatchTST_backbone.py
  class PatchTST_backbone (line 14) | class PatchTST_backbone(nn.Module):
    method __init__ (line 15) | def __init__(self, c_in:int, context_window:int, target_window:int, pa...
    method forward (line 58) | def forward(self, z):                                                 ...
    method create_pretrain_head (line 82) | def create_pretrain_head(self, head_nf, vars, dropout):
  class Flatten_Head (line 88) | class Flatten_Head(nn.Module):
    method __init__ (line 89) | def __init__(self, individual, n_vars, nf, target_window, head_dropout...
    method forward (line 108) | def forward(self, x):                                 # x: [bs x nvars...
  class TSTiEncoder (line 126) | class TSTiEncoder(nn.Module):  #i means channel-independent
    method __init__ (line 127) | def __init__(self, c_in, patch_num, patch_len, max_seq_len=1024,
    method forward (line 155) | def forward(self, x) -> Tensor:                                       ...
  class TSTEncoder (line 175) | class TSTEncoder(nn.Module):
    method __init__ (line 176) | def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=N...
    method forward (line 187) | def forward(self, src:Tensor, key_padding_mask:Optional[Tensor]=None, ...
  class TSTEncoderLayer (line 199) | class TSTEncoderLayer(nn.Module):
    method __init__ (line 200) | def __init__(self, q_len, d_model, n_heads, d_k=None, d_v=None, d_ff=2...
    method forward (line 235) | def forward(self, src:Tensor, prev:Optional[Tensor]=None, key_padding_...
  class _MultiheadAttention (line 270) | class _MultiheadAttention(nn.Module):
    method __init__ (line 271) | def __init__(self, d_model, n_heads, d_k=None, d_v=None, res_attention...
    method forward (line 296) | def forward(self, Q:Tensor, K:Optional[Tensor]=None, V:Optional[Tensor...
  class _ScaledDotProductAttention (line 323) | class _ScaledDotProductAttention(nn.Module):
    method __init__ (line 328) | def __init__(self, d_model, n_heads, attn_dropout=0., res_attention=Fa...
    method forward (line 336) | def forward(self, q:Tensor, k:Tensor, v:Tensor, prev:Optional[Tensor]=...

FILE: ts_forecasting_methods/Other_baselines/layers/PatchTST_layers.py
  class Transpose (line 5) | class Transpose(nn.Module):
    method __init__ (line 6) | def __init__(self, *dims, contiguous=False):
    method forward (line 9) | def forward(self, x):
  function get_activation_fn (line 14) | def get_activation_fn(activation):
  class moving_avg (line 23) | class moving_avg(nn.Module):
    method __init__ (line 27) | def __init__(self, kernel_size, stride):
    method forward (line 32) | def forward(self, x):
  class series_decomp (line 42) | class series_decomp(nn.Module):
    method __init__ (line 46) | def __init__(self, kernel_size):
    method forward (line 50) | def forward(self, x):
  function PositionalEncoding (line 59) | def PositionalEncoding(q_len, d_model, normalize=True):
  function Coord2dPosEncoding (line 72) | def Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True...
  function Coord1dPosEncoding (line 87) | def Coord1dPosEncoding(q_len, exponential=False, normalize=True):
  function positional_encoding (line 94) | def positional_encoding(pe, learn_pe, q_len, d_model):

FILE: ts_forecasting_methods/Other_baselines/layers/RevIN.py
  class RevIN (line 6) | class RevIN(nn.Module):
    method __init__ (line 7) | def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_...
    method forward (line 21) | def forward(self, x, mode:str):
    method _init_params (line 30) | def _init_params(self):
    method _get_statistics (line 35) | def _get_statistics(self, x):
    method _normalize (line 43) | def _normalize(self, x):
    method _denormalize (line 54) | def _denormalize(self, x):

FILE: ts_forecasting_methods/Other_baselines/layers/SelfAttention_Family.py
  class DSAttention (line 10) | class DSAttention(nn.Module):
    method __init__ (line 13) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 20) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class FullAttention (line 48) | class FullAttention(nn.Module):
    method __init__ (line 49) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 56) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class ProbAttention (line 78) | class ProbAttention(nn.Module):
    method __init__ (line 79) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method _prob_QK (line 87) | def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
    method _get_initial_context (line 113) | def _get_initial_context(self, V, L_Q):
    method _update_context (line 126) | def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
    method forward (line 147) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class AttentionLayer (line 179) | class AttentionLayer(nn.Module):
    method __init__ (line 180) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method forward (line 194) | def forward(self, queries, keys, values, attn_mask, tau=None, delta=No...
  class ReformerLayer (line 216) | class ReformerLayer(nn.Module):
    method __init__ (line 217) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method fit_length (line 229) | def fit_length(self, queries):
    method forward (line 239) | def forward(self, queries, keys, values, attn_mask, tau, delta):
  class TwoStageAttentionLayer (line 246) | class TwoStageAttentionLayer(nn.Module):
    method __init__ (line 252) | def __init__(self, configs,
    method forward (line 278) | def forward(self, x, attn_mask=None, tau=None, delta=None):

FILE: ts_forecasting_methods/Other_baselines/layers/Transformer_EncDec.py
  class ConvLayer (line 6) | class ConvLayer(nn.Module):
    method __init__ (line 7) | def __init__(self, c_in):
    method forward (line 18) | def forward(self, x):
  class EncoderLayer (line 27) | class EncoderLayer(nn.Module):
    method __init__ (line 28) | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activat...
    method forward (line 39) | def forward(self, x, attn_mask=None, tau=None, delta=None):
  class Encoder (line 54) | class Encoder(nn.Module):
    method __init__ (line 55) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 61) | def forward(self, x, attn_mask=None, tau=None, delta=None):
  class DecoderLayer (line 83) | class DecoderLayer(nn.Module):
    method __init__ (line 84) | def __init__(self, self_attention, cross_attention, d_model, d_ff=None,
    method forward (line 98) | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, de...
  class Decoder (line 119) | class Decoder(nn.Module):
    method __init__ (line 120) | def __init__(self, layers, norm_layer=None, projection=None):
    method forward (line 126) | def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, de...

FILE: ts_forecasting_methods/Other_baselines/models/Autoformer.py
  class Model (line 11) | class Model(nn.Module):
    method __init__ (line 18) | def __init__(self, configs):
    method forecast (line 89) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 112) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 120) | def anomaly_detection(self, x_enc):
    method classification (line 128) | def classification(self, x_enc, x_mark_enc):
    method forward (line 144) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_forecasting_methods/Other_baselines/models/DLinear.py
  class Model (line 7) | class Model(nn.Module):
    method __init__ (line 12) | def __init__(self, configs, individual=False):
    method encoder (line 55) | def encoder(self, x):
    method forecast (line 75) | def forecast(self, x_enc):
    method imputation (line 79) | def imputation(self, x_enc):
    method anomaly_detection (line 83) | def anomaly_detection(self, x_enc):
    method classification (line 87) | def classification(self, x_enc):
    method forward (line 97) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_forecasting_methods/Other_baselines/models/GPT4TS.py
  class GPT4TS (line 10) | class GPT4TS(nn.Module):
    method __init__ (line 12) | def __init__(self, configs, device):
    method forward (line 58) | def forward(self, x, itr):

FILE: ts_forecasting_methods/Other_baselines/models/Informer.py
  class Model (line 9) | class Model(nn.Module):
    method __init__ (line 15) | def __init__(self, configs):
    method long_forecast (line 77) | def long_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method short_forecast (line 86) | def short_forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 102) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 110) | def anomaly_detection(self, x_enc):
    method classification (line 118) | def classification(self, x_enc, x_mark_enc):
    method forward (line 131) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_forecasting_methods/Other_baselines/models/LogTrans.py
  function _make_ix_like (line 40) | def _make_ix_like(X, dim):
  function _roll_last (line 48) | def _roll_last(X, dim):
  function _entmax_threshold_and_support (line 59) | def _entmax_threshold_and_support(X, dim=-1, k=None):
  class Entmax15Function (line 113) | class Entmax15Function(Function):
    method forward (line 115) | def forward(cls, ctx, X: torch.Tensor, dim=0, k=None):
    method backward (line 129) | def backward(cls, ctx, dY):
  function entmax15 (line 139) | def entmax15(X, dim=-1, k=None):
  function _sparsemax_threshold_and_support (line 165) | def _sparsemax_threshold_and_support(X, dim=-1, k=None):
  class SparsemaxFunction (line 212) | class SparsemaxFunction(Function):
    method forward (line 214) | def forward(cls, ctx, X, dim=-1, k=None):
    method backward (line 224) | def backward(cls, ctx, grad_output):
  function sparsemax (line 236) | def sparsemax(X, dim=-1, k=None):
  class Sparsemax (line 261) | class Sparsemax(nn.Module):
    method __init__ (line 262) | def __init__(self, dim=-1, k=None):
    method forward (line 281) | def forward(self, X):
  function swish (line 285) | def swish(x):
  function gelu (line 294) | def gelu(x):
  function swish (line 298) | def swish(x):
  class Attention (line 308) | class Attention(nn.Module):
    method __init__ (line 309) | def __init__(self, n_head, n_embd, win_len, scale, q_len, sub_len, spa...
    method log_mask (line 329) | def log_mask(self, win_len, sub_len):
    method row_mask (line 335) | def row_mask(self, index, sub_len, win_len):
    method attn (line 363) | def attn(self, query: torch.Tensor, key, value: torch.Tensor, activati...
    method merge_heads (line 376) | def merge_heads(self, x):
    method split_heads (line 381) | def split_heads(self, x, k=False):
    method forward (line 389) | def forward(self, x):
  class Conv1D (line 405) | class Conv1D(nn.Module):
    method __init__ (line 406) | def __init__(self, out_dim, rf, in_dim):
    method forward (line 418) | def forward(self, x):
  class LayerNorm (line 435) | class LayerNorm(nn.Module):
    method __init__ (line 438) | def __init__(self, n_embd, e=1e-5):
    method forward (line 444) | def forward(self, x):
  class MLP (line 451) | class MLP(nn.Module):
    method __init__ (line 452) | def __init__(self, n_state, n_embd, acf='relu'):
    method forward (line 460) | def forward(self, x):
  class Block (line 466) | class Block(nn.Module):
    method __init__ (line 467) | def __init__(self, n_head, win_len, n_embd, scale, q_len, sub_len):
    method forward (line 475) | def forward(self, x):
  class TransformerModel (line 483) | class TransformerModel(nn.Module):
    method __init__ (line 486) | def __init__(self, n_time_series, n_head, sub_len, num_layer, n_embd,
    method forward (line 513) | def forward(self, series_id: int, x: torch.Tensor):
  class Model (line 543) | class Model(nn.Module):
    method __init__ (line 544) | def __init__(self, configs):
    method _initialize_weights (line 583) | def _initialize_weights(self):
    method forward (line 593) | def forward(self, x: torch.Tensor, x_mark_enc, x_dec, x_mark_dec,

FILE: ts_forecasting_methods/Other_baselines/models/PatchTST.py
  function l2norm (line 10) | def l2norm(t):
  class AttentionLayer (line 13) | class AttentionLayer(nn.Module):
    method __init__ (line 14) | def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=...
    method forward (line 27) | def forward(self, queries, keys, values, attn_mask, attn_bias):
  class FullAttention (line 47) | class FullAttention(nn.Module):
    method __init__ (line 48) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 60) | def forward(self, queries, keys, values, attn_mask, attn_bias):
  class EncoderLayer (line 84) | class EncoderLayer(nn.Module):
    method __init__ (line 85) | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activat...
    method forward (line 96) | def forward(self, x, attn_mask=None, attn_bias=None):
  class Encoder (line 110) | class Encoder(nn.Module):
    method __init__ (line 111) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 117) | def forward(self, x, attn_mask=None, attn_bias=None):
  class PatchTST (line 138) | class PatchTST(nn.Module):
    method __init__ (line 142) | def __init__(self, configs, device):
    method forward (line 184) | def forward(self, x_enc, itr):

FILE: ts_forecasting_methods/Other_baselines/models/PatchTST_raw.py
  class Model (line 13) | class Model(nn.Module):
    method __init__ (line 14) | def __init__(self, configs, max_seq_len: Optional[int] = 1024, d_k: Op...
    method forward (line 97) | def forward(self, x):  # x: [Batch, Input length, Channel]

FILE: ts_forecasting_methods/Other_baselines/models/TCN.py
  class Chomp1d (line 14) | class Chomp1d(nn.Module):
    method __init__ (line 15) | def __init__(self, chomp_size):
    method forward (line 19) | def forward(self, x):
  class TemporalBlock (line 23) | class TemporalBlock(nn.Module):
    method __init__ (line 24) | def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,...
    method init_weights (line 44) | def init_weights(self):
    method forward (line 50) | def forward(self, x):
  class TemporalConvNet (line 56) | class TemporalConvNet(nn.Module):
    method __init__ (line 57) | def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
    method forward (line 70) | def forward(self, x):
  class Model (line 75) | class Model(nn.Module):
    method __init__ (line 77) | def __init__(self, configs):
    method init_weights (line 90) | def init_weights(self):
    method forward (line 93) | def forward(self, x, x_mark_enc=None, x_dec=None, x_mark_dec=None, mas...

FILE: ts_forecasting_methods/Other_baselines/models/TEMPO.py
  class ComplexLinear (line 15) | class ComplexLinear(nn.Module):
    method __init__ (line 16) | def __init__(self, input_dim, output_dim):
    method forward (line 21) | def forward(self, x):
  function print_trainable_parameters (line 29) | def print_trainable_parameters(model):
  class MultiFourier (line 40) | class MultiFourier(torch.nn.Module):
    method __init__ (line 41) | def __init__(self, N, P):
    method forward (line 48) | def forward(self, t):
  class moving_avg (line 59) | class moving_avg(nn.Module):
    method __init__ (line 63) | def __init__(self, kernel_size, stride):
    method forward (line 68) | def forward(self, x):
  class TEMPO (line 77) | class TEMPO(nn.Module):
    method __init__ (line 79) | def __init__(self, configs, device):
    method store_tensors_in_dict (line 251) | def store_tensors_in_dict(self, original_x, original_trend, original_s...
    method l2_normalize (line 267) | def l2_normalize(self, x, dim=None, epsilon=1e-12):
    method select_prompt (line 273) | def select_prompt(self, summary, prompt_mask=None):
    method get_norm (line 308) | def get_norm(self, x, d = 'norm'):
    method get_patch (line 317) | def get_patch(self, x):
    method get_emb (line 325) | def get_emb(self, x, tokens=None, type = 'Trend'):
    method forward (line 385) | def forward(self, x, itr, trend, season, noise, test=False):

FILE: ts_forecasting_methods/Other_baselines/models/TimesNet.py
  function FFT_for_Period (line 9) | def FFT_for_Period(x, k=2):
  class TimesBlock (line 21) | class TimesBlock(nn.Module):
    method __init__ (line 22) | def __init__(self, configs):
    method forward (line 36) | def forward(self, x):
  class Model (line 71) | class Model(nn.Module):
    method __init__ (line 76) | def __init__(self, configs):
    method forecast (line 103) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 130) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 158) | def anomaly_detection(self, x_enc):
    method classification (line 183) | def classification(self, x_enc, x_mark_enc):
    method forward (line 201) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_forecasting_methods/Other_baselines/models/iTransformer.py
  class Model (line 10) | class Model(nn.Module):
    method __init__ (line 15) | def __init__(self, configs):
    method forecast (line 51) | def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
    method imputation (line 70) | def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
    method anomaly_detection (line 89) | def anomaly_detection(self, x_enc):
    method classification (line 108) | def classification(self, x_enc, x_mark_enc):
    method forward (line 120) | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):

FILE: ts_forecasting_methods/Other_baselines/train_cost.py
  function generate_pred_samples (line 25) | def generate_pred_samples(features, data, pred_len, drop=0):
  function cal_metrics (line 35) | def cal_metrics(pred, target):
  function eval_forecasting (line 42) | def eval_forecasting(model, train_data, valid_data, test_data, pred_lens...
  function save_checkpoint_callback (line 129) | def save_checkpoint_callback(

FILE: ts_forecasting_methods/Other_baselines/train_gpt4ts.py
  class SMAPE (line 164) | class SMAPE(nn.Module):
    method __init__ (line 165) | def __init__(self):
    method forward (line 168) | def forward(self, pred, true):

FILE: ts_forecasting_methods/Other_baselines/train_tempo.py
  function get_init_config (line 34) | def get_init_config(config_path=None):
  class SMAPE (line 287) | class SMAPE(nn.Module):
    method __init__ (line 288) | def __init__(self):
    method forward (line 291) | def forward(self, pred, true):

FILE: ts_forecasting_methods/Other_baselines/train_ts2vec.py
  function generate_pred_samples (line 23) | def generate_pred_samples(features, data, pred_len, drop=0):
  function cal_metrics (line 33) | def cal_metrics(pred, target):
  function eval_forecasting_new (line 40) | def eval_forecasting_new(model, train_data, valid_data, test_data, pred_...
  function save_checkpoint_callback (line 134) | def save_checkpoint_callback(

FILE: ts_forecasting_methods/Other_baselines/utils/ADFtest.py
  function calculate_ADF (line 7) | def calculate_ADF(root_path,data_path):
  function calculate_target_ADF (line 20) | def calculate_target_ADF(root_path,data_path,target='OT'):
  function archADF (line 33) | def archADF(root_path, data_path):

FILE: ts_forecasting_methods/Other_baselines/utils/augmentation.py
  function jitter (line 4) | def jitter(x, sigma=0.03):
  function scaling (line 9) | def scaling(x, sigma=0.1):
  function rotation (line 14) | def rotation(x):
  function permutation (line 21) | def permutation(x, max_segments=5, seg_mode="equal"):
  function magnitude_warp (line 46) | def magnitude_warp(x, sigma=0.2, knot=4):
  function time_warp (line 59) | def time_warp(x, sigma=0.2, knot=4):
  function window_slice (line 74) | def window_slice(x, reduce_ratio=0.9):
  function window_warp (line 88) | def window_warp(x, window_ratio=0.1, scales=[0.5, 2.]):
  function spawner (line 107) | def spawner(x, labels, sigma=0.05, verbose=0):
  function wdba (line 145) | def wdba(x, labels, batch_size=6, slope_constraint="symmetric", use_wind...
  function random_guided_warp (line 207) | def random_guided_warp(x, labels, slope_constraint="symmetric", use_wind...
  function random_guided_warp_shape (line 247) | def random_guided_warp_shape(x, labels, slope_constraint="symmetric", us...
  function discriminative_guided_warp (line 250) | def discriminative_guided_warp(x, labels, batch_size=6, slope_constraint...
  function discriminative_guided_warp_shape (line 328) | def discriminative_guided_warp_shape(x, labels, batch_size=6, slope_cons...
  function run_augmentation (line 332) | def run_augmentation(x, y, args):
  function run_augmentation_single (line 350) | def run_augmentation_single(x, y, args):
  function augment (line 368) | def augment(x, y, args):

FILE: ts_forecasting_methods/Other_baselines/utils/dtw.py
  function _traceback (line 12) | def _traceback(DTW, slope_constraint):
  function dtw (line 50) | def dtw(prototype, sample, return_flag = RETURN_VALUE, slope_constraint=...
  function _cummulative_matrix (line 79) | def _cummulative_matrix(cost, slope_constraint, window):
  function shape_dtw (line 103) | def shape_dtw(prototype, sample, return_flag = RETURN_VALUE, slope_const...
  function draw_graph2d (line 149) | def draw_graph2d(cost, DTW, path, prototype, sample):
  function draw_graph1d (line 186) | def draw_graph1d(cost, DTW, path, prototype, sample):

FILE: ts_forecasting_methods/Other_baselines/utils/dtw_metric.py
  function dtw (line 6) | def dtw(x, y, dist, warp=1, w=inf, s=1.0):
  function accelerated_dtw (line 58) | def accelerated_dtw(x, y, dist, warp=1):
  function _traceback (line 100) | def _traceback(D):

FILE: ts_forecasting_methods/Other_baselines/utils/losses.py
  function divide_no_nan (line 25) | def divide_no_nan(a, b):
  class mape_loss (line 35) | class mape_loss(nn.Module):
    method __init__ (line 36) | def __init__(self):
    method forward (line 39) | def forward(self, insample: t.Tensor, freq: int,
  class smape_loss (line 53) | class smape_loss(nn.Module):
    method __init__ (line 54) | def __init__(self):
    method forward (line 57) | def forward(self, insample: t.Tensor, freq: int,
  class mase_loss (line 71) | class mase_loss(nn.Module):
    method __init__ (line 72) | def __init__(self):
    method forward (line 75) | def forward(self, insample: t.Tensor, freq: int,

FILE: ts_forecasting_methods/Other_baselines/utils/m4_summary.py
  function group_values (line 28) | def group_values(values, groups, group_name):
  function mase (line 32) | def mase(forecast, insample, outsample, frequency):
  function smape_2 (line 36) | def smape_2(forecast, target):
  function mape (line 43) | def mape(forecast, target):
  class M4Summary (line 50) | class M4Summary:
    method __init__ (line 51) | def __init__(self, file_path, root_path):
    method evaluate (line 57) | def evaluate(self):
    method summarize_groups (line 113) | def summarize_groups(self, scores):

FILE: ts_forecasting_methods/Other_baselines/utils/masking.py
  class TriangularCausalMask (line 4) | class TriangularCausalMask():
    method __init__ (line 5) | def __init__(self, B, L, device="cpu"):
    method mask (line 11) | def mask(self):
  class ProbMask (line 15) | class ProbMask():
    method __init__ (line 16) | def __init__(self, B, H, L, index, scores, device="cpu"):
    method mask (line 25) | def mask(self):

FILE: ts_forecasting_methods/Other_baselines/utils/metrics.py
  function RSE (line 4) | def RSE(pred, true):
  function CORR (line 8) | def CORR(pred, true):
  function MAE (line 14) | def MAE(pred, true):
  function MSE (line 18) | def MSE(pred, true):
  function RMSE (line 22) | def RMSE(pred, true):
  function MAPE (line 26) | def MAPE(pred, true):
  function MSPE (line 30) | def MSPE(pred, true):
  function metric (line 34) | def metric(pred, true):

FILE: ts_forecasting_methods/Other_baselines/utils/print_args.py
  function print_args (line 1) | def print_args(args):

FILE: ts_forecasting_methods/Other_baselines/utils/rev_in.py
  class RevIn (line 12) | class RevIn(nn.Module):
    method __init__ (line 13) | def __init__(self, num_features: int, eps=1e-5, affine=True, subtract_...
    method forward (line 29) | def forward(self, x, mode: str):
    method _init_params (line 42) | def _init_params(self):
    method _get_statistics (line 47) | def _get_statistics(self, x):
    method _normalize (line 55) | def _normalize(self, x):
    method _denormalize (line 66) | def _denormalize(self, x):

FILE: ts_forecasting_methods/Other_baselines/utils/timefeatures.py
  class TimeFeature (line 23) | class TimeFeature:
    method __init__ (line 24) | def __init__(self):
    method __call__ (line 27) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
    method __repr__ (line 30) | def __repr__(self):
  class SecondOfMinute (line 34) | class SecondOfMinute(TimeFeature):
    method __call__ (line 37) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MinuteOfHour (line 41) | class MinuteOfHour(TimeFeature):
    method __call__ (line 44) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class HourOfDay (line 48) | class HourOfDay(TimeFeature):
    method __call__ (line 51) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfWeek (line 55) | class DayOfWeek(TimeFeature):
    method __call__ (line 58) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfMonth (line 62) | class DayOfMonth(TimeFeature):
    method __call__ (line 65) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfYear (line 69) | class DayOfYear(TimeFeature):
    method __call__ (line 72) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MonthOfYear (line 76) | class MonthOfYear(TimeFeature):
    method __call__ (line 79) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class WeekOfYear (line 83) | class WeekOfYear(TimeFeature):
    method __call__ (line 86) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  function time_features_from_frequency_str (line 90) | def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
  function time_features (line 147) | def time_features(dates, freq='h'):

FILE: ts_forecasting_methods/Other_baselines/utils/tools.py
  function adjust_learning_rate (line 15) | def adjust_learning_rate(optimizer, epoch, args):
  class EarlyStopping (line 33) | class EarlyStopping:
    method __init__ (line 34) | def __init__(self, patience=7, verbose=False, delta=0):
    method __call__ (line 43) | def __call__(self, val_loss, model, path):
    method save_checkpoint (line 58) | def save_checkpoint(self, val_loss, model, path):
  class dotdict (line 65) | class dotdict(dict):
  class StandardScaler (line 72) | class StandardScaler():
    method __init__ (line 73) | def __init__(self, mean, std):
    method transform (line 77) | def transform(self, data):
    method inverse_transform (line 80) | def inverse_transform(self, data):
  function visual (line 84) | def visual(true, preds=None, name='./pic/test.pdf'):
  function adjustment (line 96) | def adjustment(gt, pred):
  function cal_accuracy (line 120) | def cal_accuracy(y_pred, y_true):
  function vali (line 124) | def vali(model, vali_data, vali_loader, criterion, args, device, itr):
  function MASE (line 160) | def MASE(x, freq, pred, true):
  function test (line 165) | def test(model, test_data, test_loader, args, device, itr):
  function convert_tsf_to_dataframe (line 210) | def convert_tsf_to_dataframe(
  function test_params_flop (line 355) | def test_params_flop(model,x_shape):

FILE: ts_forecasting_methods/Other_baselines/utils/tools_tempo.py
  function adjust_learning_rate (line 16) | def adjust_learning_rate(optimizer, epoch, args):
  class EarlyStopping (line 44) | class EarlyStopping:
    method __init__ (line 45) | def __init__(self, patience=7, verbose=False, delta=0):
    method __call__ (line 54) | def __call__(self, val_loss, model, path):
    method save_checkpoint (line 69) | def save_checkpoint(self, val_loss, model, path):
  class dotdict (line 76) | class dotdict(dict):
  class StandardScaler (line 83) | class StandardScaler():
    method __init__ (line 84) | def __init__(self, mean, std):
    method transform (line 88) | def transform(self, data):
    method inverse_transform (line 91) | def inverse_transform(self, data):
  function visual (line 95) | def visual(true, preds=None, name='./pic/test.pdf'):
  function convert_tsf_to_dataframe (line 107) | def convert_tsf_to_dataframe(
  function vali (line 252) | def vali(model, vali_data, vali_loader, criterion, args, device, itr):
  function MASE (line 320) | def MASE(x, freq, pred, true):
  function metric_mae_mse (line 325) | def metric_mae_mse(preds, trues):
  function test (line 331) | def test(model, test_data, test_loader, args, device, itr):

FILE: ts_forecasting_methods/SupervisedBaselines/data_provider/data_factory.py
  function data_provider (line 13) | def data_provider(args, flag):

FILE: ts_forecasting_methods/SupervisedBaselines/data_provider/data_loader.py
  class Dataset_ETT_hour (line 14) | class Dataset_ETT_hour(Dataset):
    method __init__ (line 15) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 43) | def __read_data__(self):
    method __getitem__ (line 82) | def __getitem__(self, index):
    method __len__ (line 95) | def __len__(self):
    method inverse_transform (line 98) | def inverse_transform(self, data):
  class Dataset_ETT_minute (line 102) | class Dataset_ETT_minute(Dataset):
    method __init__ (line 103) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 131) | def __read_data__(self):
    method __getitem__ (line 172) | def __getitem__(self, index):
    method __len__ (line 185) | def __len__(self):
    method inverse_transform (line 188) | def inverse_transform(self, data):
  class Dataset_Custom (line 192) | class Dataset_Custom(Dataset):
    method __init__ (line 193) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 221) | def __read_data__(self):
    method __getitem__ (line 267) | def __getitem__(self, index):
    method __len__ (line 280) | def __len__(self):
    method inverse_transform (line 283) | def inverse_transform(self, data):
  class Dataset_Pred (line 287) | class Dataset_Pred(Dataset):
    method __init__ (line 288) | def __init__(self, root_path, flag='pred', size=None,
    method __read_data__ (line 315) | def __read_data__(self):
    method __getitem__ (line 370) | def __getitem__(self, index):
    method __len__ (line 386) | def __len__(self):
    method inverse_transform (line 389) | def inverse_transform(self, data):

FILE: ts_forecasting_methods/SupervisedBaselines/exp/exp_basic.py
  class Exp_Basic (line 6) | class Exp_Basic(object):
    method __init__ (line 7) | def __init__(self, args):
    method _build_model (line 12) | def _build_model(self):
    method _acquire_device (line 16) | def _acquire_device(self):
    method _get_data (line 27) | def _get_data(self):
    method vali (line 30) | def vali(self):
    method train (line 33) | def train(self):
    method test (line 36) | def test(self):

FILE: ts_forecasting_methods/SupervisedBaselines/exp/exp_informer.py
  class Exp_Informer (line 22) | class Exp_Informer(Exp_Basic):
    method __init__ (line 23) | def __init__(self, args):
    method _build_model (line 26) | def _build_model(self):
    method _select_optimizer (line 61) | def _select_optimizer(self):
    method _select_optimizer_p (line 65) | def _select_optimizer_p(self):
    method _select_criterion (line 70) | def _select_criterion(self):
    method vali (line 74) | def vali(self, vali_loader, scaler,criterion):
    method train (line 112) | def train(self, setting):
    method test (line 210) | def test(self, setting):
    method predict (line 280) | def predict(self, setting, load=False):

FILE: ts_forecasting_methods/SupervisedBaselines/exp/exp_main.py
  class Exp_Main (line 24) | class Exp_Main(Exp_Basic):
    method __init__ (line 25) | def __init__(self, args):
    method _build_model (line 28) | def _build_model(self):
    method _get_data (line 44) | def _get_data(self, flag):
    method _select_optimizer (line 48) | def _select_optimizer(self):
    method _select_criterion (line 52) | def _select_criterion(self):
    method vali (line 56) | def vali(self, vali_data, vali_loader, criterion):
    method train (line 96) | def train(self, setting):
    method test (line 195) | def test(self, setting, test=0):
    method predict (line 277) | def predict(self, setting, load=False):

FILE: ts_forecasting_methods/SupervisedBaselines/layers/AutoCorrelation.py
  class AutoCorrelation (line 11) | class AutoCorrelation(nn.Module):
    method __init__ (line 18) | def __init__(self, mask_flag=True, factor=1, scale=None, attention_dro...
    method time_delay_agg_training (line 26) | def time_delay_agg_training(self, values, corr):
    method time_delay_agg_inference (line 50) | def time_delay_agg_inference(self, values, corr):
    method time_delay_agg_full (line 77) | def time_delay_agg_full(self, values, corr):
    method forward (line 101) | def forward(self, queries, keys, values, attn_mask):
  class AutoCorrelationLayer (line 130) | class AutoCorrelationLayer(nn.Module):
    method __init__ (line 131) | def __init__(self, correlation, d_model, n_heads, d_keys=None,
    method forward (line 145) | def forward(self, queries, keys, values, attn_mask):

FILE: ts_forecasting_methods/SupervisedBaselines/layers/Autoformer_EncDec.py
  class my_Layernorm (line 6) | class my_Layernorm(nn.Module):
    method __init__ (line 10) | def __init__(self, channels):
    method forward (line 14) | def forward(self, x):
  class moving_avg (line 20) | class moving_avg(nn.Module):
    method __init__ (line 24) | def __init__(self, kernel_size, stride):
    method forward (line 29) | def forward(self, x):
  class series_decomp (line 39) | class series_decomp(nn.Module):
    method __init__ (line 43) | def __init__(self, kernel_size):
    method forward (line 47) | def forward(self, x):
  class EncoderLayer (line 53) | class EncoderLayer(nn.Module):
    method __init__ (line 57) | def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropo...
    method forward (line 68) | def forward(self, x, attn_mask=None):
  class Encoder (line 82) | class Encoder(nn.Module):
    method __init__ (line 86) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 92) | def forward(self, x, attn_mask=None):
  class DecoderLayer (line 112) | class DecoderLayer(nn.Module):
    method __init__ (line 116) | def __init__(self, self_attention, cross_attention, d_model, c_out, d_...
    method forward (line 132) | def forward(self, x, cross, x_mask=None, cross_mask=None):
  class Decoder (line 153) | class Decoder(nn.Module):
    method __init__ (line 157) | def __init__(self, layers, norm_layer=None, projection=None):
    method forward (line 163) | def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):

FILE: ts_forecasting_methods/SupervisedBaselines/layers/Embed.py
  class PositionalEmbedding (line 8) | class PositionalEmbedding(nn.Module):
    method __init__ (line 9) | def __init__(self, d_model, max_len=5000):
    method forward (line 24) | def forward(self, x):
  class TokenEmbedding (line 28) | class TokenEmbedding(nn.Module):
    method __init__ (line 29) | def __init__(self, c_in, d_model):
    method forward (line 38) | def forward(self, x):
  class FixedEmbedding (line 43) | class FixedEmbedding(nn.Module):
    method __init__ (line 44) | def __init__(self, c_in, d_model):
    method forward (line 59) | def forward(self, x):
  class TemporalEmbedding (line 63) | class TemporalEmbedding(nn.Module):
    method __init__ (line 64) | def __init__(self, d_model, embed_type='fixed', freq='h'):
    method forward (line 81) | def forward(self, x):
  class TimeFeatureEmbedding (line 93) | class TimeFeatureEmbedding(nn.Module):
    method __init__ (line 94) | def __init__(self, d_model, embed_type='timeF', freq='h'):
    method forward (line 101) | def forward(self, x):
  class DataEmbedding (line 105) | class DataEmbedding(nn.Module):
    method __init__ (line 106) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 116) | def forward(self, x, x_mark):
  class DataEmbedding_wo_pos (line 121) | class DataEmbedding_wo_pos(nn.Module):
    method __init__ (line 122) | def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropou...
    method forward (line 132) | def forward(self, x, x_mark):

FILE: ts_forecasting_methods/SupervisedBaselines/layers/SelfAttention_Family.py
  class FullAttention (line 15) | class FullAttention(nn.Module):
    method __init__ (line 16) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method forward (line 23) | def forward(self, queries, keys, values, attn_mask):
  class ProbAttention (line 45) | class ProbAttention(nn.Module):
    method __init__ (line 46) | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dro...
    method _prob_QK (line 54) | def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
    method _get_initial_context (line 77) | def _get_initial_context(self, V, L_Q):
    method _update_context (line 88) | def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
    method forward (line 107) | def forward(self, queries, keys, values, attn_mask):
  class AttentionLayer (line 135) | class AttentionLayer(nn.Module):
    method __init__ (line 136) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method forward (line 150) | def forward(self, queries, keys, values, attn_mask):
  class ReformerLayer (line 170) | class ReformerLayer(nn.Module):
    method __init__ (line 171) | def __init__(self, attention, d_model, n_heads, d_keys=None,
    method fit_length (line 183) | def fit_length(self, queries):
    method forward (line 193) | def forward(self, queries, keys, values, attn_mask):

FILE: ts_forecasting_methods/SupervisedBaselines/layers/Transformer_EncDec.py
  class ConvLayer (line 6) | class ConvLayer(nn.Module):
    method __init__ (line 7) | def __init__(self, c_in):
    method forward (line 18) | def forward(self, x):
  class EncoderLayer (line 27) | class EncoderLayer(nn.Module):
    method __init__ (line 28) | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activat...
    method forward (line 39) | def forward(self, x, attn_mask=None):
  class Encoder (line 53) | class Encoder(nn.Module):
    method __init__ (line 54) | def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
    method forward (line 60) | def forward(self, x, attn_mask=None):
  class DecoderLayer (line 81) | class DecoderLayer(nn.Module):
    method __init__ (line 82) | def __init__(self, self_attention, cross_attention, d_model, d_ff=None,
    method forward (line 96) | def forward(self, x, cross, x_mask=None, cross_mask=None):
  class Decoder (line 115) | class Decoder(nn.Module):
    method __init__ (line 116) | def __init__(self, layers, norm_layer=None, projection=None):
    method forward (line 122) | def forward(self, x, cross, x_mask=None, cross_mask=None):

FILE: ts_forecasting_methods/SupervisedBaselines/utils/masking.py
  class TriangularCausalMask (line 4) | class TriangularCausalMask():
    method __init__ (line 5) | def __init__(self, B, L, device="cpu"):
    method mask (line 11) | def mask(self):
  class ProbMask (line 15) | class ProbMask():
    method __init__ (line 16) | def __init__(self, B, H, L, index, scores, device="cpu"):
    method mask (line 25) | def mask(self):

FILE: ts_forecasting_methods/SupervisedBaselines/utils/metrics.py
  function RSE (line 4) | def RSE(pred, true):
  function CORR (line 8) | def CORR(pred, true):
  function MAE (line 14) | def MAE(pred, true):
  function MSE (line 18) | def MSE(pred, true):
  function RMSE (line 22) | def RMSE(pred, true):
  function MAPE (line 26) | def MAPE(pred, true):
  function MSPE (line 30) | def MSPE(pred, true):
  function metric (line 34) | def metric(pred, true):

FILE: ts_forecasting_methods/SupervisedBaselines/utils/timefeatures.py
  class TimeFeature (line 9) | class TimeFeature:
    method __init__ (line 10) | def __init__(self):
    method __call__ (line 13) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
    method __repr__ (line 16) | def __repr__(self):
  class SecondOfMinute (line 20) | class SecondOfMinute(TimeFeature):
    method __call__ (line 23) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MinuteOfHour (line 27) | class MinuteOfHour(TimeFeature):
    method __call__ (line 30) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class HourOfDay (line 34) | class HourOfDay(TimeFeature):
    method __call__ (line 37) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfWeek (line 41) | class DayOfWeek(TimeFeature):
    method __call__ (line 44) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfMonth (line 48) | class DayOfMonth(TimeFeature):
    method __call__ (line 51) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfYear (line 55) | class DayOfYear(TimeFeature):
    method __call__ (line 58) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MonthOfYear (line 62) | class MonthOfYear(TimeFeature):
    method __call__ (line 65) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class WeekOfYear (line 69) | class WeekOfYear(TimeFeature):
    method __call__ (line 72) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  function time_features_from_frequency_str (line 76) | def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
  function time_features (line 133) | def time_features(dates, freq='h'):

FILE: ts_forecasting_methods/SupervisedBaselines/utils/tools.py
  function adjust_learning_rate (line 8) | def adjust_learning_rate(optimizer, epoch, args):
  class EarlyStopping (line 24) | class EarlyStopping:
    method __init__ (line 25) | def __init__(self, patience=7, verbose=False, delta=0):
    method __call__ (line 34) | def __call__(self, val_loss, model, path):
    method save_checkpoint (line 49) | def save_checkpoint(self, val_loss, model, path):
  class dotdict (line 56) | class dotdict(dict):
  class StandardScaler (line 63) | class StandardScaler():
    method __init__ (line 64) | def __init__(self, mean, std):
    method transform (line 68) | def transform(self, data):
    method inverse_transform (line 71) | def inverse_transform(self, data):
  function visual (line 75) | def visual(true, preds=None, name='./pic/test.pdf'):

FILE: ts_forecasting_methods/ts2vec/data_provider/data_factory.py
  function data_provider (line 105) | def data_provider(args, flag, drop_last_test=True, train_all=False):

FILE: ts_forecasting_methods/ts2vec/data_provider/data_loader.py
  class TimeFeature (line 27) | class TimeFeature:
    method __init__ (line 28) | def __init__(self):
    method __call__ (line 31) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
    method __repr__ (line 34) | def __repr__(self):
  class SecondOfMinute (line 38) | class SecondOfMinute(TimeFeature):
    method __call__ (line 41) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MinuteOfHour (line 45) | class MinuteOfHour(TimeFeature):
    method __call__ (line 48) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class HourOfDay (line 52) | class HourOfDay(TimeFeature):
    method __call__ (line 55) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfWeek (line 59) | class DayOfWeek(TimeFeature):
    method __call__ (line 62) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfMonth (line 66) | class DayOfMonth(TimeFeature):
    method __call__ (line 69) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class DayOfYear (line 73) | class DayOfYear(TimeFeature):
    method __call__ (line 76) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class MonthOfYear (line 80) | class MonthOfYear(TimeFeature):
    method __call__ (line 83) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  class WeekOfYear (line 87) | class WeekOfYear(TimeFeature):
    method __call__ (line 90) | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
  function time_features_from_frequency_str (line 94) | def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
  function time_features (line 152) | def time_features(dates, freq='h'):
  class Dataset_ETT_hour (line 157) | class Dataset_ETT_hour(Dataset):
    method __init__ (line 158) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 193) | def __read_data__(self):
    method __getitem__ (line 235) | def __getitem__(self, index):
    method __len__ (line 249) | def __len__(self):
    method inverse_transform (line 252) | def inverse_transform(self, data):
  class Dataset_ETT_minute (line 256) | class Dataset_ETT_minute(Dataset):
    method __init__ (line 257) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 290) | def __read_data__(self):
    method __getitem__ (line 333) | def __getitem__(self, index):
    method __len__ (line 347) | def __len__(self):
    method inverse_transform (line 350) | def inverse_transform(self, data):
  class Dataset_Custom (line 354) | class Dataset_Custom(Dataset):
    method __init__ (line 355) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 388) | def __read_data__(self):
    method __getitem__ (line 441) | def __getitem__(self, index):
    method __len__ (line 455) | def __len__(self):
    method inverse_transform (line 458) | def inverse_transform(self, data):
  class Dataset_Pred (line 462) | class Dataset_Pred(Dataset):
    method __init__ (line 463) | def __init__(self, root_path, flag='pred', size=None,
    method __read_data__ (line 491) | def __read_data__(self):
    method __getitem__ (line 546) | def __getitem__(self, index):
    method __len__ (line 562) | def __len__(self):
    method inverse_transform (line 565) | def inverse_transform(self, data):
  class Dataset_TSF (line 569) | class Dataset_TSF(Dataset):
    method __init__ (line 570) | def __init__(self, root_path, flag='train', size=None,
    method __read_data__ (line 591) | def __read_data__(self):
    method __getitem__ (line 633) | def __getitem__(self, index):
    method __len__ (line 664) | def __len__(self):

FILE: ts_forecasting_methods/ts2vec/data_provider/m4.py
  function url_file_name (line 35) | def url_file_name(url: str) -> str:
  function download (line 45) | def download(url: str, file_path: str) -> None:
  class M4Dataset (line 74) | class M4Dataset:
    method load (line 82) | def load(training: bool = True, dataset_file: str = '../dataset/m4') -...
  class M4Meta (line 102) | class M4Meta:
  function load_m4_info (line 132) | def load_m4_info() -> pd.DataFrame:

FILE: ts_forecasting_methods/ts2vec/data_provider/metrics.py
  function RSE (line 4) | def RSE(pred, true):
  function CORR (line 8) | def CORR(pred, true):
  function MAE (line 14) | def MAE(pred, true):
  function MSE (line 18) | def MSE(pred, true):
  function RMSE (line 22) | def RMSE(pred, true):
  function MAPE (line 26) | def MAPE(pred, true):
  function MSPE (line 30) | def MSPE(pred, true):
  function SMAPE (line 33) | def SMAPE(pred, true):
  function ND (line 37) | def ND(pred, true):
  function metric (line 40) | def metric(pred, true):

FILE: ts_forecasting_methods/ts2vec/data_provider/tools.py
  function adjust_learning_rate (line 16) | def adjust_learning_rate(optimizer, epoch, args):
  class EarlyStopping (line 44) | class EarlyStopping:
    method __init__ (line 45) | def __init__(self, patience=7, verbose=False, delta=0):
    method __call__ (line 54) | def __call__(self, val_loss, model, path):
    method save_checkpoint (line 69) | def save_checkpoint(self, val_loss, model, path):
  class dotdict (line 76) | class dotdict(dict):
  class StandardScaler (line 83) | class StandardScaler():
    method __init__ (line 84) | def __init__(self, mean, std):
    method transform (line 88) | def transform(self, data):
    method inverse_transform (line 91) | def inverse_transform(self, data):
  function visual (line 95) | def visual(true, preds=None, name='./pic/test.pdf'):
  function convert_tsf_to_dataframe (line 107) | def convert_tsf_to_dataframe(
  function vali (line 252) | def vali(model, vali_data, vali_loader, criterion, args, device, itr):
  function MASE (line 288) | def MASE(x, freq, pred, true):
  function test (line 293) | def test(model, test_data, test_loader, args, device, itr):

FILE: ts_forecasting_methods/ts2vec/data_provider/uea.py
  function collate_fn (line 7) | def collate_fn(data, max_len=None):
  function padding_mask (line 45) | def padding_mask(lengths, max_len=None):
  class Normalizer (line 58) | class Normalizer(object):
    method __init__ (line 63) | def __init__(self, norm_type='standardization', mean=None, std=None, m...
    method normalize (line 78) | def normalize(self, df):
  function interpolate_missing (line 110) | def interpolate_missing(y):
  function subsample (line 119) | def subsample(y, limit=256, factor=2):

FILE: ts_forecasting_methods/ts2vec/datautils.py
  function load_UCR (line 12) | def load_UCR(dataset):
  function load_UEA (line 79) | def load_UEA(dataset):
  function load_forecast_npy (line 108) | def load_forecast_npy(name, univar=False):
  function _get_time_features (line 125) | def _get_time_features(dt):
  function load_forecast_csv (line 137) | def load_forecast_csv(name, univar=False):
  function load_anomaly (line 195) | def load_anomaly(name):
  function gen_ano_train_data (line 202) | def gen_ano_train_data(all_train_data):

FILE: ts_forecasting_methods/ts2vec/models/dilated_conv.py
  class SamePadConv (line 6) | class SamePadConv(nn.Module):
    method __init__ (line 7) | def __init__(self, in_channels, out_channels, kernel_size, dilation=1,...
    method forward (line 19) | def forward(self, x):
  class ConvBlock (line 25) | class ConvBlock(nn.Module):
    method __init__ (line 26) | def __init__(self, in_channels, out_channels, kernel_size, dilation, f...
    method forward (line 32) | def forward(self, x):
  class DilatedConvEncoder (line 40) | class DilatedConvEncoder(nn.Module):
    method __init__ (line 41) | def __init__(self, in_channels, channels, kernel_size):
    method forward (line 54) | def forward(self, x):

FILE: ts_forecasting_methods/ts2vec/models/encoder.py
  function generate_continuous_mask (line 7) | def generate_continuous_mask(B, T, n=5, l=0.1):
  function generate_binomial_mask (line 23) | def generate_binomial_mask(B, T, p=0.5):
  class TSEncoder (line 26) | class TSEncoder(nn.Module):
    method __init__ (line 27) | def __init__(self, input_dims, output_dims, hidden_dims=64, depth=10, ...
    method forward (line 41) | def forward(self, x, mask=None):  # x: B x T x input_dims

FILE: ts_forecasting_methods/ts2vec/models/losses.py
  function hierarchical_contrastive_loss (line 5) | def hierarchical_contrastive_loss(z1, z2, alpha=0.5, temporal_unit=0):
  function instance_contrastive_loss (line 23) | def instance_contrastive_loss(z1, z2):
  function temporal_contrastive_loss (line 38) | def temporal_contrastive_loss(z1, z2):

FILE: ts_forecasting_methods/ts2vec/tasks/_eval_protocols.py
  function fit_svm (line 10) | def fit_svm(features, y, MAX_SAMPLES=10000):
  function fit_lr (line 52) | def fit_lr(features, y, MAX_SAMPLES=100000):
  function fit_knn (line 73) | def fit_knn(features, y):
  function fit_ridge (line 81) | def fit_ridge(train_features, train_y, valid_features, valid_y, MAX_SAMP...

FILE: ts_forecasting_methods/ts2vec/tasks/anomaly_detection.py
  function get_range_proba (line 7) | def get_range_proba(predict, label, delay=7):
  function reconstruct_label (line 33) | def reconstruct_label(timestamp, label):
  function eval_ad_result (line 51) | def eval_ad_result(test_pred_list, test_labels_list, test_timestamps_lis...
  function np_shift (line 70) | def np_shift(arr, num, fill_value=np.nan):
  function eval_anomaly_detection (line 83) | def eval_anomaly_detection(model, all_train_data, all_train_labels, all_...
  function eval_anomaly_detection_coldstart (line 152) | def eval_anomaly_detection_coldstart(model, all_train_data, all_train_la...

FILE: ts_forecasting_methods/ts2vec/tasks/classification.py
  function eval_classification (line 6) | def eval_classification(model, train_data, train_labels, test_data, test...

FILE: ts_forecasting_methods/ts2vec/tasks/forecasting.py
  function generate_pred_samples (line 5) | def generate_pred_samples(features, data, pred_len, drop=0):
  function cal_metrics (line 14) | def cal_metrics(pred, target):
  function eval_forecasting (line 20) | def eval_forecasting(model, data, train_slice, valid_slice, test_slice, ...
  function eval_forecasting_new (line 99) | def eval_forecasting_new(model, train_data, valid_data, test_data, pred_...

FILE: ts_forecasting_methods/ts2vec/train.py
  function save_checkpoint_callback (line 13) | def save_checkpoint_callback(

FILE: ts_forecasting_methods/ts2vec/ts2vec.py
  class TS2Vec (line 10) | class TS2Vec:
    method __init__ (line 13) | def __init__(
    method fit (line 60) | def fit(self, train_data, n_epochs=None, n_iters=None, verbose=False):
    method _eval_with_pooling (line 162) | def _eval_with_pooling(self, x, mask=None, slicing=None, encoding_wind...
    method encode (line 206) | def encode(self, data, mask=None, encoding_window=None, casual=False, ...
    method save (line 303) | def save(self, fn):
    method load (line 311) | def load(self, fn):

FILE: ts_forecasting_methods/ts2vec/utils.py
  function pkl_save (line 8) | def pkl_save(name, var):
  function pkl_load (line 12) | def pkl_load(name):
  function torch_pad_nan (line 16) | def torch_pad_nan(arr, left=0, right=0, dim=0):
  function pad_nan_to_target (line 27) | def pad_nan_to_target(array, target_length, axis=0, both_side=False):
  function split_with_nan (line 39) | def split_with_nan(x, sections, axis=0):
  function take_per_row (line 47) | def take_per_row(A, indx, num_elem):
  function centerize_vary_length_series (line 51) | def centerize_vary_length_series(x):
  function data_dropout (line 60) | def data_dropout(arr, p):
  function name_
Condensed preview — 486 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (3,948K chars).
[
  {
    "path": ".idea/.gitignore",
    "chars": 176,
    "preview": "# Default ignored files\n/shelf/\n/workspace.xml\n# Editor-based HTTP Client requests\n/httpRequests/\n# Datasource local sto"
  },
  {
    "path": ".idea/deployment.xml",
    "chars": 2405,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"PublishConfigData\" remoteFilesAllowedToD"
  },
  {
    "path": ".idea/inspectionProfiles/Project_Default.xml",
    "chars": 859,
    "preview": "<component name=\"InspectionProjectProfileManager\">\n  <profile version=\"1.0\">\n    <option name=\"myName\" value=\"Project De"
  },
  {
    "path": ".idea/inspectionProfiles/profiles_settings.xml",
    "chars": 174,
    "preview": "<component name=\"InspectionProjectProfileManager\">\n  <settings>\n    <option name=\"USE_PROJECT_PROFILE\" value=\"false\" />\n"
  },
  {
    "path": ".idea/modules.xml",
    "chars": 284,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n   "
  },
  {
    "path": ".idea/time-series-ptms.iml",
    "chars": 443,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager"
  },
  {
    "path": ".idea/vcs.xml",
    "chars": 180,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping dire"
  },
  {
    "path": "README.md",
    "chars": 7774,
    "preview": "# [A Survey on Time-Series Pre-Trained Models](https://arxiv.org/pdf/2305.10716v2)\n\nThis is the training code for our pa"
  },
  {
    "path": "ts_anomaly_detection_methods/README.md",
    "chars": 2269,
    "preview": "This is the time-series anomaly detection training code for our paper *\"A Survey on Time-Series Pre-Trained Models\"*\n\n##"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/ATmodelbatch.py",
    "chars": 8807,
    "preview": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport numpy as np\nfrom utils"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/datautils.py",
    "chars": 3379,
    "preview": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/__init__.py",
    "chars": 31,
    "preview": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/anomaly_transformer_model.py",
    "chars": 4961,
    "preview": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass AnomalyAttentio"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/dilated_conv.py",
    "chars": 1921,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    "
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/encoder.py",
    "chars": 2479,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedCo"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/models/losses.py",
    "chars": 1874,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, "
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/tasks/__init__.py",
    "chars": 112,
    "preview": "from .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart,np_shift,eval_ad_result\n"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/tasks/anomaly_detection.py",
    "chars": 7287,
    "preview": "import numpy as np\nimport time\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport bottleneck as "
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/train.py",
    "chars": 5174,
    "preview": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2V"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/trainATbatch.py",
    "chars": 6628,
    "preview": "import logging\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSample"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/ts2vec.py",
    "chars": 14537,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/anomaly_transformer/utils.py",
    "chars": 5102,
    "preview": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/AT_solver.py",
    "chars": 38991,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\nimport os\nim"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/ATmodelbatch.py",
    "chars": 8757,
    "preview": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport numpy as np\nfrom utils"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/README.md",
    "chars": 3374,
    "preview": "## README_Anomaly_Detection\n\n### Usage\n\n|  ID  |                            Method                            | Year |  "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/dataset_read_test.py",
    "chars": 3963,
    "preview": "import datautils\nimport numpy as np\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\n\ndef get_range_"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/datautils.py",
    "chars": 3331,
    "preview": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/dcdetector_solver.py",
    "chars": 38483,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\n# from utils"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/donut.py",
    "chars": 14997,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/exp_anomaly_detection.py",
    "chars": 25689,
    "preview": "from sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nimport torch.mult"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/hello_test_evo.py",
    "chars": 23,
    "preview": "print(\"Hello World!!!\")"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/lstm_vae.py",
    "chars": 13501,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/AUC.py",
    "chars": 4712,
    "preview": "# used by paper: TSB-UAD as the main evaluator\n# github: https://github.com/johnpaparrizos/TSB-UAD/blob/main/TSB_AD/util"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/Matthews_correlation_coefficient.py",
    "chars": 613,
    "preview": "from sklearn.metrics import confusion_matrix\nimport numpy as np\n\n\ndef MCC(y_test, pred_labels):\n    tn, fp, fn, tp = con"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_affiliation_zone.py",
    "chars": 3529,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom other_anomaly_baselines.metrics.affiliation._integral_interval impor"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_integral_interval.py",
    "chars": 20888,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport math\nfrom other_anomaly_baselines.metrics.affiliation.generics imp"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/_single_ground_truth_event.py",
    "chars": 3574,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport math\nfrom other_anomaly_baselines.metrics.affiliation._affiliation"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/generics.py",
    "chars": 4915,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom itertools import groupby\nfrom operator import itemgetter\nimport math"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/affiliation/metrics.py",
    "chars": 5292,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom other_anomaly_baselines.metrics.affiliation.generics import (\n      "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/combine_all_scores.py",
    "chars": 2988,
    "preview": "from f1_score_f1_pa import *\nfrom fc_score import *\nfrom precision_at_k import *\nfrom customizable_f1_score import *\nfro"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/customizable_f1_score.py",
    "chars": 3032,
    "preview": "# used by paper: Exathlon: A Benchmark for Explainable Anomaly Detection over Time Series_VLDB 2021\n# github: https://gi"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluate_utils.py",
    "chars": 1891,
    "preview": "import numpy as np\nfrom statsmodels.tsa.stattools import acf\nfrom scipy.signal import argrelextrema\n\n\ndef get_composite_"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/evaluator.py",
    "chars": 27920,
    "preview": "import logging\nimport os\nimport pickle\nimport copy\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom logger_conf"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_score_f1_pa.py",
    "chars": 3228,
    "preview": "import numpy as np\nfrom sklearn.metrics import precision_recall_curve, roc_curve, auc, roc_auc_score, precision_score, r"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/f1_series.py",
    "chars": 5683,
    "preview": "from fc_score import *\nfrom f1_score_f1_pa import *\nfrom evaluate_utils import *\n\ndefault_thres_config = {\"top_k_time\": "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/fc_score.py",
    "chars": 1622,
    "preview": "import numpy as np\nfrom sklearn.metrics import precision_score\n\n\ndef get_events(y_test, outlier=1, normal=0):\n    events"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/metrics.py",
    "chars": 2390,
    "preview": "from other_anomaly_baselines.metrics.f1_score_f1_pa import *\nfrom other_anomaly_baselines.metrics.fc_score import *\nfrom"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/precision_at_k.py",
    "chars": 495,
    "preview": "# k is defined as the number of anomalies\n# only calculate the range top k not the whole set\nimport numpy as np\n\n\ndef pr"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/robustness_eval.py",
    "chars": 13040,
    "preview": "from random import shuffle\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimpo"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/analysis/score_computation.py",
    "chars": 7503,
    "preview": "\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport pandas as pd\nfrom tqdm "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/metrics.py",
    "chars": 489,
    "preview": "from .utils.metrics import metricor\nfrom .analysis.robustness_eval import generate_curve\n\n\ndef get_range_vus_roc(score, "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/distance.py",
    "chars": 29323,
    "preview": "# -*- coding: utf-8 -*-\n\"\"\"Classes of distance measure for model type A\n\"\"\"\n\nimport numpy as np\n# import matplotlib.pypl"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/models/feature.py",
    "chars": 12497,
    "preview": "# -*- coding: utf-8 -*-\n\"\"\"Classes of feature mapping for model type B\n\"\"\"\n\nimport numpy as np\n# import matplotlib.pyplo"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/metrics.py",
    "chars": 14238,
    "preview": "from sklearn import metrics\nimport numpy as np\nimport math\n# import matplotlib.pyplot as plt\n\nclass metricor:\n    def __"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/metrics/vus/utils/slidingWindows.py",
    "chars": 716,
    "preview": "from statsmodels.tsa.stattools import acf\nfrom scipy.signal import argrelextrema\nimport numpy as np\n\nimport matplotlib.p"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/AnomalyTransformer.py",
    "chars": 8743,
    "preview": "import numpy as np\nimport math\nfrom math import sqrt\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functi"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/DCdetector.py",
    "chars": 11861,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom einops import rearrange\nimport torch\nimport torc"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/GPT4TS.py",
    "chars": 14627,
    "preview": "import torch.nn.functional as F\n\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\nfrom einops import rearran"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/TimesNet.py",
    "chars": 14057,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nimport math\n\n\nclass Inception_Block_"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/__init__.py",
    "chars": 31,
    "preview": "from .encoder import TSEncoder\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/dilated_conv.py",
    "chars": 1921,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass SamePadConv(nn.Module):\n    "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/donut_model.py",
    "chars": 5366,
    "preview": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass VariationalNet(nn.Module):\n    '''\n    En"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/encoder.py",
    "chars": 2479,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom .dilated_conv import DilatedCo"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/losses.py",
    "chars": 1874,
    "preview": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef hierarchical_contrastive_loss(z1, z2, alpha=0.5, "
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/models/lstm_vae_model.py",
    "chars": 5933,
    "preview": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass LSTM_Encoder(nn.Module):\n    '''\n    Enco"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/new_dataset_read_test.py",
    "chars": 615,
    "preview": "from datasets.data_loader import get_loader_segment\n\n\n\n\nindex = 143\ndatapath = './datasets/'\n\ndataset_name = 'MSL' ##  S"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/at_zeta0.sh",
    "chars": 389,
    "preview": "python train_lstm_vae_multi.py --dataset PSM --save_csv_name train_lstm_vae_multi_0717.csv --gpu 0;\npython train_donut_m"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/at_zeta1.sh",
    "chars": 812,
    "preview": "python train_at_multi.py --anormly_ratio 0.5 --dataset SMD --save_csv_name train_at_multi_0719.csv --cuda cuda:0;\npython"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/generator_sh.py",
    "chars": 5339,
    "preview": "\n\nuni_datasets = ['kpi', 'yahoo']\nmulti_datasets = ['SMD', 'MSL', 'SMAP', 'PSM', 'SWAT', 'NIPS_TS_Swan', 'NIPS_TS_Water'"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/kpi.sh",
    "chars": 213,
    "preview": "python train_at_uni.py --dataset kpi --batch_size 8 --save_csv_name train_at_uni_0720_.csv --cuda cuda:0;\npython train_a"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/multi_at.sh",
    "chars": 3768,
    "preview": "python train_lstm_vae_multi.py --dataset SMD --save_csv_name train_lstm_vae_multi_0717.csv --gpu 1;\npython train_donut_m"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at.sh",
    "chars": 752,
    "preview": "python train_dcdetector.py --index 38;\npython train_dcdetector.py --index 54;\npython train_dcdetector.py --index 71;\npyt"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_0.sh",
    "chars": 55214,
    "preview": "python train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_timesnet_ucr_0717.csv --gpu 0;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_1.sh",
    "chars": 29142,
    "preview": "python train_gpt4ts.py --anomaly_ratio 0.5 --data UCR --index 1 --save_csv_name train_gpt4ts_ucr_0717.csv --gpu 1;\npytho"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_delta_1_2.sh",
    "chars": 5639,
    "preview": "python train_timesnet.py --anomaly_ratio 0.5 --data UCR --index 35 --save_csv_name train_timesnet_ucr_0719.csv --gpu 1;\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/ucr_at_zeta0.sh",
    "chars": 4810,
    "preview": "python train_at_multi.py --anormly_ratio 0.5 --dataset UCR --index 214 --save_csv_name train_at_multi_ucr_0719.csv --cud"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/uni_at.sh",
    "chars": 696,
    "preview": "python train_gpt4ts_uni.py --anomaly_ratio 1 --data kpi --save_csv_name train_gpt4ts_uni_hm_0720.csv --gpu 1;\npython tra"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/scripts/yahoo.sh",
    "chars": 243,
    "preview": "python train_dcdetector_nui.py --anormly_ratio 1 --dataset kpi --save_csv_name train_dcdetector_nui_hm_0720.csv --gpu 0;"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/spot.py",
    "chars": 30650,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 12 10:08:16 2016\n\n@author: Alban Siffer \n@company:"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/tasks/__init__.py",
    "chars": 88,
    "preview": "from .anomaly_detection import eval_anomaly_detection, eval_anomaly_detection_coldstart\n"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/tasks/anomaly_detection.py",
    "chars": 18955,
    "preview": "import numpy as np\nimport time\nimport bottleneck as bn\nfrom sklearn.metrics import f1_score, precision_score, recall_sco"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train.py",
    "chars": 5135,
    "preview": "import torch\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport time\nimport datetime\nfrom ts2vec import TS2V"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/trainATbatch.py",
    "chars": 12218,
    "preview": "import logging\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, SequentialSample"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_at_multi.py",
    "chars": 4427,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_at_uni.py",
    "chars": 6759,
    "preview": "import os\nimport sys\n\nimport numpy as np\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split("
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector.py",
    "chars": 6769,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dcdetector_nui.py",
    "chars": 8663,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_donut.py",
    "chars": 7534,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_donut_multi.py",
    "chars": 7891,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot.py",
    "chars": 9837,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_dspot_multi.py",
    "chars": 10664,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_gpt4ts.py",
    "chars": 11471,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_gpt4ts_uni.py",
    "chars": 14498,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae.py",
    "chars": 8456,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_lstm_vae_multi.py",
    "chars": 8877,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_spot.py",
    "chars": 10151,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_spot_multi.py",
    "chars": 11739,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_timesnet.py",
    "chars": 10870,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_timesnet_uni.py",
    "chars": 13068,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec.py",
    "chars": 8137,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/train_ts2vec_multi.py",
    "chars": 8600,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/ts2vec.py",
    "chars": 14857,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nf"
  },
  {
    "path": "ts_anomaly_detection_methods/other_anomaly_baselines/utils.py",
    "chars": 4936,
    "preview": "import os\nimport numpy as np\nimport pickle\nimport torch\nimport random\nfrom datetime import datetime\n\ndef pkl_save(name, "
  },
  {
    "path": "ts_classification_methods/.gitignore",
    "chars": 690,
    "preview": "*.log\n\ndilated_result\nfcn_result\nfcn_result_v2\nresult_v2\nrnn_result\n__pychache__\ndata/__pychache__\nlogs_v2\nlogs_v3\nlogs\n"
  },
  {
    "path": "ts_classification_methods/README.md",
    "chars": 6060,
    "preview": "# A Survey on Time-Series Pre-Trained Models\n\nThis is the training code for our paper *\"A Survey on Time-Series Pre-Trai"
  },
  {
    "path": "ts_classification_methods/data/__init__.py",
    "chars": 28,
    "preview": "from .preprocessing import *"
  },
  {
    "path": "ts_classification_methods/data/dataloader.py",
    "chars": 1145,
    "preview": "import torch\nimport torch.utils.data as data\n\n\n# Dataset 仅用来加载5 fold中的一个fold\nclass UCRDataset(data.Dataset):\n    def __i"
  },
  {
    "path": "ts_classification_methods/data/preprocessing.py",
    "chars": 4526,
    "preview": "import os\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.io.arff import loadarff\nfrom sklearn.model_selection import"
  },
  {
    "path": "ts_classification_methods/environment.yaml",
    "chars": 372,
    "preview": "name: from_transfer_to_transformer\nchannels:\n  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/\n  - https://mi"
  },
  {
    "path": "ts_classification_methods/gpt4ts/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/gpt4ts/gpt4ts_utils.py",
    "chars": 6151,
    "preview": "import os\nimport torch.utils.data as data\nimport numpy as np\nimport pandas as pd\nfrom scipy.io.arff import loadarff\nfrom"
  },
  {
    "path": "ts_classification_methods/gpt4ts/main_gpt4ts.py",
    "chars": 9944,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/gpt4ts/main_gpt4ts_ucr.py",
    "chars": 10037,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/embed.py",
    "chars": 6805,
    "preview": "import torch\nimport torch.nn as nn\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, m"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/gpt4ts.py",
    "chars": 2877,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformers.models.gpt2.modeling_gpt2 import G"
  },
  {
    "path": "ts_classification_methods/gpt4ts/models/loss.py",
    "chars": 2465,
    "preview": "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\ndef get_loss_module(config):\n\n    task = confi"
  },
  {
    "path": "ts_classification_methods/gpt4ts/scripts/generator_gpt4ts.py",
    "chars": 3518,
    "preview": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Crick"
  },
  {
    "path": "ts_classification_methods/model/__init__.py",
    "chars": 25,
    "preview": "from .tsm_model import *\n"
  },
  {
    "path": "ts_classification_methods/model/loss.py",
    "chars": 163,
    "preview": "import torch.nn as nn\n\n\ndef cross_entropy():\n    loss = nn.CrossEntropyLoss()\n    return loss\n\n\ndef reconstruction_loss("
  },
  {
    "path": "ts_classification_methods/model/tsm_model.py",
    "chars": 10564,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.utils as utils\n\n\n# (B, C, T) -> (B, C, T-s)\nclass Chomp1d(nn.Module):"
  },
  {
    "path": "ts_classification_methods/patchtst/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/patchtst/main_patchtst_iota.py",
    "chars": 12650,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/patchtst/main_patchtst_ucr.py",
    "chars": 13412,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/patchtst/mian_patchtst.py",
    "chars": 12736,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/patchtst/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/patchtst/models/attention.py",
    "chars": 6105,
    "preview": "import torch\nfrom torch import nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom typing import Optional\n\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/basics.py",
    "chars": 1738,
    "preview": "__all__ = ['Transpose', 'LinBnDrop', 'SigmoidRange', 'sigmoid_range', 'get_activation_fn']\n\nimport torch\nfrom torch impo"
  },
  {
    "path": "ts_classification_methods/patchtst/models/heads.py",
    "chars": 3757,
    "preview": "import torch\nfrom torch import nn\n\n\nclass LinearRegressionHead(nn.Module):\n    def __init__(self, n_vars, d_model, outpu"
  },
  {
    "path": "ts_classification_methods/patchtst/models/patchTST.py",
    "chars": 13404,
    "preview": "__all__ = ['PatchTST']\n\nfrom patchtst.models.pos_encoding import *\nfrom patchtst.models.basics import *\nfrom patchtst.mo"
  },
  {
    "path": "ts_classification_methods/patchtst/models/pos_encoding.py",
    "chars": 1668,
    "preview": "__all__ = ['PositionalEncoding', 'SinCosPosEncoding', 'positional_encoding']\n\n# Cell\n\nimport torch\nfrom torch import nn\n"
  },
  {
    "path": "ts_classification_methods/patchtst/models/revin.py",
    "chars": 1693,
    "preview": "import torch\nfrom torch import nn\n\nclass RevIN(nn.Module):\n    def __init__(self, num_features: int, eps=1e-5, affine=Tr"
  },
  {
    "path": "ts_classification_methods/patchtst/patch_mask.py",
    "chars": 11529,
    "preview": "\nfrom torch import nn\nimport torch\n\n\nDTYPE = torch.float32\n\n\nclass GetAttr:\n    \"Inherit from this to have all attr acce"
  },
  {
    "path": "ts_classification_methods/patchtst/scripts/generator_patchtst.py",
    "chars": 3926,
    "preview": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Crick"
  },
  {
    "path": "ts_classification_methods/scripts/dilated_single_norm.sh",
    "chars": 149096,
    "preview": "python train.py --backbone dilated --classifier linear --classifier_input 320 --dataroot /SSD/lz/UCRArchive_2018 --norma"
  },
  {
    "path": "ts_classification_methods/scripts/fcn_lin_set_norm.sh",
    "chars": 34330,
    "preview": "python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize"
  },
  {
    "path": "ts_classification_methods/scripts/fcn_lin_single_norm.sh",
    "chars": 34330,
    "preview": "python train.py --backbone fcn --classifier linear --classifier_input 128 --dataroot /SSD/lz/UCRArchive_2018 --normalize"
  },
  {
    "path": "ts_classification_methods/scripts/generator_dilated.py",
    "chars": 4475,
    "preview": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n "
  },
  {
    "path": "ts_classification_methods/scripts/generator_fcn.py",
    "chars": 5884,
    "preview": "ucr_dataset = ['ACSF1', 'Adiac', 'AllGestureWiimoteX', 'AllGestureWiimoteY', 'AllGestureWiimoteZ', 'ArrowHead', 'BME',\n "
  },
  {
    "path": "ts_classification_methods/scripts/generator_pretrain_cls.py",
    "chars": 8105,
    "preview": "source_datasets = ['Crop', 'ElectricDevices', 'StarLightCurves', 'Wafer', 'ECG5000', 'TwoPatterns', 'FordA',\n           "
  },
  {
    "path": "ts_classification_methods/scripts/transfer_pretrain_finetune.sh",
    "chars": 893151,
    "preview": "python train.py --backbone fcn --task classification --dataroot /SSD/lz/UCRArchive_2018 --normalize_way single --dataroo"
  },
  {
    "path": "ts_classification_methods/selftime_cls/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/CricketX_config.json",
    "chars": 49,
    "preview": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"3C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/DodgerLoopDay_config.json",
    "chars": 50,
    "preview": "{\n    \"piece_size\": 0.35,\n    \"class_type\": \"5C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/InsectWingbeatSound_config.json",
    "chars": 49,
    "preview": "{\n    \"piece_size\": 0.4,\n    \"class_type\": \"6C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/MFPT_config.json",
    "chars": 49,
    "preview": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/UWaveGestureLibraryAll_config.json",
    "chars": 49,
    "preview": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/config/XJTU_config.json",
    "chars": 49,
    "preview": "{\n    \"piece_size\": 0.2,\n    \"class_type\": \"4C\"\n}"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/TSC_data_loader.py",
    "chars": 978,
    "preview": "\nfrom sklearn import preprocessing\nimport numpy as np\n\n\ndef set_nan_to_zero(a):\n    where_are_NaNs = np.isnan(a)\n    a[w"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/__init__.py",
    "chars": 24,
    "preview": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataloader/ucr2018.py",
    "chars": 8307,
    "preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch.utils.data as data\n'''\nfrom TSC_data_loader import TSC_data_loa"
  },
  {
    "path": "ts_classification_methods/selftime_cls/dataprepare.py",
    "chars": 2924,
    "preview": "import pandas as pd\nfrom sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\nfrom sklearn import prep"
  },
  {
    "path": "ts_classification_methods/selftime_cls/evaluation/__init__.py",
    "chars": 24,
    "preview": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/evaluation/eval_ssl.py",
    "chars": 4682,
    "preview": "# -*- coding: utf-8 -*-\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import UCR2018\nimpor"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/__init__.py",
    "chars": 24,
    "preview": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/model_RelationalReasoning.py",
    "chars": 12733,
    "preview": "# -*- coding: utf-8 -*-\n\nimport torch\nfrom optim.pytorchtools import EarlyStopping\nimport torch.nn as nn\n\n\nclass Relatio"
  },
  {
    "path": "ts_classification_methods/selftime_cls/model/model_backbone.py",
    "chars": 2255,
    "preview": "# -*- coding: utf-8 -*-\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SimConv4"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/__init__.py",
    "chars": 24,
    "preview": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/pretrain.py",
    "chars": 9018,
    "preview": "# -*- coding: utf-8 -*-\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import *\nimport torc"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/pytorchtools.py",
    "chars": 1971,
    "preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\n\nclass EarlyStopping:\n    \"\"\"Early stops the training if valida"
  },
  {
    "path": "ts_classification_methods/selftime_cls/optim/train.py",
    "chars": 5968,
    "preview": "# -*- coding: utf-8 -*-\n\n\nimport torch\nimport utils.transforms as transforms\nfrom dataloader.ucr2018 import UCR2018\nimpo"
  },
  {
    "path": "ts_classification_methods/selftime_cls/scripts/ucr.sh",
    "chars": 11674,
    "preview": "python -u train_ssl.py --dataset_name Herring --model_name SelfTime --random_seed 42\npython -u train_ssl.py --dataset_na"
  },
  {
    "path": "ts_classification_methods/selftime_cls/train_ssl.py",
    "chars": 6318,
    "preview": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom evaluation.eval_ssl import evaluation\nfrom utils.utils import get_confi"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/__init__.py",
    "chars": 24,
    "preview": "# -*- coding: utf-8 -*-\n"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/augmentation.py",
    "chars": 23789,
    "preview": "import numpy as np\nfrom tqdm import tqdm\nimport utils.helper as hlp\n\n\n\ndef slidewindow(ts, horizon=.2, stride=0.2):\n    "
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/datasets.py",
    "chars": 451,
    "preview": "def nb_dims(dataset):\n    if dataset in [\"unipen1a\", \"unipen1b\", \"unipen1c\"]:\n        return 2\n    return 1\n\ndef nb_clas"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/helper.py",
    "chars": 938,
    "preview": "import numpy as np\n\ndef plot2d(x, y, x2=None, y2=None, x3=None, y3=None, xlim=(-1, 1), ylim=(-1, 1), save_file=\"\"):\n    "
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/transforms.py",
    "chars": 5192,
    "preview": "import random\nimport torch\nfrom utils.augmentation import *\n\n\nclass Raw:\n    def __init__(self):\n        pass\n\n    def _"
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/utils.py",
    "chars": 362,
    "preview": "# -*- coding: utf-8 -*-\n\nimport json\n\ndef get_config_from_json(json_file):\n    \"\"\"\n    Get the config from a json file\n "
  },
  {
    "path": "ts_classification_methods/selftime_cls/utils/utils_plot.py",
    "chars": 857,
    "preview": "# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\ndef show_samples(X_train, y_train"
  },
  {
    "path": "ts_classification_methods/test/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/test/train_uea_test.py",
    "chars": 11979,
    "preview": "import argparse\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom data.dataload"
  },
  {
    "path": "ts_classification_methods/test/uea_test.py",
    "chars": 1713,
    "preview": "import numpy as np\nimport torch\n\nfrom data.preprocessing import fill_nan_value, normalize_uea_set\nfrom data.preprocessin"
  },
  {
    "path": "ts_classification_methods/timesnet/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/timesnet/main_timesnet.py",
    "chars": 17445,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/timesnet/main_timesnet_ucr.py",
    "chars": 17481,
    "preview": "import os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Conv_Blocks.py",
    "chars": 2367,
    "preview": "import torch\nimport torch.nn as nn\n\n\nclass Inception_Block_V1(nn.Module):\n    def __init__(self, in_channels, out_channe"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Embed.py",
    "chars": 6942,
    "preview": "import torch\nimport torch.nn as nn\nimport math\n\n\nclass PositionalEmbedding(nn.Module):\n    def __init__(self, d_model, m"
  },
  {
    "path": "ts_classification_methods/timesnet/models/SelfAttention_Family.py",
    "chars": 12829,
    "preview": "import torch\nimport torch.nn as nn\nimport numpy as np\nfrom math import sqrt\nfrom einops import rearrange, repeat\n\n\nclass"
  },
  {
    "path": "ts_classification_methods/timesnet/models/TimesNet.py",
    "chars": 9337,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom timesnet.models.Embed import Da"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Transformer.py",
    "chars": 6050,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.fft\nfrom timesnet.models.Embed import Da"
  },
  {
    "path": "ts_classification_methods/timesnet/models/Transformer_EncDec.py",
    "chars": 4929,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvLayer(nn.Module):\n    def __init__(self, "
  },
  {
    "path": "ts_classification_methods/timesnet/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/timesnet/scripts/generator_timesnet.py",
    "chars": 3874,
    "preview": "uea_all = ['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions', 'CharacterTrajectories',\n           'Crick"
  },
  {
    "path": "ts_classification_methods/tloss_cls/default_hyperparameters.json",
    "chars": 328,
    "preview": "{\n    \"batch_size\": 10,\n    \"channels\": 40,\n    \"compared_length\": null,\n    \"depth\": 10,\n    \"nb_steps\": 600,\n    \"in_c"
  },
  {
    "path": "ts_classification_methods/tloss_cls/losses/__init__.py",
    "chars": 1022,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/losses/triplet_loss.py",
    "chars": 14842,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/__init__.py",
    "chars": 1022,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/causal_cnn.py",
    "chars": 8314,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/networks/lstm.py",
    "chars": 1537,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/scikit_wrappers.py",
    "chars": 30166,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/scripts/ucr.sh",
    "chars": 17946,
    "preview": "python ucr.py --dataset AllGestureWiimoteY --path /dev_data/zzj/hzy/datasets/UCR --hyper ./default_hyperparameters.json "
  },
  {
    "path": "ts_classification_methods/tloss_cls/scripts/uea.sh",
    "chars": 4202,
    "preview": "python uea.py --dataset CharacterTrajectories --path /dev_data/zzj/hzy/datasets/UEA --hyper ./default_hyperparameters.js"
  },
  {
    "path": "ts_classification_methods/tloss_cls/transfer_ucr.py",
    "chars": 3140,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/ucr.py",
    "chars": 10011,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/uea.py",
    "chars": 6664,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/tloss_cls/utils.py",
    "chars": 1744,
    "preview": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE f"
  },
  {
    "path": "ts_classification_methods/train.py",
    "chars": 24309,
    "preview": "import argparse\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom dat"
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "ts_classification_methods/ts2vec_cls/datautils.py",
    "chars": 7056,
    "preview": "import os\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom datetime import datetime\nimport pickle\nf"
  }
]

// ... and 286 more files (download for full content)

About this extraction

This page contains the full source code of the qianlima-lab/time-series-ptms GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 486 files (3.7 MB), approximately 982.3k tokens, and a symbol index with 2458 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!