Full Code of augmentedstartups/AS-One for AI

main 1a6eb0952fdc cached
501 files
3.3 MB
882.7k tokens
4580 symbols
1 requests
Download .txt
Showing preview only (3,525K chars total). Download the full file or copy to clipboard to get everything.
Repository: augmentedstartups/AS-One
Branch: main
Commit: 1a6eb0952fdc
Files: 501
Total size: 3.3 MB

Directory structure:
gitextract_lrbvjsnv/

├── .dockerignore
├── .gitignore
├── Dockerfile
├── LICENCE
├── README.md
├── asone/
│   ├── __init__.py
│   ├── asone.py
│   ├── demo_detector.py
│   ├── demo_ocr.py
│   ├── demo_pose_estimator.py
│   ├── demo_segmentor.py
│   ├── demo_tracker.py
│   ├── detectors/
│   │   ├── __init__.py
│   │   ├── detector.py
│   │   ├── easyocr_detector/
│   │   │   ├── __init__.py
│   │   │   └── text_detector.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── cfg_path.py
│   │   │   ├── coreml_utils.py
│   │   │   ├── exp_name.py
│   │   │   └── weights_path.py
│   │   ├── yolonas/
│   │   │   ├── __init__.py
│   │   │   └── yolonas.py
│   │   ├── yolor/
│   │   │   ├── __init__.py
│   │   │   ├── cfg/
│   │   │   │   ├── yolor_csp.cfg
│   │   │   │   ├── yolor_csp_x.cfg
│   │   │   │   └── yolor_p6.cfg
│   │   │   ├── models/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── common.py
│   │   │   │   ├── export.py
│   │   │   │   └── models.py
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── activations.py
│   │   │   │   ├── autoanchor.py
│   │   │   │   ├── datasets.py
│   │   │   │   ├── export.py
│   │   │   │   ├── general.py
│   │   │   │   ├── google_utils.py
│   │   │   │   ├── layers.py
│   │   │   │   ├── loss.py
│   │   │   │   ├── metrics.py
│   │   │   │   ├── parse_config.py
│   │   │   │   ├── plots.py
│   │   │   │   ├── torch_utils.py
│   │   │   │   └── yolor_utils.py
│   │   │   └── yolor_detector.py
│   │   ├── yolov5/
│   │   │   ├── __init__.py
│   │   │   ├── yolov5/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── export.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   ├── general.py
│   │   │   │   │   ├── tf.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── activations.py
│   │   │   │       ├── augmentations.py
│   │   │   │       ├── dataloaders.py
│   │   │   │       ├── downloads.py.py
│   │   │   │       ├── general.py
│   │   │   │       ├── metrics.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov5_utils.py
│   │   │   └── yolov5_detector.py
│   │   ├── yolov6/
│   │   │   ├── __init__.py
│   │   │   ├── yolov6/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── assigners/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── anchor_generator.py
│   │   │   │   │   ├── assigner_utils.py
│   │   │   │   │   ├── atss_assigner.py
│   │   │   │   │   ├── iou2d_calculator.py
│   │   │   │   │   └── tal_assigner.py
│   │   │   │   ├── layers/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   └── dbb_transforms.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── efficientrep.py
│   │   │   │   │   ├── effidehead.py
│   │   │   │   │   ├── end2end.py
│   │   │   │   │   ├── loss.py
│   │   │   │   │   ├── loss_distill.py
│   │   │   │   │   ├── reppan.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── checkpoint.py
│   │   │   │       ├── events.py
│   │   │   │       ├── figure_iou.py
│   │   │   │       ├── general.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov6_utils.py
│   │   │   └── yolov6_detector.py
│   │   ├── yolov7/
│   │   │   ├── __init__.py
│   │   │   ├── yolov7/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov7_utils.py
│   │   │   └── yolov7_detector.py
│   │   ├── yolov8/
│   │   │   ├── __init__.py
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   └── yolov8_utils.py
│   │   │   └── yolov8_detector.py
│   │   ├── yolov9/
│   │   │   ├── __init__.py
│   │   │   ├── export.py
│   │   │   ├── yolov9/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   ├── tf.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── activations.py
│   │   │   │       ├── augmentations.py
│   │   │   │       ├── autoanchor.py
│   │   │   │       ├── autobatch.py
│   │   │   │       ├── callbacks.py
│   │   │   │       ├── coco_utils.py
│   │   │   │       ├── dataloaders.py
│   │   │   │       ├── downloads.py
│   │   │   │       ├── general.py
│   │   │   │       ├── lion.py
│   │   │   │       ├── loss.py
│   │   │   │       ├── loss_tal.py
│   │   │   │       ├── loss_tal_dual.py
│   │   │   │       ├── loss_tal_triple.py
│   │   │   │       ├── metrics.py
│   │   │   │       ├── plots.py
│   │   │   │       ├── segment/
│   │   │   │       │   ├── __init__.py
│   │   │   │       │   ├── augmentations.py
│   │   │   │       │   ├── dataloaders.py
│   │   │   │       │   ├── general.py
│   │   │   │       │   ├── loss.py
│   │   │   │       │   ├── loss_tal.py
│   │   │   │       │   ├── loss_tal_dual.py
│   │   │   │       │   ├── metrics.py
│   │   │   │       │   ├── plots.py
│   │   │   │       │   └── tal/
│   │   │   │       │       ├── __init__.py
│   │   │   │       │       ├── anchor_generator.py
│   │   │   │       │       └── assigner.py
│   │   │   │       ├── tal/
│   │   │   │       │   ├── __init__.py
│   │   │   │       │   ├── anchor_generator.py
│   │   │   │       │   └── assigner.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       ├── triton.py
│   │   │   │       └── yolov9_utils.py
│   │   │   └── yolov9_detector.py
│   │   └── yolox/
│   │       ├── __init__.py
│   │       ├── exps/
│   │       │   ├── __init__.py
│   │       │   ├── yolov3.py
│   │       │   ├── yolox_l.py
│   │       │   ├── yolox_m.py
│   │       │   ├── yolox_nano.py
│   │       │   ├── yolox_s.py
│   │       │   ├── yolox_tiny.py
│   │       │   └── yolox_x.py
│   │       ├── yolox/
│   │       │   ├── __init__.py
│   │       │   ├── core/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── launch.py
│   │       │   │   └── trainer.py
│   │       │   ├── data/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── data_augment.py
│   │       │   │   ├── data_prefetcher.py
│   │       │   │   ├── dataloading.py
│   │       │   │   ├── datasets/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── coco.py
│   │       │   │   │   ├── coco_classes.py
│   │       │   │   │   ├── datasets_wrapper.py
│   │       │   │   │   ├── mosaicdetection.py
│   │       │   │   │   ├── voc.py
│   │       │   │   │   └── voc_classes.py
│   │       │   │   └── samplers.py
│   │       │   ├── evaluators/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── coco_evaluator.py
│   │       │   │   ├── voc_eval.py
│   │       │   │   └── voc_evaluator.py
│   │       │   ├── exp/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── base_exp.py
│   │       │   │   ├── build.py
│   │       │   │   ├── default/
│   │       │   │   │   └── __init__.py
│   │       │   │   └── yolox_base.py
│   │       │   ├── models/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── build.py
│   │       │   │   ├── darknet.py
│   │       │   │   ├── losses.py
│   │       │   │   ├── network_blocks.py
│   │       │   │   ├── yolo_fpn.py
│   │       │   │   ├── yolo_head.py
│   │       │   │   ├── yolo_pafpn.py
│   │       │   │   └── yolox.py
│   │       │   └── utils/
│   │       │       ├── __init__.py
│   │       │       ├── allreduce_norm.py
│   │       │       ├── boxes.py
│   │       │       ├── checkpoint.py
│   │       │       ├── compat.py
│   │       │       ├── demo_utils.py
│   │       │       ├── dist.py
│   │       │       ├── ema.py
│   │       │       ├── logger.py
│   │       │       ├── lr_scheduler.py
│   │       │       ├── metric.py
│   │       │       ├── model_utils.py
│   │       │       ├── setup_env.py
│   │       │       └── visualize.py
│   │       ├── yolox_detector.py
│   │       └── yolox_utils.py
│   ├── linux/
│   │   ├── Instructions/
│   │   │   ├── Benchmarking.md
│   │   │   ├── Demo-Detectron2.md
│   │   │   ├── Docker-Setup.md
│   │   │   ├── Driver-Installations.md
│   │   │   ├── Manual-Build.md
│   │   │   └── Manual-Installation.md
│   │   ├── README.md
│   │   ├── docker-installation.sh
│   │   └── main.py
│   ├── pose_estimator.py
│   ├── pose_estimators/
│   │   ├── __init__.py
│   │   ├── yolov7_pose/
│   │   │   ├── __init__.py
│   │   │   ├── main.py
│   │   │   ├── models/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── common.py
│   │   │   │   ├── experimental.py
│   │   │   │   └── yolo.py
│   │   │   ├── requirements.txt
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── activations.py
│   │   │   │   ├── add_nms.py
│   │   │   │   ├── autoanchor.py
│   │   │   │   ├── datasets.py
│   │   │   │   ├── general.py
│   │   │   │   ├── google_utils.py
│   │   │   │   ├── loss.py
│   │   │   │   ├── metrics.py
│   │   │   │   ├── plots.py
│   │   │   │   ├── torch_utils.py
│   │   │   │   ├── wandb_logging/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── log_dataset.py
│   │   │   │   │   └── wandb_utils.py
│   │   │   │   └── yolov7_pose_utils.py
│   │   │   └── yolov7.py
│   │   └── yolov8_pose/
│   │       ├── __init__.py
│   │       ├── plots.py
│   │       └── yolov8.py
│   ├── recognizers/
│   │   ├── __init__.py
│   │   ├── easyocr_recognizer/
│   │   │   ├── __init__.py
│   │   │   └── easyocr_recognizer.py
│   │   ├── recognizer.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       └── recognizer_name.py
│   ├── schemas/
│   │   └── output_schemas.py
│   ├── segmentors/
│   │   ├── __init__.py
│   │   ├── segment_anything/
│   │   │   ├── __init__.py
│   │   │   └── sam.py
│   │   ├── segmentor.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       └── weights_path.py
│   ├── trackers/
│   │   ├── __init__.py
│   │   ├── byte_track/
│   │   │   ├── __init__.py
│   │   │   ├── bytetracker.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── basetrack.py
│   │   │       ├── byte_tracker.py
│   │   │       ├── kalman_filter.py
│   │   │       └── matching.py
│   │   ├── deep_sort/
│   │   │   ├── __init__.py
│   │   │   ├── deepsort.py
│   │   │   └── tracker/
│   │   │       ├── .gitignore
│   │   │       ├── README.md
│   │   │       ├── __init__.py
│   │   │       ├── deep/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── evaluate.py
│   │   │       │   ├── feature_extractor.py
│   │   │       │   ├── model.py
│   │   │       │   ├── original_model.py
│   │   │       │   ├── test.py
│   │   │       │   └── train.py
│   │   │       ├── deep_sort.py
│   │   │       ├── parser.py
│   │   │       └── sort/
│   │   │           ├── __init__.py
│   │   │           ├── detection.py
│   │   │           ├── iou_matching.py
│   │   │           ├── kalman_filter.py
│   │   │           ├── linear_assignment.py
│   │   │           ├── nn_matching.py
│   │   │           ├── preprocessing.py
│   │   │           ├── track.py
│   │   │           └── tracker.py
│   │   ├── motpy/
│   │   │   ├── __init__.py
│   │   │   └── motpy.py
│   │   ├── nor_fair/
│   │   │   ├── __init__.py
│   │   │   └── norfair.py
│   │   ├── oc_sort/
│   │   │   ├── __init__.py
│   │   │   ├── ocsort.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── association.py
│   │   │       ├── kalmanfilter.py
│   │   │       └── ocsort.py
│   │   ├── strong_sort/
│   │   │   ├── __init__.py
│   │   │   ├── strongsort.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── configs/
│   │   │       │   └── strong_sort.yaml
│   │   │       ├── deep/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── checkpoint/
│   │   │       │   │   └── .gitkeep
│   │   │       │   ├── reid/
│   │   │       │   │   ├── .flake8
│   │   │       │   │   ├── .gitignore
│   │   │       │   │   ├── .isort.cfg
│   │   │       │   │   ├── .style.yapf
│   │   │       │   │   ├── LICENSE
│   │   │       │   │   ├── README.rst
│   │   │       │   │   ├── configs/
│   │   │       │   │   │   ├── im_osnet_ain_x1_0_softmax_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   ├── im_osnet_ibn_x1_0_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_25_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_5_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_75_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x1_0_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   ├── im_r50_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   └── im_r50fc512_softmax_256x128_amsgrad.yaml
│   │   │       │   │   ├── docs/
│   │   │       │   │   │   ├── AWESOME_REID.md
│   │   │       │   │   │   ├── MODEL_ZOO.md
│   │   │       │   │   │   ├── Makefile
│   │   │       │   │   │   ├── conf.py
│   │   │       │   │   │   ├── datasets.rst
│   │   │       │   │   │   ├── evaluation.rst
│   │   │       │   │   │   ├── index.rst
│   │   │       │   │   │   ├── pkg/
│   │   │       │   │   │   │   ├── data.rst
│   │   │       │   │   │   │   ├── engine.rst
│   │   │       │   │   │   │   ├── losses.rst
│   │   │       │   │   │   │   ├── metrics.rst
│   │   │       │   │   │   │   ├── models.rst
│   │   │       │   │   │   │   ├── optim.rst
│   │   │       │   │   │   │   └── utils.rst
│   │   │       │   │   │   └── user_guide.rst
│   │   │       │   │   ├── linter.sh
│   │   │       │   │   ├── projects/
│   │   │       │   │   │   ├── DML/
│   │   │       │   │   │   │   ├── README.md
│   │   │       │   │   │   │   ├── default_config.py
│   │   │       │   │   │   │   ├── dml.py
│   │   │       │   │   │   │   ├── im_osnet_x1_0_dml_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   │   └── main.py
│   │   │       │   │   │   ├── OSNet_AIN/
│   │   │       │   │   │   │   ├── README.md
│   │   │       │   │   │   │   ├── default_config.py
│   │   │       │   │   │   │   ├── main.py
│   │   │       │   │   │   │   ├── nas.yaml
│   │   │       │   │   │   │   ├── osnet_child.py
│   │   │       │   │   │   │   ├── osnet_search.py
│   │   │       │   │   │   │   └── softmax_nas.py
│   │   │       │   │   │   ├── README.md
│   │   │       │   │   │   └── attribute_recognition/
│   │   │       │   │   │       ├── README.md
│   │   │       │   │   │       ├── datasets/
│   │   │       │   │   │       │   ├── __init__.py
│   │   │       │   │   │       │   ├── dataset.py
│   │   │       │   │   │       │   └── pa100k.py
│   │   │       │   │   │       ├── default_parser.py
│   │   │       │   │   │       ├── main.py
│   │   │       │   │   │       ├── models/
│   │   │       │   │   │       │   ├── __init__.py
│   │   │       │   │   │       │   └── osnet.py
│   │   │       │   │   │       └── train.sh
│   │   │       │   │   ├── scripts/
│   │   │       │   │   │   ├── default_config.py
│   │   │       │   │   │   └── main.py
│   │   │       │   │   ├── setup.py
│   │   │       │   │   ├── tools/
│   │   │       │   │   │   ├── compute_mean_std.py
│   │   │       │   │   │   ├── parse_test_res.py
│   │   │       │   │   │   └── visualize_actmap.py
│   │   │       │   │   └── torchreid/
│   │   │       │   │       ├── __init__.py
│   │   │       │   │       ├── data/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── datamanager.py
│   │   │       │   │       │   ├── datasets/
│   │   │       │   │       │   │   ├── __init__.py
│   │   │       │   │       │   │   ├── dataset.py
│   │   │       │   │       │   │   ├── image/
│   │   │       │   │       │   │   │   ├── __init__.py
│   │   │       │   │       │   │   │   ├── cuhk01.py
│   │   │       │   │       │   │   │   ├── cuhk02.py
│   │   │       │   │       │   │   │   ├── cuhk03.py
│   │   │       │   │       │   │   │   ├── cuhksysu.py
│   │   │       │   │       │   │   │   ├── dukemtmcreid.py
│   │   │       │   │       │   │   │   ├── grid.py
│   │   │       │   │       │   │   │   ├── ilids.py
│   │   │       │   │       │   │   │   ├── market1501.py
│   │   │       │   │       │   │   │   ├── msmt17.py
│   │   │       │   │       │   │   │   ├── prid.py
│   │   │       │   │       │   │   │   ├── sensereid.py
│   │   │       │   │       │   │   │   ├── university1652.py
│   │   │       │   │       │   │   │   └── viper.py
│   │   │       │   │       │   │   └── video/
│   │   │       │   │       │   │       ├── __init__.py
│   │   │       │   │       │   │       ├── dukemtmcvidreid.py
│   │   │       │   │       │   │       ├── ilidsvid.py
│   │   │       │   │       │   │       ├── mars.py
│   │   │       │   │       │   │       └── prid2011.py
│   │   │       │   │       │   ├── sampler.py
│   │   │       │   │       │   └── transforms.py
│   │   │       │   │       ├── engine/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── engine.py
│   │   │       │   │       │   ├── image/
│   │   │       │   │       │   │   ├── __init__.py
│   │   │       │   │       │   │   ├── softmax.py
│   │   │       │   │       │   │   └── triplet.py
│   │   │       │   │       │   └── video/
│   │   │       │   │       │       ├── __init__.py
│   │   │       │   │       │       ├── softmax.py
│   │   │       │   │       │       └── triplet.py
│   │   │       │   │       ├── losses/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── cross_entropy_loss.py
│   │   │       │   │       │   └── hard_mine_triplet_loss.py
│   │   │       │   │       ├── metrics/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── accuracy.py
│   │   │       │   │       │   ├── distance.py
│   │   │       │   │       │   ├── rank.py
│   │   │       │   │       │   └── rank_cylib/
│   │   │       │   │       │       ├── Makefile
│   │   │       │   │       │       ├── __init__.py
│   │   │       │   │       │       ├── rank_cy.pyx
│   │   │       │   │       │       ├── setup.py
│   │   │       │   │       │       └── test_cython.py
│   │   │       │   │       ├── models/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── densenet.py
│   │   │       │   │       │   ├── hacnn.py
│   │   │       │   │       │   ├── inceptionresnetv2.py
│   │   │       │   │       │   ├── inceptionv4.py
│   │   │       │   │       │   ├── mlfn.py
│   │   │       │   │       │   ├── mobilenetv2.py
│   │   │       │   │       │   ├── mudeep.py
│   │   │       │   │       │   ├── nasnet.py
│   │   │       │   │       │   ├── osnet.py
│   │   │       │   │       │   ├── osnet_ain.py
│   │   │       │   │       │   ├── pcb.py
│   │   │       │   │       │   ├── resnet.py
│   │   │       │   │       │   ├── resnet_ibn_a.py
│   │   │       │   │       │   ├── resnet_ibn_b.py
│   │   │       │   │       │   ├── resnetmid.py
│   │   │       │   │       │   ├── senet.py
│   │   │       │   │       │   ├── shufflenet.py
│   │   │       │   │       │   ├── shufflenetv2.py
│   │   │       │   │       │   ├── squeezenet.py
│   │   │       │   │       │   └── xception.py
│   │   │       │   │       ├── optim/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── lr_scheduler.py
│   │   │       │   │       │   ├── optimizer.py
│   │   │       │   │       │   └── radam.py
│   │   │       │   │       └── utils/
│   │   │       │   │           ├── GPU-Re-Ranking/
│   │   │       │   │           │   ├── README.md
│   │   │       │   │           │   ├── extension/
│   │   │       │   │           │   │   ├── adjacency_matrix/
│   │   │       │   │           │   │   │   ├── build_adjacency_matrix.cpp
│   │   │       │   │           │   │   │   ├── build_adjacency_matrix_kernel.cu
│   │   │       │   │           │   │   │   └── setup.py
│   │   │       │   │           │   │   ├── make.sh
│   │   │       │   │           │   │   └── propagation/
│   │   │       │   │           │   │       ├── gnn_propagate.cpp
│   │   │       │   │           │   │       ├── gnn_propagate_kernel.cu
│   │   │       │   │           │   │       └── setup.py
│   │   │       │   │           │   ├── gnn_reranking.py
│   │   │       │   │           │   ├── main.py
│   │   │       │   │           │   └── utils.py
│   │   │       │   │           ├── __init__.py
│   │   │       │   │           ├── avgmeter.py
│   │   │       │   │           ├── feature_extractor.py
│   │   │       │   │           ├── loggers.py
│   │   │       │   │           ├── model_complexity.py
│   │   │       │   │           ├── reidtools.py
│   │   │       │   │           ├── rerank.py
│   │   │       │   │           ├── tools.py
│   │   │       │   │           └── torchtools.py
│   │   │       │   └── reid_model_factory.py
│   │   │       ├── sort/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── detection.py
│   │   │       │   ├── iou_matching.py
│   │   │       │   ├── kalman_filter.py
│   │   │       │   ├── linear_assignment.py
│   │   │       │   ├── nn_matching.py
│   │   │       │   ├── preprocessing.py
│   │   │       │   ├── track.py
│   │   │       │   └── tracker.py
│   │   │       ├── strong_sort.py
│   │   │       └── utils/
│   │   │           ├── __init__.py
│   │   │           ├── asserts.py
│   │   │           ├── draw.py
│   │   │           ├── evaluation.py
│   │   │           ├── io.py
│   │   │           ├── json_logger.py
│   │   │           ├── log.py
│   │   │           ├── parser.py
│   │   │           └── tools.py
│   │   └── tracker.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── classes.py
│   │   ├── colors.py
│   │   ├── counting.py
│   │   ├── default_cfg.py
│   │   ├── download.py
│   │   ├── draw.py
│   │   ├── ponits_conversion.py
│   │   ├── pose_estimators_weights.py
│   │   ├── temp_loader.py
│   │   ├── utils.py
│   │   ├── video_reader.py
│   │   └── weights.json
│   └── windows/
│       ├── README.md
│       ├── cam2ip-1.6-64bit-cv/
│       │   ├── AUTHORS
│       │   └── COPYING
│       ├── enable_feature.bat
│       ├── installation.bat
│       ├── test-display.py
│       └── test-webcam.py
├── docker-compose.yml
├── main.py
├── requirements.txt
└── setup.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .dockerignore
================================================
.env/
results/
**__pycache__**
*.onnx
*.pt
*.mlmodel
**byte_track_results**
**deep_sort_results**
**nor_fair_results**
test_env/

================================================
FILE: .gitignore
================================================
.env/
**__pycache__**
*.onnx
*.pt
*.pth
*.mlmodel
**byte_track_results**
**deep_sort_results**
**nor_fair_results**
build/
dist/
asone.egg-info/
test_custom_tracker.py
custom_weights.py
data/results/
temp/

================================================
FILE: Dockerfile
================================================
FROM pytorch/pytorch:latest

# Set the time zone non-interactively so tzdata-dependent apt packages
# install without prompting during the build.
ENV TZ=Europe/Minsk
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# Update the index and install in ONE layer so a cached `update` layer can
# never be paired with a fresh `install` (stale-index failures), skip
# recommended extras, and purge the apt lists to keep the image small.
RUN apt-get update -y && \
    apt-get install -y --no-install-recommends \
        git gcc g++ python3-opencv vim && \
    rm -rf /var/lib/apt/lists/*

# WORKDIR creates /app itself; no separate mkdir needed.
WORKDIR /app

# COPY (not ADD) for plain files/directories — ADD's URL-fetch and
# tar-extraction semantics are not wanted here.
COPY asone asone

COPY sample_videos sample_videos
COPY main.py main.py
# COPY demo.py demo.py

COPY setup.py setup.py
COPY requirements.txt requirements.txt

# Cython and numpy must already be importable for cython-bbox to build.
# --no-cache-dir keeps pip's download cache out of the image layers.
RUN pip3 install --no-cache-dir Cython numpy
RUN pip3 install --no-cache-dir cython-bbox
COPY pypi_README.md pypi_README.md

# CUDA 11.3 wheels for torch/torchvision, then install the package itself.
RUN pip3 install --no-cache-dir torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
RUN pip3 install --no-cache-dir .

WORKDIR /workspace
# Exec-form CMD: the shell is PID 1's direct child, signals propagate cleanly.
CMD ["/bin/bash"]


================================================
FILE: LICENCE
================================================
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

================================================
FILE: README.md
================================================
# AS-One v2 : A Modular Library for YOLO Object Detection, Segmentation, Tracking & Pose



<div align="center">
  <p>
    <a align="center" href="" target="https://badge.fury.io/py/asone">
      <img
        width="100%"
        src="https://kajabi-storefronts-production.kajabi-cdn.com/kajabi-storefronts-production/file-uploads/themes/2151400015/settings_images/747367e-1d78-eead-2a2-7e5b336a775_Screenshot_2024-05-08_at_13.48.08.jpg" width="100%">
      <a href="https://www.youtube.com/watch?v=K-VcpPwcM8k" style="display:inline-block;padding:10px 20px;background-color:red;color:white;text-decoration:none;font-size:16px;font-weight:bold;border-radius:5px;transition:background-color 0.3s;" target="_blank">Watch Video</a>

    
  </p>

  <br>

  <br>

[![PyPI version](https://badge.fury.io/py/asone.svg)](https://badge.fury.io/py/asone)
[![python-version](https://img.shields.io/pypi/pyversions/supervision)](https://badge.fury.io/py/asone)
[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://drive.google.com/file/d/1xy5P9WGI19-PzRH3ceOmoCgp63K6J_Ls/view?usp=sharing)
[![start with why](https://img.shields.io/badge/version-2.0.0-green)](https://github.com/augmentedstartups/AS-One)
[![GPLv3 License](https://img.shields.io/badge/License-GPL%20v3-yellow.svg)](https://opensource.org/licenses/)

</div>

## 👋 Hello

**UPDATE: ASOne v2 is now out! We've updated with YOLOv9 and SAM**

AS-One is a Python wrapper for multiple detection and tracking algorithms, all in one place. Different trackers such as `ByteTrack`, `DeepSORT` or `NorFair` can be integrated with different versions of `YOLO` with a minimum of code.
This Python wrapper provides YOLO models in `ONNX`, `PyTorch` & `CoreML` flavors. We plan to offer support for future versions of YOLO when they are released.

This is One Library for most of your computer vision needs.

If you would like to dive deeper into YOLO Object Detection and Tracking, then check out our [courses](https://www.augmentedstartups.com/store) and [projects](https://store.augmentedstartups.com)

[<img src="https://s3.amazonaws.com/kajabi-storefronts-production/blogs/22606/images/0FDx83VXSYOY0NAO2kMc_ASOne_Windows_Play.jpg" width="50%">](https://www.youtube.com/watch?v=K-VcpPwcM8k)

Watch the step-by-step tutorial 🤝



## 💻 Install
<details><summary> 🔥 Prerequisites</summary>

- Make sure to install `GPU` drivers on your system if you want to use a `GPU`. Follow [driver installation](asone/linux/Instructions/Driver-Installations.md) for further instructions.
- Make sure you have [MS Build tools](https://aka.ms/vs/17/release/vs_BuildTools.exe) installed on your system if you are using Windows.
- [Download git for Windows](https://git-scm.com/download/win) if it is not already installed.
</details>

```bash
pip install asone
```

On Windows machines, you will need to install the `asone` library from source. Check out the instructions in the `👉 Install from Source` section below to install on Windows.
<details>
<summary> 👉 Install from Source</summary>

### 💾 Clone the Repository

Navigate to an empty folder of your choice.

`git clone https://github.com/augmentedstartups/AS-One.git`

Change Directory to AS-One

`cd AS-One`

<details open>
<summary> 👉 For Linux</summary>


```shell
python3 -m venv .env
source .env/bin/activate

pip install -r requirements.txt

# for CPU
pip install torch torchvision
# for GPU
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
```


</details>

<details>
<summary> 👉 For Windows 10/11</summary>

```shell
python -m venv .env
.env\Scripts\activate
pip install numpy Cython
pip install lap
pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox

pip install asone onnxruntime-gpu==1.12.1
pip install typing_extensions==4.7.1
pip install super-gradients==3.1.3
# for CPU
pip install torch torchvision

# for GPU
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
or
pip install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio===0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
```

</details>
<details>
<summary> 👉 For MacOS</summary>

```shell
python3 -m venv .env
source .env/bin/activate


pip install -r requirements.txt

# for CPU
pip install torch torchvision
```

</details>
</details>

##  Quick Start 🏃‍♂️

Use tracker on sample video.

```python
import asone
from asone import ASOne

model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

for model_output in tracks:
    annotations = ASOne.draw(model_output, display=False)
```


### Run in `Google Colab` 💻


<a href="https://drive.google.com/file/d/1xy5P9WGI19-PzRH3ceOmoCgp63K6J_Ls/view?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>

##  Sample Code Snippets 📃

<details>
<summary>6.1 👉 Object Detection</summary>

```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, use_cuda=True) # Set use_cuda to False for cpu
vid = model.read_video('data/sample_videos/test.mp4')

for img in vid:
    detection = model.detecter(img)
    annotations = ASOne.draw(detection, img=img, display=True)
```

Run the `asone/demo_detector.py` to test detector.

```shell
# run on gpu
python -m asone.demo_detector data/sample_videos/test.mp4

# run on cpu
python -m asone.demo_detector data/sample_videos/test.mp4 --cpu
```


<details>
<summary>6.1.1 👉 Use Custom Trained Weights for Detector</summary>
<!-- ### 6.1.2 Use Custom Trained Weights -->

Use your custom weights of a detector model trained on custom data by simply providing path of the weights file.

```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, weights='data/custom_weights/yolov7_custom.pt', use_cuda=True) # Set use_cuda to False for cpu
vid = model.read_video('data/sample_videos/license_video.mp4')

for img in vid:
    detection = model.detecter(img)
    annotations = ASOne.draw(detection, img=img, display=True, class_names=['license_plate'])
```

</details>

<details>
<summary>6.1.2 👉 Changing Detector Models </summary>

Change detector by simply changing detector flag. The flags are provided in [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

- Our library now supports YOLOv5, YOLOv7, and YOLOv8 on macOS.

```python
# Change detector
model = ASOne(detector=asone.YOLOX_S_PYTORCH, use_cuda=True)

# For macOs
# YOLO5
model = ASOne(detector=asone.YOLOV5X_MLMODEL)
# YOLO7
model = ASOne(detector=asone.YOLOV7_MLMODEL)
# YOLO8
model = ASOne(detector=asone.YOLOV8L_MLMODEL)
```

</details>

</details>

<details>
<summary>6.2 👉 Object Tracking </summary>

Use tracker on sample video.

```python
import asone
from asone import ASOne

# Instantiate Asone object
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True) #set use_cuda=False to use cpu
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

# Loop over track to retrieve outputs of each frame
for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)
    # Do anything with bboxes here
```

[Note] You can use custom weights for a detector model by simply providing the path of the weights file in the `ASOne` class.

<details>
<summary>6.2.1 👉 Changing Detector and Tracking Models</summary>

<!-- ### Changing Detector and Tracking Models -->

Change Tracker by simply changing the tracker flag.

The flags are provided in [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

```python
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
# Change tracker
model = ASOne(tracker=asone.DEEPSORT, detector=asone.YOLOV9_C, use_cuda=True)
```

```python
# Change Detector
model = ASOne(tracker=asone.DEEPSORT, detector=asone.YOLOX_S_PYTORCH, use_cuda=True)
```

</details>

Run the `asone/demo_tracker.py` to test the tracker.

```shell
# run on gpu
python -m asone.demo_tracker data/sample_videos/test.mp4

# run on cpu
python -m asone.demo_tracker data/sample_videos/test.mp4 --cpu
```

</details>

<details>
<summary>6.3 👉 Segmentation</summary>


```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, segmentor=asone.SAM, use_cuda=True) #set use_cuda=False to use cpu
tracks = model.video_detecter('data/sample_videos/test.mp4', filter_classes=['car'])

for model_output in tracks:
    annotations = ASOne.draw_masks(model_output, display=True) # Draw masks
```
</details>

<details>
<summary>6.4 👉 Text Detection</summary>
  
Sample code to detect text on an image

```python
# Detect and recognize text
import asone
from asone import ASOne, utils
import cv2

model = ASOne(detector=asone.CRAFT, recognizer=asone.EASYOCR, use_cuda=True) # Set use_cuda to False for cpu
img = cv2.imread('data/sample_imgs/sample_text.jpeg')
results = model.detect_text(img)
annotations = utils.draw_text(img, results, display=True)
```

Use Tracker on Text

```python
import asone
from asone import ASOne

# Instantiate Asone object
model = ASOne(tracker=asone.DEEPSORT, detector=asone.CRAFT, recognizer=asone.EASYOCR, use_cuda=True) #set use_cuda=False to use cpu
tracks = model.video_tracker('data/sample_videos/GTA_5-Unique_License_Plate.mp4')

# Loop over track to retrieve outputs of each frame
for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)

    # Do anything with bboxes here
```

Run the `asone/demo_ocr.py` to test ocr.

```shell
# run on gpu
 python -m asone.demo_ocr data/sample_videos/GTA_5-Unique_License_Plate.mp4

# run on cpu
 python -m asone.demo_ocr data/sample_videos/GTA_5-Unique_License_Plate.mp4 --cpu
```

</details>

<details>
<summary>6.5 👉 Pose Estimation</summary>


Sample code to estimate pose on an image

```python
# Pose Estimation
import asone
from asone import PoseEstimator, utils
import cv2

model = PoseEstimator(estimator_flag=asone.YOLOV8M_POSE, use_cuda=True) #set use_cuda=False to use cpu
img = cv2.imread('data/sample_imgs/test2.jpg')
kpts = model.estimate_image(img)
annotations = utils.draw_kpts(kpts, image=img, display=True)
```

- Now you can use Yolov8 and Yolov7-w6 for pose estimation. The flags are provided in [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

```python
# Pose Estimation on video
import asone
from asone import PoseEstimator, utils

model = PoseEstimator(estimator_flag=asone.YOLOV7_W6_POSE, use_cuda=True) #set use_cuda=False to use cpu
estimator = model.video_estimator('data/sample_videos/football1.mp4')
for model_output in estimator:
    annotations = utils.draw_kpts(model_output)
    # Do anything with kpts here
```

Run the `asone/demo_pose_estimator.py` to test Pose estimation.

```shell
# run on gpu
 python -m asone.demo_pose_estimator data/sample_videos/football1.mp4

# run on cpu
 python -m asone.demo_pose_estimator data/sample_videos/football1.mp4 --cpu
```

</details>

To setup ASOne using Docker follow instructions given in [docker setup](asone/linux/Instructions/Docker-Setup.md)🐳

### ToDo 📝

- [x] First Release
- [x] Import trained models
- [x] Simplify code even further
- [x] Updated for YOLOv8
- [x] OCR and Counting
- [x] OCSORT, StrongSORT, MoTPy
- [x] M1/2 Apple Silicon Compatibility
- [x] Pose Estimation YOLOv7/v8
- [x] YOLO-NAS
- [x] Updated for YOLOv8.1
- [x] YOLOV9
- [x] SAM Integration


| Offered By 💼 :                                                                                                                                                  | Maintained By 👨‍💻 :                                                                                                                                    |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------- |
| [![AugmentedStarups](https://user-images.githubusercontent.com/107035454/195115263-d3271ef3-973b-40a4-83c8-0ade8727dd40.png)](https://augmentedstartups.com) | [![AxcelerateAI](https://user-images.githubusercontent.com/107035454/195114870-691c8a52-fcf0-462e-9e02-a720fc83b93f.png)](https://axcelerate.ai/) |


================================================
FILE: asone/__init__.py
================================================
from .asone import ASOne
import asone.detectors
import asone.trackers
import asone.recognizers
import asone.segmentors
from .pose_estimator import PoseEstimator
from asone.utils.video_reader import VideoReader

# ---------------------------------------------------------------------------
# Tracker flags: pass one of these as `tracker=` to ASOne.
# ---------------------------------------------------------------------------
BYTETRACK = 0
DEEPSORT = 1
NORFAIR = 2
MOTPY = 3
OCSORT = 4
STRONGSORT = 5


# ---------------------------------------------------------------------------
# Detector flags: pass one of these as `detector=` to ASOne.
# For flags 0-71 the numbering is paired: each even value is the PyTorch
# build of a model and the following odd value is its ONNX export
# (e.g. YOLOV5X6_PYTORCH = 0, YOLOV5X6_ONNX = 1).
# ---------------------------------------------------------------------------

# YOLOv5 (PyTorch)
YOLOV5X6_PYTORCH = 0
YOLOV5S_PYTORCH = 2
YOLOV5N_PYTORCH = 4
YOLOV5M_PYTORCH = 6
YOLOV5L_PYTORCH = 8
YOLOV5X_PYTORCH = 10
YOLOV5N6_PYTORCH = 12
YOLOV5S6_PYTORCH = 14
YOLOV5M6_PYTORCH = 16
YOLOV5L6_PYTORCH = 18


# YOLOv6 (PyTorch)
YOLOV6N_PYTORCH = 20
YOLOV6T_PYTORCH = 22
YOLOV6S_PYTORCH = 24
YOLOV6M_PYTORCH = 26
YOLOV6L_PYTORCH = 28
YOLOV6L_RELU_PYTORCH = 30
YOLOV6S_REPOPT_PYTORCH = 32

# YOLOv7 (PyTorch)
YOLOV7_TINY_PYTORCH = 34
YOLOV7_PYTORCH = 36
YOLOV7_X_PYTORCH = 38
YOLOV7_W6_PYTORCH = 40
YOLOV7_E6_PYTORCH = 42
YOLOV7_D6_PYTORCH = 44
YOLOV7_E6E_PYTORCH = 46

# YOLOR (PyTorch)
YOLOR_CSP_X_PYTORCH = 48
YOLOR_CSP_X_STAR_PYTORCH = 50
YOLOR_CSP_STAR_PYTORCH = 52
YOLOR_CSP_PYTORCH = 54
YOLOR_P6_PYTORCH = 56




# YOLOX (PyTorch)
YOLOX_L_PYTORCH = 58
YOLOX_NANO_PYTORCH = 60
YOLOX_TINY_PYTORCH = 62
YOLOX_DARKNET_PYTORCH = 64
YOLOX_S_PYTORCH = 66
YOLOX_M_PYTORCH = 68
YOLOX_X_PYTORCH = 70

#ONNX

# YOLOv5 (ONNX)
YOLOV5X6_ONNX = 1
YOLOV5S_ONNX = 3
YOLOV5N_ONNX = 5
YOLOV5M_ONNX = 7
YOLOV5L_ONNX = 9
YOLOV5X_ONNX = 11
YOLOV5N6_ONNX = 13
YOLOV5S6_ONNX = 15
YOLOV5M6_ONNX = 17
YOLOV5L6_ONNX = 19


# YOLOv6 (ONNX)
YOLOV6N_ONNX = 21
YOLOV6T_ONNX = 23
YOLOV6S_ONNX = 25
YOLOV6M_ONNX = 27
YOLOV6L_ONNX = 29
YOLOV6L_RELU_ONNX = 31
YOLOV6S_REPOPT_ONNX = 33

# YOLOv7 (ONNX)
YOLOV7_TINY_ONNX = 35
YOLOV7_ONNX = 37
YOLOV7_X_ONNX = 39
YOLOV7_W6_ONNX = 41
YOLOV7_E6_ONNX = 43
YOLOV7_D6_ONNX = 45
YOLOV7_E6E_ONNX = 47

# YOLOR (ONNX)
YOLOR_CSP_X_ONNX = 49
YOLOR_CSP_X_STAR_ONNX = 51
YOLOR_CSP_STAR_ONNX = 53
YOLOR_CSP_ONNX = 55
YOLOR_P6_ONNX = 57


# YOLOX (ONNX)
YOLOX_L_ONNX = 59
YOLOX_NANO_ONNX = 61
YOLOX_TINY_ONNX = 63
YOLOX_DARKNET_ONNX = 65
YOLOX_S_ONNX = 67
YOLOX_M_ONNX = 69
YOLOX_X_ONNX = 71

# YOLOv8
YOLOV8N_PYTORCH = 72
YOLOV8N_ONNX = 73
YOLOV8S_PYTORCH = 74
YOLOV8S_ONNX = 75
YOLOV8M_PYTORCH = 76
YOLOV8M_ONNX = 77
YOLOV8L_PYTORCH = 78
YOLOV8L_ONNX = 79
YOLOV8X_PYTORCH = 80
YOLOV8X_ONNX = 81

# coreml
# CoreML (.mlmodel) builds for Apple Silicon / macOS.

YOLOV5N_MLMODEL = 120
YOLOV5S_MLMODEL = 121
YOLOV5X6_MLMODEL = 122
YOLOV5M_MLMODEL = 123
YOLOV5L_MLMODEL = 124
YOLOV5X_MLMODEL = 125
YOLOV5N6_MLMODEL = 126
YOLOV5S6_MLMODEL = 127
YOLOV5M6_MLMODEL = 128
YOLOV5L6_MLMODEL = 129


YOLOV7_TINY_MLMODEL = 130
YOLOV7_MLMODEL = 131
YOLOV7_X_MLMODEL = 132
YOLOV7_W6_MLMODEL = 133
YOLOV7_E6_MLMODEL = 134
YOLOV7_D6_MLMODEL = 135
YOLOV7_E6E_MLMODEL = 136

YOLOV8N_MLMODEL = 139
YOLOV8S_MLMODEL = 140
YOLOV8M_MLMODEL = 141
YOLOV8L_MLMODEL = 142
YOLOV8X_MLMODEL = 143

# Pose-estimation flags: pass one of these as `estimator_flag=` to
# PoseEstimator.
YOLOV8N_POSE = 144
YOLOV8S_POSE = 145
YOLOV8M_POSE = 146
YOLOV8L_POSE = 147
YOLOV8X_POSE = 148

YOLOV7_W6_POSE = 149

# YOLO-NAS
YOLONAS_S_PYTORCH = 160
YOLONAS_M_PYTORCH = 161
YOLONAS_L_PYTORCH = 162

# YOLOv9
YOLOV9_C_CONVERTED = 164
YOLOV9_E_CONVERTED = 165
YOLOV9_C = 166
YOLOV9_E = 167
GELAN_C = 168
GELAN_E = 169

# Segmentors: pass as `segmentor=` to ASOne.
SAM = 171

# Text Detectors
# easyocr
CRAFT = 82
DBNET18 = 83

# Text Recognizers: pass as `recognizer=` to ASOne.
EASYOCR = 200



__all__ = ['ASOne', 'detectors', 'trackers', 'recognizers', 'segmentors', 'PoseEstimator']


================================================
FILE: asone/asone.py
================================================
import copy
import warnings
import cv2
from loguru import logger
import os
import time
import asone.utils as utils
from asone.trackers import Tracker
from asone.detectors import Detector
from asone.recognizers import TextRecognizer
from asone.segmentors import Segmentor
from asone.utils.default_cfg import config
from asone.utils.video_reader import VideoReader
from asone.utils import compute_color_for_labels
from asone.schemas.output_schemas import ModelOutput

import numpy as np


class ASOne:
    """Unified inference pipeline combining an object detector with an
    optional tracker, segmentor and text recognizer.

    Models are selected with the integer flags exported by the ``asone``
    package (e.g. ``asone.YOLOV9_C``, ``asone.BYTETRACK``). Passing ``-1``
    for ``tracker``/``segmentor`` disables that component.
    """

    def __init__(self,
                 detector: int = 0,
                 tracker: int = -1,
                 segmentor: int = -1,
                 weights: str = None,
                 segmentor_weights: str = None,
                 use_cuda: bool = True,
                 recognizer: int = None,
                 languages: list = None,
                 num_classes: int = 80
                 ) -> None:
        """Build the requested models.

        Args:
            detector: detector model flag.
            tracker: tracker flag, or ``-1`` to run detection only.
            segmentor: segmentor flag, or ``-1`` to disable segmentation.
            weights: optional path to custom detector weights.
            segmentor_weights: optional path to custom segmentor weights.
            use_cuda: run on GPU when True, CPU otherwise.
            recognizer: text-recognizer flag (e.g. ``asone.EASYOCR``) or None.
            languages: language codes for the text recognizer.
                Defaults to ``['en']`` (the default is ``None`` to avoid a
                mutable default argument; behavior is unchanged).
            num_classes: number of classes the detector was trained on.
        """
        if languages is None:
            languages = ['en']

        self.use_cuda = use_cuda
        self.use_segmentation = False
        self.model_output = ModelOutput()

        # Load the segmentation model only when the caller asked for one.
        if segmentor != -1:
            self.use_segmentation = True
            self.segmentor = self.get_segmentor(segmentor, segmentor_weights)

        # get detector object
        self.detector = self.get_detector(detector, weights, recognizer, num_classes)
        self.recognizer = self.get_recognizer(recognizer, languages=languages)

        if tracker == -1:
            self.tracker = None
            return

        self.tracker = self.get_tracker(tracker)

    def get_detector(self, detector: int, weights: str, recognizer, num_classes):
        """Instantiate and return the detector selected by the given flag."""
        detector = Detector(detector, weights=weights,
                            use_cuda=self.use_cuda, recognizer=recognizer,
                            num_classes=num_classes).get_detector()
        return detector

    def get_recognizer(self, recognizer: int, languages):
        """Instantiate the text recognizer, or return None when disabled."""
        if recognizer is None:
            return None
        recognizer = TextRecognizer(recognizer,
                                    use_cuda=self.use_cuda,
                                    languages=languages).get_recognizer()
        return recognizer

    def get_tracker(self, tracker: int):
        """Instantiate the tracker wrapping this instance's detector."""
        tracker = Tracker(tracker, self.detector,
                          use_cuda=self.use_cuda)
        return tracker

    def get_segmentor(self, segmentor, segmentor_weights):
        """Instantiate the segmentation model (e.g. SAM)."""
        segmentor = Segmentor(segmentor, segmentor_weights, self.use_cuda)
        return segmentor

    def _update_args(self, kwargs):
        """Overlay ``kwargs`` onto the shared default config and return it.

        NOTE(review): this mutates the module-level ``config`` dict imported
        from ``asone.utils.default_cfg`` and calls ``exit()`` on an unknown
        key; both kept as-is for backward compatibility.
        """
        for key, value in kwargs.items():
            if key in config.keys():
                config[key] = value
            else:
                print(f'"{key}" argument not found! valid args: {list(config.keys())}')
                exit()
        return config

    def track_stream(self,
                     stream_url,
                     **kwargs
                     ):
        """Deprecated: use :meth:`stream_tracker`.

        Yields raw ``(bbox_details, frame_details)`` tuples per frame.
        """
        # Emit the warning for DeprecationWarning
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn("track_stream function is deprecated. Kindly use stream_tracker instead", DeprecationWarning)

        output_filename = 'result.mp4'
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(stream_url, config):
            # yield bbox_details, frame_details to main script
            yield bbox_details, frame_details

    def stream_tracker(self,
                       stream_url,
                       **kwargs
                       ):
        """Track objects on a live stream; yields a ModelOutput per frame."""
        output_filename = 'result.mp4'
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(stream_url, config):
            yield self.format_output(bbox_details, frame_details)

    def track_video(self,
                    video_path,
                    **kwargs
                    ):
        """Deprecated: use :meth:`video_tracker`.

        Yields raw ``(bbox_details, frame_details)`` tuples per frame.
        """
        # Emit the warning for DeprecationWarning
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn("track_video function is deprecated. Kindly use video_tracker instead", DeprecationWarning)

        output_filename = os.path.basename(video_path)
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(video_path, config):
            # yield bbox_details, frame_details to main script
            yield bbox_details, frame_details

    def video_tracker(self,
                      video_path,
                      **kwargs
                      ):
        """Track objects in a video file; yields a ModelOutput per frame."""
        output_filename = os.path.basename(video_path)
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(video_path, config):
            yield self.format_output(bbox_details, frame_details)

    def detect_video(self,
                     video_path,
                     **kwargs
                     ):
        """Deprecated: use :meth:`video_detecter`.

        Yields raw ``(bbox_details, frame_details)`` tuples per frame.
        """
        # Emit the warning for DeprecationWarning
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn("detect_video function is deprecated. Kindly use video_detecter instead", DeprecationWarning)

        output_filename = os.path.basename(video_path)
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(video_path, config):
            # yield bbox_details, frame_details to main script
            yield bbox_details, frame_details

    def video_detecter(self,
                       video_path,
                       **kwargs
                       ):
        """Run detection over a video file; yields a ModelOutput per frame."""
        output_filename = os.path.basename(video_path)
        kwargs['filename'] = output_filename
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(video_path, config):
            yield self.format_output(bbox_details, frame_details)

    def detect(self, source, **kwargs) -> np.ndarray:
        """Deprecated: use :meth:`detecter`.

        Args:
            source: image path (str) or an image array passed straight to
                the detector.

        Returns:
            The raw detector output (detections array plus image info).
        """
        # Emit the warning for DeprecationWarning
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn("detect function is deprecated. Kindly use detecter instead", DeprecationWarning)

        if isinstance(source, str):
            source = cv2.imread(source)
        return self.detector.detect(source, **kwargs)

    def detecter(self, source, **kwargs):
        """Run detection on a single image.

        Args:
            source: image path (str) or an image array passed straight to
                the detector.

        Returns:
            A ModelOutput with bboxes, scores and class ids populated
            (ids and frame info are None for single-image detection).
        """
        if isinstance(source, str):
            source = cv2.imread(source)
        dets, _ = self.detector.detect(source, **kwargs)
        # Detector rows are [x1, y1, x2, y2, score, class_id].
        bboxes_xyxy = dets[:, :4]
        scores = dets[:, 4]
        class_ids = dets[:, 5]
        ids = None
        info = None
        return self.format_output((bboxes_xyxy, ids, scores, class_ids), info)

    def detect_and_track(self, frame, **kwargs):
        """Detect (and track, if a tracker is loaded) on one frame.

        Returns:
            ``((bboxes_xyxy, ids, scores, class_ids), info)`` where ``ids``
            and ``info`` are None when the corresponding stage is absent.
        """
        if self.tracker:
            bboxes_xyxy, ids, scores, class_ids = self.tracker.detect_and_track(
                frame, kwargs)
            info = None
        else:
            dets, info = self.detect(source=frame, **kwargs)
            bboxes_xyxy = dets[:, :4]
            scores = dets[:, 4]
            class_ids = dets[:, 5]
            ids = None

        return (bboxes_xyxy, ids, scores, class_ids), info

    def detect_track_manager(self, frame, **kwargs):
        """Like :meth:`detect_and_track` but routes the no-tracker path
        through :meth:`detecter` (ModelOutput-based)."""
        if self.tracker:
            bboxes_xyxy, ids, scores, class_ids = self.tracker.detect_and_track(
                frame, kwargs)
            info = None
        else:
            model_output = self.detecter(source=frame, **kwargs)

            info = model_output.info
            bboxes_xyxy = model_output.dets.bbox
            scores = model_output.dets.score
            class_ids = model_output.dets.class_ids
            ids = None

        return (bboxes_xyxy, ids, scores, class_ids), info

    def detect_text(self, image):
        """Detect and recognize text in an image.

        Raises:
            TypeError: if no recognizer was configured at construction.
        """
        horizontal_list, _ = self.detector.detect(image)
        if self.recognizer is None:
            raise TypeError("Recognizer can not be None")

        return self.recognizer.recognize(image, horizontal_list=horizontal_list,
                                         free_list=[])

    def track_webcam(self,
                     cam_id=0,
                     **kwargs):
        """Deprecated: use :meth:`webcam_tracker`.

        Yields raw ``(bbox_details, frame_details)`` tuples per frame.
        """
        # Emit the warning for DeprecationWarning
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn("track_webcam function is deprecated. Kindly use webcam_tracker instead", DeprecationWarning)

        output_filename = 'results.mp4'

        kwargs['filename'] = output_filename
        kwargs['fps'] = 29
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(cam_id, config):
            # yield bbox_details, frame_details to main script
            yield bbox_details, frame_details

    def webcam_tracker(self,
                       cam_id=0,
                       **kwargs):
        """Track objects from a webcam; yields a ModelOutput per frame."""
        output_filename = 'results.mp4'

        kwargs['filename'] = output_filename
        kwargs['fps'] = 29  # webcam FPS is assumed fixed for the writer
        config = self._update_args(kwargs)

        for (bbox_details, frame_details) in self._start_tracking(cam_id, config):
            yield self.format_output(bbox_details, frame_details)

    def _start_tracking(self,
                        stream_path: str,
                        config: dict) -> tuple:
        """Core per-frame loop shared by all video/stream/webcam entry points.

        Reads frames, runs detection/tracking (and optionally OCR and
        segmentation), draws annotations, optionally writes a result video,
        and yields ``(bbox_details, (image, frame_no, fps))`` per frame.
        """
        if not self.tracker:
            warnings.warn('No tracker has been selected. Only the detector is operational.')

        fps = config.pop('fps')
        output_dir = config.pop('output_dir')
        filename = config.pop('filename')
        save_result = config.pop('save_result')
        display = config.pop('display')
        draw_trails = config.pop('draw_trails')
        class_names = config.pop('class_names')

        cap = self.read_video(stream_path)
        width, height = cap.frame_size
        frame_count = cap.frame_counts

        if fps is None:
            fps = cap.fps

        if save_result:
            os.makedirs(output_dir, exist_ok=True)
            save_path = os.path.join(output_dir, filename)
            logger.info(f"video save path is {save_path}")

            video_writer = cv2.VideoWriter(
                save_path,
                cv2.VideoWriter_fourcc(*"mp4v"),
                fps,
                (int(width), int(height)),
            )

        frame_id = 1
        tic = time.time()
        prevTime = 0

        for frame in cap:
            start_time = time.time()

            im0 = copy.deepcopy(frame)
            # Prefer the ModelOutput-based path; fall back to the legacy
            # tuple-based path if it fails. (Was a bare `except:`, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            try:
                (bboxes_xyxy, ids, scores, class_ids), _ = self.detect_track_manager(frame, **config)
            except Exception:
                (bboxes_xyxy, ids, scores, class_ids), _ = self.detect_and_track(frame, **config)

            elapsed_time = time.time() - start_time

            logger.info(
                'frame {}/{} ({:.2f} ms)'.format(frame_id, int(frame_count),
                                                 elapsed_time * 1000))

            if self.recognizer:
                # OCR pipeline: recognize text inside the detected regions.
                res = self.recognizer.recognize(frame, horizontal_list=bboxes_xyxy,
                                                free_list=[])
                im0 = utils.draw_text(im0, res)
            else:
                im0 = self.draw((bboxes_xyxy, ids, scores, class_ids),
                                img=im0,
                                draw_trails=draw_trails,
                                class_names=class_names,
                                display=display)

            currTime = time.time()
            interval = currTime - prevTime
            # Guard against a zero interval (two frames in the same clock
            # tick) which previously raised ZeroDivisionError.
            fps = 1 / interval if interval > 0 else fps
            prevTime = currTime
            cv2.line(im0, (20, 25), (127, 25), [85, 45, 255], 30)
            cv2.putText(im0, f'FPS: {int(fps)}', (11, 35), 0, 1, [
                        225, 255, 255], thickness=2, lineType=cv2.LINE_AA)

            if self.use_segmentation:
                if len(bboxes_xyxy) > 0:  # Check if bounding box is present or not
                    # Will generate mask using SAM
                    masks = self.segmentor.create_mask(np.array(bboxes_xyxy), frame)
                    im0 = self.draw_masks(masks, img=im0, display=display)
                    # Pack masks alongside the boxes so downstream draw
                    # helpers can unpack them.
                    bboxes_xyxy = (bboxes_xyxy, masks)

            if save_result:
                video_writer.write(im0)

            frame_id += 1

            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

            # yield required values in form of (bbox_details, frames_details)
            yield (bboxes_xyxy, ids, scores, class_ids), (im0 if display else frame, frame_id - 1, fps)

        tac = time.time()
        print(f'Total Time Taken: {tac - tic:.2f}')

    @staticmethod
    def draw(dets, display=False, img=None, **kwargs):
        """Draw bounding boxes on an image.

        Accepts a ``(bboxes, ids, scores, class_ids)`` tuple, a raw
        detections ndarray, or a ModelOutput (which carries its own image).

        Returns:
            The annotated image. When ``display`` is True it is also shown
            via ``cv2.imshow`` (the caller is expected to pump the GUI
            event loop, e.g. via ``cv2.waitKey``).
        """
        draw_trails = kwargs.get('draw_trails', False)
        class_names = kwargs.get('class_names', None)
        if isinstance(dets, tuple):
            bboxes_xyxy, ids, scores, class_ids = dets
            if isinstance(bboxes_xyxy, tuple):
                # Segmentation path packs (bboxes, masks); keep boxes only.
                bboxes_xyxy, _ = bboxes_xyxy

        elif isinstance(dets, np.ndarray):
            bboxes_xyxy = dets[:, :4]
            scores = dets[:, 4]
            class_ids = dets[:, 5]
            ids = None

        elif isinstance(dets, ModelOutput):
            bboxes_xyxy = dets.dets.bbox
            ids = dets.dets.ids
            class_ids = dets.dets.class_ids
            img = dets.info.image if dets.info.image is not None else img

        img = utils.draw_boxes(img,
                               bbox_xyxy=bboxes_xyxy,
                               class_ids=class_ids,
                               identities=ids,
                               draw_trails=draw_trails,
                               class_names=class_names)

        if display:
            cv2.imshow(' Sample', img)

        return img

    @staticmethod
    def draw_masks(dets, display=False, img=None, **kwargs):
        """Overlay segmentation masks on an image.

        Accepts a ModelOutput, a ``(bbox_details, ...)`` tuple whose boxes
        are packed with masks, or a bare sequence of masks. Returns the
        blended image (unchanged if no masks are available).
        """
        # Check if bounding box are present
        if isinstance(dets, tuple) and len(dets) > 0 and len(dets[0]) == 0:
            return img

        elif isinstance(dets, ModelOutput):
            masks = dets.dets.bbox
            class_ids = dets.dets.class_ids
            img = dets.info.image if dets.info.image is not None else img
            if isinstance(masks, tuple):
                # (bboxes, masks) packed together by the tracking loop.
                bboxes_xyxy, masks = masks
            if isinstance(masks, np.ndarray):
                # Plain bbox array only -- no masks to draw.
                return img

        elif isinstance(dets, tuple):
            bboxes_xyxy, ids, scores, class_ids = dets
            if isinstance(bboxes_xyxy, tuple):
                bboxes_xyxy, masks = bboxes_xyxy
            else:
                # No masks packed with the boxes; previously this path fell
                # through with `masks` unbound and raised NameError.
                return img
        else:
            masks = dets
            class_ids = None

        color = [0, 255, 0]
        masked_image = img.copy()
        for idx in range(len(masks)):
            mask = masks[idx].squeeze()  # Squeeze to remove singleton dimension
            if class_ids is not None:
                color = compute_color_for_labels(int(class_ids[idx]))
            color = np.asarray(color, dtype='uint8')
            mask_color = np.expand_dims(mask, axis=-1) * color  # Apply color to the mask
            # Apply the mask to the image
            masked_image = np.where(mask_color > 0, mask_color, masked_image)

        masked_image = masked_image.astype(np.uint8)
        img = cv2.addWeighted(img, 0.5, masked_image, 0.5, 0)

        if display:
            cv2.imshow(' Sample', img)
        return img

    def read_video(self, video_path):
        """Open a video/stream/webcam source and return a VideoReader."""
        vid = VideoReader(video_path)

        return vid

    def format_output(self, bbox_details, frame_details):
        """Pack raw detection/frame tuples into the shared ModelOutput.

        NOTE(review): the same ModelOutput instance is reused and mutated
        on every call, so consumers must not hold references across frames.
        """
        # Set detections
        self.model_output.dets.bbox = bbox_details[0]
        self.model_output.dets.ids = bbox_details[1]
        self.model_output.dets.score = bbox_details[2]
        self.model_output.dets.class_ids = bbox_details[3]
        if frame_details:
            # Set image info
            self.model_output.info.image = frame_details[0]
            self.model_output.info.frame_no = frame_details[1]
            self.model_output.info.fps = frame_details[2]

        return self.model_output
    
if __name__ == '__main__':
    # Smoke test: run the default detector+tracker over a sample video.
    # The original called asone.start_tracking(...), which does not exist
    # on ASOne (the private loop is _start_tracking; the public API is
    # video_tracker). video_tracker is a generator, so it must be iterated
    # for the pipeline to actually run.
    asone = ASOne()

    for _ in asone.video_tracker('data/sample_videos/video2.mp4',
                                 save_result=True, display=False):
        pass


================================================
FILE: asone/demo_detector.py
================================================
import sys
import argparse
import asone
from asone import ASOne
import torch


def main(args):
    """Run the detection demo over the video given on the command line."""
    filter_classes = args.filter_classes

    # Wrap the user-supplied class name in a list, as expected by the
    # detector. The original replaced whatever the user passed with
    # ['person'], silently ignoring --filter_classes.
    if filter_classes:
        filter_classes = [filter_classes]

    # Only use CUDA when requested AND actually available.
    if args.use_cuda and torch.cuda.is_available():
        args.use_cuda = True
    else:
        args.use_cuda = False

    # macOS uses the CoreML build; everything else uses PyTorch.
    if sys.platform.startswith('darwin'):
        detector = asone.YOLOV7_MLMODEL
    else:
        detector = asone.YOLOV7_PYTORCH

    detect = ASOne(
        detector=detector,
        weights=args.weights,
        use_cuda=args.use_cuda
        )
    # Get the per-frame detection generator.
    track = detect.detect_video(args.video_path,
                                output_dir=args.output_dir,
                                conf_thres=args.conf_thres,
                                iou_thres=args.iou_thres,
                                display=args.display,
                                filter_classes=filter_classes,
                                class_names=None)  # class_names=['License Plate'] for custom weights

    # Loop over the generator to retrieve outputs of each frame.
    for bbox_details, frame_details in track:
        bbox_xyxy, ids, scores, class_ids = bbox_details
        frame, frame_num, fps = frame_details
        print(frame_num)
        

if __name__ == '__main__':
    # Command-line interface for the detector demo. Flags mirror the other
    # demo scripts: --cpu/--no_save/--no_display are "off switches" whose
    # store_false dests default to True.
    ap = argparse.ArgumentParser()

    ap.add_argument('video_path', help='Path to input video')
    ap.add_argument(
        '--cpu', dest='use_cuda', action='store_false', default=True,
        help='run on cpu if not provided the program will run on gpu.')
    ap.add_argument(
        '--no_save', dest='save_result', action='store_false', default=True,
        help='whether or not save results')
    ap.add_argument(
        '--no_display', dest='display', action='store_false', default=True,
        help='whether or not display results on screen')
    ap.add_argument(
        '--output_dir', default='data/results',
        help='Path to output directory')
    ap.add_argument(
        '--draw_trails', action='store_true', default=False,
        help='if provided object motion trails will be drawn.')
    ap.add_argument('--filter_classes', default=None, help='Filter class name')
    ap.add_argument('-w', '--weights', default=None,
                    help='Path of trained weights')
    ap.add_argument('-ct', '--conf_thres', type=float, default=0.25,
                    help='confidence score threshold')
    ap.add_argument('-it', '--iou_thres', type=float, default=0.45,
                    help='iou score threshold')

    main(ap.parse_args())

================================================
FILE: asone/demo_ocr.py
================================================
import argparse
import asone
from asone import ASOne

def main(args):
    """Run CRAFT text detection + EasyOCR recognition with DeepSORT tracking
    over a video, printing each processed frame number.

    Args:
        args: Parsed CLI namespace (video_path, weights, use_cuda, output_dir,
            save_result, display, draw_trails, conf_thres, iou_thres).
    """
    detect = ASOne(
        tracker=asone.DEEPSORT,
        detector=asone.CRAFT,
        weights=args.weights,
        recognizer=asone.EASYOCR,
        use_cuda=args.use_cuda
        )
    # Get the per-frame tracking generator. save_result was previously parsed
    # from the CLI (--no_save) but never forwarded, so it had no effect.
    track = detect.track_video(args.video_path,
                                output_dir=args.output_dir,
                                save_result=args.save_result,
                                conf_thres=args.conf_thres,
                                iou_thres=args.iou_thres,
                                display=args.display,
                                draw_trails=args.draw_trails) # class_names=['License Plate'] for custom weights

    # Loop over track to retrieve outputs of each frame
    for bbox_details, frame_details in track:
        bbox_xyxy, ids, scores, class_ids = bbox_details
        frame, frame_num, fps = frame_details
        print(frame_num)
        

if __name__ == '__main__':
    # Build the CLI and hand the parsed options straight to main().
    cli = argparse.ArgumentParser()

    cli.add_argument('video_path', help='Path to input video')
    cli.add_argument('--cpu', dest='use_cuda', action='store_false', default=True,
                     help='run on cpu if not provided the program will run on gpu.')
    cli.add_argument('--no_save', dest='save_result', action='store_false', default=True,
                     help='whether or not save results')
    cli.add_argument('--no_display', dest='display', action='store_false', default=True,
                     help='whether or not display results on screen')
    cli.add_argument('--output_dir', default='data/results', help='Path to output directory')
    cli.add_argument('--draw_trails', action='store_true', default=False,
                     help='if provided object motion trails will be drawn.')
    cli.add_argument('-w', '--weights', default=None, help='Path of trained weights')
    cli.add_argument('-ct', '--conf_thres', type=float, default=0.25, help='confidence score threshold')
    cli.add_argument('-it', '--iou_thres', type=float, default=0.45, help='iou score threshold')

    main(cli.parse_args())

================================================
FILE: asone/demo_pose_estimator.py
================================================
import asone
from asone import PoseEstimator
from .utils import draw_kpts
import cv2
import argparse
import time
import os


def main(args):
    """Run YOLOv7-W6 pose estimation over a video, drawing keypoints and an
    FPS overlay, optionally displaying and/or saving the annotated frames.

    Args:
        args: Parsed CLI namespace (video, weights, use_cuda, output_path,
            display, save).
    """
    video_path = args.video
    os.makedirs(args.output_path, exist_ok=True)
    estimator = PoseEstimator(asone.YOLOV7_W6_POSE, weights=args.weights, use_cuda=args.use_cuda)

    cap = cv2.VideoCapture(video_path)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    FPS = cap.get(cv2.CAP_PROP_FPS)

    # Writer is created only when saving; keep a sentinel so cleanup below
    # never touches an unbound name.
    video_writer = None
    if args.save:
        video_writer = cv2.VideoWriter(
            os.path.join(args.output_path, os.path.basename(video_path)),
            cv2.VideoWriter_fourcc(*"mp4v"),
            FPS,
            (int(width), int(height)),
        )

    frame_no = 1
    prevTime = 0
    try:
        while True:
            ret, img = cap.read()
            if not ret:
                break

            kpts = estimator.estimate_image(img)

            # Instantaneous FPS from the wall-clock gap between frames.
            currTime = time.time()
            fps = 1 / (currTime - prevTime)
            prevTime = currTime

            if kpts is not None:
                img = draw_kpts(img, kpts)
            cv2.line(img, (20, 25), (127, 25), [85, 45, 255], 30)
            cv2.putText(img, f'FPS: {int(fps)}', (11, 35), 0, 1, [
                        225, 255, 255], thickness=2, lineType=cv2.LINE_AA)

            frame_no += 1
            if args.display:
                cv2.imshow('Window', img)

            if args.save:
                video_writer.write(img)

            print(frame_no)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    finally:
        # Release capture/writer handles (previously leaked) and close any
        # display window on exit or error.
        cap.release()
        if video_writer is not None:
            video_writer.release()
        cv2.destroyAllWindows()

if __name__ == '__main__':
    # Build the CLI and hand the parsed options straight to main().
    cli = argparse.ArgumentParser()
    cli.add_argument("video", help="Path of video")
    cli.add_argument('--cpu', dest='use_cuda', action='store_false', default=True,
                     help='If provided the model will run on cpu otherwise it will run on gpu')
    cli.add_argument('-w', '--weights', default=None, help='Path of trained weights')
    cli.add_argument('-o', '--output_path', default='data/results', help='path of output file')
    cli.add_argument('--no_display', dest='display', action='store_false', default=True,
                     help='if provided video will not be displayed')
    cli.add_argument('--no_save', dest='save', action='store_false', default=True,
                     help='if provided video will not be saved')

    main(cli.parse_args())

================================================
FILE: asone/demo_segmentor.py
================================================
import sys
import argparse
import asone
from asone import ASOne
import torch


def main(args):
    """Detect objects with YOLOv7 and segment them with SAM on a video,
    printing each processed frame number.

    Args:
        args: Parsed CLI namespace (video_path, filter_classes, use_cuda,
            weights, segmentor_weights, output_dir, display, draw_trails,
            conf_thres, iou_thres).
    """
    filter_classes = args.filter_classes

    if filter_classes:
        # Honour the user-supplied comma-separated class names. Previously
        # this was hard-coded to ['person'], silently ignoring the CLI value
        # (demo_tracker uses the same split(',') convention).
        filter_classes = filter_classes.split(',')

    # Fall back to CPU when CUDA is not actually available.
    if args.use_cuda and torch.cuda.is_available():
        args.use_cuda = True
    else:
        args.use_cuda = False

    # Use the CoreML model on macOS, the PyTorch weights everywhere else.
    if sys.platform.startswith('darwin'):
        detector = asone.YOLOV7_MLMODEL
    else:
        detector = asone.YOLOV7_PYTORCH

    detect = ASOne(
        detector=detector,
        segmentor=asone.SAM,
        weights=args.weights,
        segmentor_weights=args.segmentor_weights,
        use_cuda=args.use_cuda
        )
    # Get the per-frame detection/segmentation generator
    track = detect.detect_video(args.video_path,
                                output_dir=args.output_dir,
                                conf_thres=args.conf_thres,
                                iou_thres=args.iou_thres,
                                display=args.display,
                                draw_trails=args.draw_trails,
                                filter_classes=filter_classes,
                                class_names=None) # class_names=['License Plate'] for custom weights

    # Loop over track_fn to retrieve outputs of each frame
    for bbox_details, frame_details in track:
        bbox_xyxy, ids, scores, class_ids = bbox_details
        frame, frame_num, fps = frame_details
        print(frame_num)
        

if __name__ == '__main__':
    # Build the CLI and hand the parsed options straight to main().
    cli = argparse.ArgumentParser()

    cli.add_argument('video_path', help='Path to input video')
    cli.add_argument('--cpu', dest='use_cuda', action='store_false', default=True,
                     help='run on cpu if not provided the program will run on gpu.')
    cli.add_argument('--no_save', dest='save_result', action='store_false', default=True,
                     help='whether or not save results')
    cli.add_argument('--no_display', dest='display', action='store_false', default=True,
                     help='whether or not display results on screen')
    cli.add_argument('--output_dir', default='data/results', help='Path to output directory')
    cli.add_argument('--draw_trails', action='store_true', default=False,
                     help='if provided object motion trails will be drawn.')
    cli.add_argument('--filter_classes', default=None, help='Filter class name')
    cli.add_argument('-w', '--weights', default=None, help='Path of trained weights')
    cli.add_argument('--segmentor_weights', default=None, help='Path of Segmentor weights')
    cli.add_argument('-ct', '--conf_thres', type=float, default=0.25, help='confidence score threshold')
    cli.add_argument('-it', '--iou_thres', type=float, default=0.45, help='iou score threshold')

    main(cli.parse_args())

================================================
FILE: asone/demo_tracker.py
================================================
import asone
from asone import ASOne
from .utils import draw_boxes
import cv2
import argparse
import time
import os


def main(args):
    """Track objects in a video with ByteTrack + YOLOv7 and iterate the
    per-frame results.

    Args:
        args: Parsed CLI namespace (video, filter_classes, weights, use_cuda,
            output_path, display, save).
    """
    filter_classes = args.filter_classes
    video_path = args.video

    os.makedirs(args.output_path, exist_ok=True)

    if filter_classes:
        filter_classes = filter_classes.split(',')

    # Forward custom weights: the -w/--weights option was parsed but
    # previously never passed to ASOne (the other demos do pass it).
    detect = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV7_PYTORCH,
                   weights=args.weights, use_cuda=args.use_cuda)

    track = detect.track_video(video_path, output_dir=args.output_path,
                               save_result=args.save, display=args.display,
                               filter_classes=filter_classes)

    # Consume the generator so tracking actually runs frame by frame.
    for bbox_details, frame_details in track:
        bbox_xyxy, ids, scores, class_ids = bbox_details
        frame, frame_num, fps = frame_details


if __name__ == '__main__':
    # Build the CLI and hand the parsed options straight to main().
    cli = argparse.ArgumentParser()
    cli.add_argument("video", help="Path of video")
    cli.add_argument('--cpu', dest='use_cuda', action='store_false', default=True,
                     help='If provided the model will run on cpu otherwise it will run on gpu')
    cli.add_argument('--filter_classes', default=None,
                     help='Class names seperated by comma (,). e.g. person,car ')
    cli.add_argument('-w', '--weights', default=None, help='Path of trained weights')
    cli.add_argument('-o', '--output_path', default='data/results', help='path of output file')
    cli.add_argument('--no_display', dest='display', action='store_false', default=True,
                     help='if provided video will not be displayed')
    cli.add_argument('--no_save', dest='save', action='store_false', default=True,
                     help='if provided video will not be saved')

    main(cli.parse_args())

================================================
FILE: asone/detectors/__init__.py
================================================
from asone.detectors.yolov5 import YOLOv5Detector
from asone.detectors.yolov6 import YOLOv6Detector
from asone.detectors.yolov7 import YOLOv7Detector
from asone.detectors.yolov9 import YOLOv9Detector
from asone.detectors.yolor import YOLOrDetector
from asone.detectors.yolox import YOLOxDetector
# Imported so the name declared in __all__ actually exists for star-imports
# (it was previously listed but never imported).
from asone.detectors.yolonas import YOLOnasDetector
from asone.detectors.easyocr_detector import TextDetector

from asone.detectors.detector import Detector

# NOTE: the original list was missing the comma after 'Detector', which made
# Python concatenate it with 'YOLOv5Detector' into the bogus public name
# 'DetectorYOLOv5Detector'.
__all__ = ['Detector',
           'YOLOv5Detector',
           'YOLOv6Detector',
           'YOLOv7Detector',
           'YOLOv9Detector',
           'YOLOrDetector',
           'YOLOxDetector',
           'TextDetector',
           'YOLOnasDetector']


================================================
FILE: asone/detectors/detector.py
================================================
import cv2

from asone.detectors.utils.weights_path import get_weight_path
from asone.detectors.utils.cfg_path import get_cfg_path
from asone.detectors.utils.exp_name import get_exp__name

class Detector:
    """Facade that maps an integer ``model_flag`` onto a concrete detector
    implementation (YOLOv5/v6/v7/v8/v9, YOLOR, YOLOX, YOLO-NAS, or an
    easyocr text detector) and forwards inference calls to it."""

    def __init__(self,
                 model_flag: int,
                 weights: str = None,
                 use_cuda: bool = True,
                 recognizer: int = None,
                 num_classes: int = 80):
        """
        Args:
            model_flag: Integer id selecting the detector family/variant;
                the supported ranges are enumerated in ``_select_detector``.
            weights: Optional path to custom weights. The file extension
                (.onnx / .mlmodel / anything else) selects the backend.
            use_cuda: Run on GPU when True (passed through to the backend).
            recognizer: Recognizer id; accepted for interface compatibility
                but not used by any branch below.
            num_classes: Number of output classes (used by YOLO-NAS only).
        """
        self.model = self._select_detector(model_flag, weights, use_cuda,
                                           recognizer, num_classes)

    def _select_detector(self, model_flag, weights, cuda, recognizer, num_classes):
        """Instantiate and return the detector matching ``model_flag``.

        Raises:
            ValueError: if ``model_flag`` is not in any supported range
                (previously this fell through and raised an opaque NameError
                on the final ``return``).
        """
        # Resolve weight path and backend flags. An explicit weights path
        # overrides the flag-based lookup; its extension picks the backend.
        mlmodel = False
        if weights and weights.split('.')[-1] == 'onnx':
            onnx = True
            weight = weights
        elif weights and weights.split('.')[-1] == 'mlmodel':
            onnx = False
            weight = weights
            mlmodel = True
        elif weights:
            onnx = False
            weight = weights
        else:
            mlmodel, onnx, weight = get_weight_path(model_flag)

        # Detector families are imported lazily so only the selected
        # backend's dependencies are loaded.
        if model_flag in range(0, 20) or model_flag in range(120, 131):
            from asone.detectors.yolov5 import YOLOv5Detector
            _detector = YOLOv5Detector(weights=weight,
                                       use_onnx=onnx,
                                       mlmodel=mlmodel,
                                       use_cuda=cuda)
        elif model_flag in range(20, 34):
            from asone.detectors.yolov6 import YOLOv6Detector
            _detector = YOLOv6Detector(weights=weight,
                                       use_onnx=onnx,
                                       use_cuda=cuda)
        elif model_flag in range(34, 48) or model_flag in range(131, 139):
            from asone.detectors.yolov7 import YOLOv7Detector
            _detector = YOLOv7Detector(weights=weight,
                                       use_onnx=onnx,
                                       mlmodel=mlmodel,
                                       use_cuda=cuda)
        elif model_flag in range(48, 58):
            from asone.detectors.yolor import YOLOrDetector
            # PyTorch (even) YOLOR flags need a .cfg file; ONNX ones do not.
            if model_flag in range(48, 57, 2):
                cfg = get_cfg_path(model_flag)
            else:
                cfg = None
            _detector = YOLOrDetector(weights=weight,
                                      cfg=cfg,
                                      use_onnx=onnx,
                                      use_cuda=cuda)
        elif model_flag in range(58, 72):
            from asone.detectors.yolox import YOLOxDetector
            # PyTorch (even) YOLOX flags need an exp file and model name.
            if model_flag in range(58, 71, 2):
                exp, model_name = get_exp__name(model_flag)
            else:
                exp = model_name = None
            _detector = YOLOxDetector(model_name=model_name,
                                      exp_file=exp,
                                      weights=weight,
                                      use_onnx=onnx,
                                      use_cuda=cuda)
        elif model_flag in range(72, 82) or model_flag in range(139, 144):
            from asone.detectors.yolov8 import YOLOv8Detector
            _detector = YOLOv8Detector(weights=weight,
                                       use_onnx=onnx,
                                       mlmodel=mlmodel,
                                       use_cuda=cuda)
        elif model_flag in range(82, 85):
            # easyocr text detectors: 'weight' is a network name, not a path.
            from asone.detectors.easyocr_detector import TextDetector
            _detector = TextDetector(detect_network=weight, use_cuda=cuda)
        elif model_flag in range(160, 163):
            from asone.detectors.yolonas import YOLOnasDetector
            _detector = YOLOnasDetector(
                                    model_flag,
                                    weights=weight,
                                    use_onnx=onnx,
                                    use_cuda=cuda,
                                    num_classes=num_classes)
        elif model_flag in range(164, 170):
            from asone.detectors.yolov9 import YOLOv9Detector
            _detector = YOLOv9Detector(
                                    weights=weight,
                                    use_onnx=onnx,
                                    use_cuda=cuda)
        else:
            raise ValueError(f"Unsupported model_flag: {model_flag}")

        return _detector

    def get_detector(self):
        """Return the underlying detector instance selected in __init__."""
        return self.model

    def detect(self,
               image: list,
               return_image=False,
               **kwargs: dict):
        """Run inference on ``image`` via the selected backend.

        Args:
            image: Input image (numpy array as read by cv2 -- TODO confirm
                per-backend expectations).
            return_image: Forwarded to the backend; when True the backend
                also returns the image.
            **kwargs: Backend-specific options (e.g. thresholds).
        """
        return self.model.detect(image, return_image, **kwargs)


if __name__ == '__main__':

    # Smoke test: flag 56 selects the YOLOR-P6 PyTorch detector
    # (see Detector._select_detector, range 48-57).
    model_type = 56
    detector = Detector(model_flag=model_type, use_cuda=True)
    img = cv2.imread('asone/asone-linux/test.jpeg')
    # Bug fix: get_detector() takes no arguments and only returns the backend;
    # inference must go through detect().
    pred = detector.detect(img)
    print(pred)


================================================
FILE: asone/detectors/easyocr_detector/__init__.py
================================================
from .text_detector import TextDetector
__all__ = ['TextDetector']

================================================
FILE: asone/detectors/easyocr_detector/text_detector.py
================================================
import easyocr
import numpy as np


class TextDetector:
    # Thin wrapper around easyocr.Reader that adapts its detection output to
    # the flat [xmin, ymin, xmax, ymax, score, class_id] box format used by
    # the other detectors in this package.

    def __init__(self, detect_network, languages: list = ['en'], use_cuda=True):
        # detect_network: easyocr detection backbone name (e.g. 'craft' or
        # 'dbnet18' -- see the weights table in utils/weights_path.py).
        self.use_cuda = use_cuda
        self.detect_network = detect_network
        self.reader = easyocr.Reader(languages, detect_network=self.detect_network ,gpu=self.use_cuda)
        
    def detect(self, image: list,  freelist: bool=False, return_image=False, **config) -> list:
        """Detect text regions in *image*.

        Args:
            image: Image as a numpy array (H x W x C).
            freelist: If True, return easyocr's raw horizontal_list and
                free_list plus the image size instead of the merged array.
            return_image: If True, return the input image as the second
                element instead of the size dict.
        Returns:
            list: numpy array of boxes [xmin, ymin, xmax, ymax, score,
            class_id] and a dict with the image width and height (the exact
            shape varies with the flags above).
        """
        
        h, w = image.shape[0:2]
        # easyocr returns axis-aligned boxes (horizontal_list, as
        # [x_min, x_max, y_min, y_max]) and rotated quads (free_list).
        horizontal_list, free_list = self.reader.detect(image) 

        # No text found: return an empty (0, 6) detection array (or the
        # untouched easyocr output alongside the image).
        if horizontal_list[0] == [] and free_list[0] == []:
            if return_image:
                return horizontal_list, image
            return np.empty((0, 6)), {'width': w, 'height': h}
        
        if freelist:
            return horizontal_list, free_list, {'width': w, 'height': h}
        
        x_list = []
        y_list = []
        new_points = []
        # Collapse each rotated quad to its axis-aligned bounding box in the
        # same [xmin, xmax, ymin, ymax] order easyocr uses.
        if free_list[0] != []:
            bbox_list = np.array(free_list[0]).astype(int)
            xmin= bbox_list[:, :, 0].min(axis=1, keepdims=True)
            xmax= bbox_list[:, :, 0].max(axis=1, keepdims=True)
            ymin= bbox_list[:, :, 1].min(axis=1, keepdims=True)
            ymax= bbox_list[:, :, 1].max(axis=1, keepdims=True)
            new_points = np.hstack((xmin, xmax, ymin,  ymax)).tolist()

        # Merge the collapsed quads into the axis-aligned box list.
        if len(horizontal_list[0]) < 1:
            horizontal_list = [new_points]
        else:
            horizontal_list = [horizontal_list[0] + new_points]

        horizontal_list = np.array(horizontal_list[0])
        # Swap columns 1 and 2: [xmin, xmax, ymin, ymax] -> [xmin, ymin, xmax, ymax].
        horizontal_list[:, [1, 2]] = horizontal_list[:, [2, 1]]
        # Append a fixed dummy confidence (0.7) and class id (80, presumably
        # "text" -- easyocr's detector provides neither; verify downstream).
        horizontal_list = np.hstack((horizontal_list, np.array([[0.7, 80]]*len(horizontal_list))))
        
        if return_image:
            return horizontal_list, image
            
        return  horizontal_list, {'width': w, 'height': h}


================================================
FILE: asone/detectors/utils/__init__.py
================================================


================================================
FILE: asone/detectors/utils/cfg_path.py
================================================
import os

cfg_dir = os.path.dirname(os.path.dirname(__file__))

# YOLOR configuration files bundled with the package, keyed by variant.
configuration = {'0': os.path.join(cfg_dir, 'yolor','cfg','yolor_csp_x.cfg'),
                 '1': os.path.join(cfg_dir, 'yolor','cfg','yolor_csp.cfg'),
                 '2': os.path.join(cfg_dir, 'yolor','cfg','yolor_p6.cfg')}

def get_cfg_path(model_flag):
    """Return the YOLOR .cfg path for a PyTorch YOLOR *model_flag*.

    Flags 48/50 -> yolor_csp_x, 52/54 -> yolor_csp, 56 -> yolor_p6.

    Raises:
        ValueError: for any other flag (previously an unmatched flag
            crashed with an opaque UnboundLocalError on ``cfg``).
    """
    if model_flag in (48, 50):
        return configuration['0']
    if model_flag in (52, 54):
        return configuration['1']
    if model_flag == 56:
        return configuration['2']
    raise ValueError(f"No YOLOR cfg registered for model_flag={model_flag}")
    
    

================================================
FILE: asone/detectors/utils/coreml_utils.py
================================================
import numpy as np


def yolo_to_xyxy(bboxes, img_size):
    w, h = img_size
    
    bboxes = bboxes[:, 0:]
    bboxes[:, 0] = bboxes[:, 0]*w
    bboxes[:, 1] = bboxes[:, 1]*h
    bboxes[:, 2] = bboxes[:, 2]*w
    bboxes[:, 3] = bboxes[:, 3]*h

    bboxes[:, 0] = bboxes[:, 0] - bboxes[:, 2]/2
    bboxes[:, 1] = bboxes[:, 1] - bboxes[:, 3]/2
    bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]
    bboxes[:, 3] = bboxes[:, 1] + bboxes[:, 3]
    
    return bboxes.astype(int)

def generalize_output_format(bboxes, confidence_list, conf_thres):
    """Combine boxes and per-class confidences into detection rows.

    Args:
        bboxes : Bounding boxes in xyxy format
        confidence_list : per-box array of class confidence scores
        conf_thres : minimum confidence for a box to be kept

    Returns:
        np.array: rows of [Xmin, Ymin, Xmax, Ymax, confidence, class_id]
    """
    # Best class per box, then keep only those clearing the threshold.
    best_classes = np.argmax(confidence_list, axis=1)
    rows = [
        np.append(np.append(bboxes[idx], scores[cls]), cls)
        for idx, (scores, cls) in enumerate(zip(confidence_list, best_classes))
        if scores[cls] > conf_thres
    ]
    return np.array(rows)

def scale_bboxes(bboxes, org_img_shape, resized_img_shape):
    """Map box coordinates from the resized image back to the original.

    Args:
        bboxes: (N, >=4) float array; columns 0-3 are rescaled in place.
        org_img_shape: (height, width) of the original image.
        resized_img_shape: (height, width) of the resized image.

    Returns:
        The same array with its first four columns rescaled.
    """
    res_h, res_w = resized_img_shape[0], resized_img_shape[1]
    org_h, org_w = org_img_shape[0], org_img_shape[1]

    # Normalize to 0..1 in the resized frame, then expand to original pixels.
    bboxes[:, :4] /= np.array([res_w, res_h, res_w, res_h])
    bboxes[:, :4] *= np.array([org_w, org_h, org_w, org_h])

    return bboxes

================================================
FILE: asone/detectors/utils/exp_name.py
================================================
import os

exp_dir = os.path.dirname(os.path.dirname(__file__))

# PyTorch YOLOX model_flag -> (exp file path, canonical model name).
# '62' previously pointed at 'yolox_tiny' with no '.py' extension,
# inconsistent with every other entry.
exp_file_name = {'58': (os.path.join(exp_dir, 'yolox','exps','yolox_l.py'),'yolox-l'),
                 '60': (os.path.join(exp_dir, 'yolox','exps','yolox_nano.py'),'yolox-nano'),
                 '62': (os.path.join(exp_dir, 'yolox','exps','yolox_tiny.py'),'yolox-tiny'),
                 '64': (os.path.join(exp_dir, 'yolox','exps','yolov3.py'),'yolox-darknet'),
                 '66': (os.path.join(exp_dir, 'yolox','exps','yolox_s.py'),'yolox-s'),
                 '68': (os.path.join(exp_dir, 'yolox','exps','yolox_m.py'),'yolox-m'),
                 '70': (os.path.join(exp_dir, 'yolox','exps','yolox_x.py'),'yolox-x')
                }


def get_exp__name(model_flag):
    """Return (exp_file_path, model_name) for a PyTorch YOLOX *model_flag*.

    Raises:
        ValueError: for flags with no registered exp file (previously an
            unmatched flag crashed with an opaque UnboundLocalError).
    """
    try:
        # The table is keyed directly by the flag, so the former 7-branch
        # if/elif chain reduces to a single lookup.
        return exp_file_name[str(model_flag)]
    except KeyError:
        raise ValueError(
            f"No YOLOX exp file registered for model_flag={model_flag}") from None

================================================
FILE: asone/detectors/utils/weights_path.py
================================================
import os

# Model flag -> weight file path (relative; downloaded on demand elsewhere)
# or, for the text detectors, an easyocr network name.
weights = { '0': os.path.join('yolov5','weights','yolov5x6.pt'),
            '1': os.path.join('yolov5','weights','yolov5x6.onnx'),
            '2': os.path.join('yolov5','weights','yolov5s.pt'),
            '3': os.path.join('yolov5','weights','yolov5s.onnx'),
            '4': os.path.join('yolov5','weights','yolov5n.pt'),
            '5': os.path.join('yolov5','weights','yolov5n.onnx'),
            '6': os.path.join('yolov5','weights','yolov5m.pt'),
            '7': os.path.join('yolov5','weights','yolov5m.onnx'),
            '8': os.path.join('yolov5','weights','yolov5l.pt'),
            '9': os.path.join('yolov5','weights','yolov5l.onnx'),
            '10': os.path.join('yolov5','weights','yolov5x.pt'),
            '11': os.path.join('yolov5','weights','yolov5x.onnx'),
            '12': os.path.join('yolov5','weights','yolov5n6.pt'),
            '13': os.path.join('yolov5','weights','yolov5n6.onnx'),
            '14': os.path.join('yolov5','weights','yolov5s6.pt'),
            '15': os.path.join('yolov5','weights','yolov5s6.onnx'),
            '16': os.path.join('yolov5','weights','yolov5m6.pt'),
            '17': os.path.join('yolov5','weights','yolov5m6.onnx'),
            '18': os.path.join('yolov5','weights','yolov5l6.pt'),
            '19': os.path.join('yolov5','weights','yolov5l6.onnx'),

            # YOLOv5 CoreML
            '120': os.path.join('yolov5','weights','yolov5n.mlmodel'),
            '121': os.path.join('yolov5','weights','yolov5s.mlmodel'),
            '122': os.path.join('yolov5','weights','yolov5x6.mlmodel'),
            '123': os.path.join('yolov5','weights','yolov5m.mlmodel'),
            '124': os.path.join('yolov5','weights','yolov5l.mlmodel'),
            '125': os.path.join('yolov5','weights','yolov5x.mlmodel'),
            '126': os.path.join('yolov5','weights','yolov5n6.mlmodel'),
            '127': os.path.join('yolov5','weights','yolov5s6.mlmodel'),
            '128': os.path.join('yolov5','weights','yolov5m6.mlmodel'),
            '129': os.path.join('yolov5','weights','yolov5l6.mlmodel'),

            # YOLOv6
            '20': os.path.join('yolov6','weights','yolov6n.pt'),
            '21': os.path.join('yolov6','weights','yolov6n.onnx'),
            '22': os.path.join('yolov6','weights','yolov6t.pt'),
            '23': os.path.join('yolov6','weights','yolov6t.onnx'),
            '24': os.path.join('yolov6','weights','yolov6s.pt'),
            '25': os.path.join('yolov6','weights','yolov6s.onnx'),
            '26': os.path.join('yolov6','weights','yolov6m.pt'),
            '27': os.path.join('yolov6','weights','yolov6m.onnx'),
            '28': os.path.join('yolov6','weights','yolov6l.pt'),
            '29': os.path.join('yolov6','weights','yolov6l.onnx'),
            '30': os.path.join('yolov6','weights','yolov6l_relu.pt'),
            '31': os.path.join('yolov6','weights','yolov6l_relu.onnx'),
            '32': os.path.join('yolov6','weights','yolov6s_repopt.pt'),
            '33': os.path.join('yolov6','weights','yolov6s_repopt.onnx'),
            # YOLOv7
            '34': os.path.join('yolov7','weights','yolov7-tiny.pt'),
            '35': os.path.join('yolov7','weights','yolov7-tiny.onnx'),
            '36': os.path.join('yolov7','weights','yolov7.pt'),
            '37': os.path.join('yolov7','weights','yolov7.onnx'),
            '38': os.path.join('yolov7','weights','yolov7x.pt'),
            '39': os.path.join('yolov7','weights','yolov7x.onnx'),
            '40': os.path.join('yolov7','weights','yolov7-w6.pt'),
            '41': os.path.join('yolov7','weights','yolov7-w6.onnx'),
            '42': os.path.join('yolov7','weights','yolov7-e6.pt'),
            '43': os.path.join('yolov7','weights','yolov7-e6.onnx'),
            '44': os.path.join('yolov7','weights','yolov7-d6.pt'),
            '45': os.path.join('yolov7','weights','yolov7-d6.onnx'),
            '46': os.path.join('yolov7','weights','yolov7-e6e.pt'),
            '47': os.path.join('yolov7','weights','yolov7-e6e.onnx'),

            # YOLOv7 CoreML
            '130': os.path.join('yolov7','weights','yolov7-tiny.mlmodel'),
            '131': os.path.join('yolov7','weights','yolov7.mlmodel'),
            '132': os.path.join('yolov7','weights','yolov7x.mlmodel'),
            '133': os.path.join('yolov7','weights','yolov7-w6.mlmodel'),
            '134': os.path.join('yolov7','weights','yolov7-e6.mlmodel'),
            '135': os.path.join('yolov7','weights','yolov7-d6.mlmodel'),
            '136': os.path.join('yolov7','weights','yolov7-e6e.mlmodel'),
            # YOLOR
            '48': os.path.join('yolor','weights','yolor_csp_x.pt'),
            '49': os.path.join('yolor','weights','yolor_csp_x.onnx'),
            '50': os.path.join('yolor','weights','yolor_csp_x_star.pt'),
            '51': os.path.join('yolor','weights','yolor_csp_x_star.onnx'),
            '52': os.path.join('yolor','weights','yolor_csp_star.pt'),
            '53': os.path.join('yolor','weights','yolor_csp_star.onnx'),
            '54': os.path.join('yolor','weights','yolor_csp.pt'),
            '55': os.path.join('yolor','weights','yolor_csp.onnx'),
            '56': os.path.join('yolor','weights','yolor_p6.pt'),
            '57': os.path.join('yolor','weights','yolor_p6.onnx'),
            # YOLOX
            '58': os.path.join('yolox','weights','yolox_l.pth'),
            '59': os.path.join('yolox','weights','yolox_l.onnx'),
            '60': os.path.join('yolox','weights','yolox_nano.pth'),
            '61': os.path.join('yolox','weights','yolox_nano.onnx'),
            '62': os.path.join('yolox','weights','yolox_tiny.pth'),
            '63': os.path.join('yolox','weights','yolox_tiny.onnx'),
            '64': os.path.join('yolox','weights','yolox_darknet.pth'),
            '65': os.path.join('yolox','weights','yolox_darknet.onnx'),
            '66': os.path.join('yolox','weights','yolox_s.pth'),
            '67': os.path.join('yolox','weights','yolox_s.onnx'),
            '68': os.path.join('yolox','weights','yolox_m.pth'),
            '69': os.path.join('yolox','weights','yolox_m.onnx'),
            '70': os.path.join('yolox','weights','yolox_x.pth'),
            '71': os.path.join('yolox','weights','yolox_x.onnx'),
            # YOLOv8
            '72': os.path.join('yolov8','weights','yolov8n.pt'),
            '73': os.path.join('yolov8','weights','yolov8n.onnx'),
            '74': os.path.join('yolov8','weights','yolov8s.pt'),
            '75': os.path.join('yolov8','weights','yolov8s.onnx'),
            '76': os.path.join('yolov8','weights','yolov8m.pt'),
            '77': os.path.join('yolov8','weights','yolov8m.onnx'),
            '78': os.path.join('yolov8','weights','yolov8l.pt'),
            '79': os.path.join('yolov8','weights','yolov8l.onnx'),
            '80': os.path.join('yolov8','weights','yolov8x.pt'),
            '81': os.path.join('yolov8','weights','yolov8x.onnx'),
            '139': os.path.join('yolov8','weights','yolov8n.mlmodel'),
            '140': os.path.join('yolov8','weights','yolov8s.mlmodel'),
            '141': os.path.join('yolov8','weights','yolov8m.mlmodel'),
            '142': os.path.join('yolov8','weights','yolov8l.mlmodel'),
            '143': os.path.join('yolov8','weights','yolov8x.mlmodel'),

            # Text Detectors (easyocr network names, not file paths)
            '82': 'craft',
            '83': 'dbnet18',
            # YOLO NAS
            '160':os.path.join('yolonas','weights','yolo_nas_s.pth'),
            '161':os.path.join('yolonas','weights','yolo_nas_m.pth'),
            '162':os.path.join('yolonas','weights','yolo_nas_l.pth'),

            # YOLOv9
            '164':os.path.join('yolov9','weights','yolov9-c-converted.pt'),
            '165':os.path.join('yolov9','weights','yolov9-e-converted.pt'),
            '166':os.path.join('yolov9','weights','yolov9-c.pt'),
            '167':os.path.join('yolov9','weights','yolov9-e.pt'),
            '168':os.path.join('yolov9','weights','gelan-c.pt'),
            '169':os.path.join('yolov9','weights','gelan-e.pt'),


            # Segmentor
            '171':os.path.join('sam','weights','sam_vit_h_4b8939.pth'),
}

def get_weight_path(model_flag):
    """Resolve a model flag to ``(coreml, onnx, weight)``.

    Returns:
        tuple: coreml -- True for CoreML (.mlmodel) flag ranges;
            onnx -- True when the flag selects an ONNX export;
            weight -- path or network name from the table above.

    Raises:
        ValueError: for flags outside every known range (these previously
            crashed with an UnboundLocalError on ``onnx``/``weight``).
        KeyError: for flags inside a known range but absent from the table
            (e.g. 84), preserving the original lookup behaviour.
    """
    coreml = False
    if model_flag in range(0, 82):
        # PyTorch/ONNX families come in pairs: even flag = .pt/.pth, odd = .onnx.
        # This single branch replaces six duplicated range checks.
        onnx = model_flag % 2 != 0
    elif model_flag in range(82, 85):
        # easyocr text-detector names -- never ONNX.
        onnx = False
    elif (model_flag in range(120, 137) or model_flag in range(139, 145)
          or model_flag in range(160, 163)):
        # CoreML ranges. NOTE(review): 160-162 map to YOLO-NAS .pth files yet
        # were flagged coreml=True in the original code; behaviour preserved
        # (the flag is unused by the YOLO-NAS branch in detector.py).
        onnx = False
        coreml = True
    elif model_flag in range(164, 170):
        # YOLOv9: PyTorch checkpoints only.
        onnx = False
    else:
        raise ValueError(f"Unknown model_flag: {model_flag}")
    return coreml, onnx, weights[str(model_flag)]
        


================================================
FILE: asone/detectors/yolonas/__init__.py
================================================
from .yolonas import YOLOnasDetector
__all__ = ['YOLOnasDetector']

================================================
FILE: asone/detectors/yolonas/yolonas.py
================================================
import os
from asone.utils import get_names
import numpy as np
import warnings
import torch
import onnxruntime
from asone import utils
import super_gradients
import numpy as np
from super_gradients.training.processing import DetectionCenterPadding, StandardizeImage, NormalizeImage, ImagePermute, ComposeProcessing, DetectionLongestMaxSizeRescale
from super_gradients.training import models
from super_gradients.common.object_names import Models

from asone.utils.utils import PathResolver


class_names = [""]


class YOLOnasDetector:
    """YOLO-NAS object detector backed by super-gradients checkpoints.

    Supports model flags 160 (YOLO-NAS-S), 161 (YOLO-NAS-M) and
    162 (YOLO-NAS-L). ONNX inference is not implemented for this detector.
    """

    def __init__(self,
                 model_flag,
                 weights=None,
                 cfg=None,
                 use_onnx=True,
                 use_cuda=True,
                 num_classes=80
                 ):
        """Initialize the detector and load the checkpoint.

        Args:
            model_flag: Model identifier; must be 160, 161 or 162.
            weights: Path to the checkpoint file. Downloaded via
                ``utils.download_weights`` if not present locally.
            cfg: Unused; kept for interface parity with other detectors.
            use_onnx: ONNX runtime flag; not supported (``detect`` raises
                ``NotImplementedError`` when True).
            use_cuda: Use CUDA if available, else fall back to CPU.
            num_classes: Number of classes in the checkpoint head.

        Raises:
            ValueError: If ``weights`` is None or ``model_flag`` is not
                a supported YOLO-NAS flag.
        """
        self.model_flag = model_flag
        if weights is None:
            # Previously os.path.exists(None) raised a confusing TypeError.
            raise ValueError("weights path must be provided for YOLOnasDetector")
        if not os.path.exists(weights):
            utils.download_weights(weights)

        self.num_classes = num_classes
        self.device = 'cuda' if use_cuda and torch.cuda.is_available() else 'cpu'
        self.use_onnx = use_onnx

        with PathResolver():
            # Load Model
            self.model = self.load_model(weights=weights)

    def load_model(self, weights):
        """Load the YOLO-NAS variant selected by ``self.model_flag``.

        Raises:
            ValueError: For an unsupported model flag. (Previously the
                function fell through and returned an unbound ``model``,
                raising ``UnboundLocalError``.)
        """
        flag_to_arch = {
            160: Models.YOLO_NAS_S,
            161: Models.YOLO_NAS_M,
            162: Models.YOLO_NAS_L,
        }
        try:
            arch = flag_to_arch[self.model_flag]
        except KeyError:
            raise ValueError(
                f"Unsupported YOLO-NAS model flag: {self.model_flag}") from None
        return models.get(arch,
                          checkpoint_path=weights,
                          num_classes=self.num_classes).to(self.device)

    def detect(self, image,
               input_shape: tuple = (640, 640),
               conf_thres: float = 0.25,
               iou_thres: float = 0.45,
               max_det: int = 1000,
               filter_classes=None,
               agnostic_nms: bool = True,
               with_p6: bool = False,
               return_image=False) -> list:
        """Run detection on a single image.

        Args:
            image: Input image as an HxWxC array (BGR/RGB ndarray).
            input_shape: Nominal network input size; preprocessing below
                rescales/pads to 640x640 for the COCO checkpoint.
            conf_thres: Confidence threshold.
            iou_thres: NMS IoU threshold.
            max_det, filter_classes, agnostic_nms, with_p6: Unused; kept
                for interface parity with other detectors.
            return_image: If True, return the original image instead of
                the image-info dict.

        Returns:
            tuple: ``(predictions, original_image)`` when ``return_image``
            is True, else ``(predictions, image_info)``. ``predictions``
            is an (N, 6) array of ``[x1, y1, x2, y2, confidence, class_id]``.

        Raises:
            NotImplementedError: If the detector was created with
                ``use_onnx=True`` (no ONNX path exists).
        """
        if self.num_classes == 80:
            # The stock COCO checkpoint needs explicit preprocessing params.
            self.model.set_dataset_processing_params(
                class_names=class_names,
                image_processor=ComposeProcessing(
                    [
                        DetectionLongestMaxSizeRescale(output_shape=(636, 636)),
                        DetectionCenterPadding(output_shape=(640, 640), pad_value=114),
                        StandardizeImage(max_value=255.0),
                        ImagePermute(permutation=(2, 0, 1)),
                    ]
                ),
                iou=iou_thres, conf=conf_thres,
            )
        original_image = image

        # Inference
        if self.use_onnx:
            # The ONNX path was never implemented; previously this silently
            # fell through and returned unbound variables (NameError).
            raise NotImplementedError(
                "ONNX inference is not supported for YOLO-NAS")

        detections = list(self.model.predict(image))
        image_info = {
            'width': original_image.shape[1],
            'height': original_image.shape[0],
        }
        pred = detections[0].prediction
        # Assemble (N, 6) rows: [x1, y1, x2, y2, confidence, class_id].
        confidence = pred.confidence.reshape(-1, 1)
        labels = pred.labels.reshape(-1, 1)
        predictions = np.concatenate(
            (pred.bboxes_xyxy, confidence, labels), axis=1)

        if return_image:
            return predictions, original_image
        return predictions, image_info
        


================================================
FILE: asone/detectors/yolor/__init__.py
================================================
from .yolor_detector import YOLOrDetector
__all__ = ['YOLOrDetector']

================================================
FILE: asone/detectors/yolor/cfg/yolor_csp.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=512
height=512
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1

#cutmix=1
mosaic=1


# ============ Backbone ============ #

# Stem 

# 0
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=silu

# P1

# Downsample

[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

# 4 (previous+1+3k)
[shortcut]
from=-3
activation=linear

# P2

# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

# Merge [-1, -(3k+4)]

[route]
layers = -1,-10

# Transition last

# 17 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# P3

# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(4+3k)]

[route]
layers = -1,-28

# Transition last

# 48 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# P4

# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-28

# Transition last

# 79 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# P5

# Downsample

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-16

# Transition last

# 98 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=silu

# ============ End of Backbone ============ #

# ============ Neck ============ #

# CSPSPP

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6
### End SPP ###

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[route]
layers = -1, -13

# 113 (previous+6+5+2k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# End of CSPSPP


# FPN-4

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 79

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -6

# Transition last

# 127 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu


# FPN-3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 48

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -6

# Transition last

# 141 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu


# PAN-4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=silu

[route]
layers = -1, 127

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[route]
layers = -1,-6

# Transition last

# 152 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu


# PAN-5

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=silu

[route]
layers = -1, 113

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[route]
layers = -1,-6

# Transition last

# 163 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# ============ End of Neck ============ #

# 164
[implicit_add]
filters=256

# 165
[implicit_add]
filters=512

# 166
[implicit_add]
filters=1024

# 167
[implicit_mul]
filters=255

# 168
[implicit_mul]
filters=255

# 169
[implicit_mul]
filters=255

# ============ Head ============ #

# YOLO-3

[route]
layers = 141

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[shift_channels]
from=164

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=167

[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-4

[route]
layers = 152

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[shift_channels]
from=165

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=168

[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-5

[route]
layers = 163

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=silu

[shift_channels]
from=166

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=169

[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


================================================
FILE: asone/detectors/yolor/cfg/yolor_csp_x.cfg
================================================
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=512
height=512
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1

#cutmix=1
mosaic=1


# ============ Backbone ============ #

# Stem 

# 0
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=silu

# P1

# Downsample

[convolutional]
batch_normalize=1
filters=80
size=3
stride=2
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=40
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
pad=1
activation=silu

# 4 (previous+1+3k)
[shortcut]
from=-3
activation=linear

# P2

# Downsample

[convolutional]
batch_normalize=1
filters=160
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=80
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=80
size=1
stride=1
pad=1
activation=silu

# Merge [-1, -(3k+4)]

[route]
layers = -1,-13

# Transition last

# 20 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

# P3

# Downsample

[convolutional]
batch_normalize=1
filters=320
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(4+3k)]

[route]
layers = -1,-34

# Transition last

# 57 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# P4

# Downsample

[convolutional]
batch_normalize=1
filters=640
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-34

# Transition last

# 94 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# P5

# Downsample

[convolutional]
batch_normalize=1
filters=1280
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-19

# Transition last

# 116 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=1280
size=1
stride=1
pad=1
activation=silu

# ============ End of Backbone ============ #

# ============ Neck ============ #

# CSPSPP

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6
### End SPP ###

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[route]
layers = -1, -15

# 133 (previous+6+5+2k)
[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# End of CSPSPP


# FPN-4

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 94

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -8

# Transition last

# 149 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu


# FPN-3

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 57

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=160
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=160
activation=silu

[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=160
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -8

# Transition last

# 165 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=160
size=1
stride=1
pad=1
activation=silu


# PAN-4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=320
activation=silu

[route]
layers = -1, 149

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[route]
layers = -1,-8

# Transition last

# 178 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu


# PAN-5

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=640
activation=silu

[route]
layers = -1, 133

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[route]
layers = -1,-8

# Transition last

# 191 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# ============ End of Neck ============ #

# 192
[implicit_add]
filters=320

# 193
[implicit_add]
filters=640

# 194
[implicit_add]
filters=1280

# 195
[implicit_mul]
filters=255

# 196
[implicit_mul]
filters=255

# 197
[implicit_mul]
filters=255

# ============ Head ============ #

# YOLO-3

[route]
layers = 165

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[shift_channels]
from=192

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=195

[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-4

[route]
layers = 178

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[shift_channels]
from=193

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=196

[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-5

[route]
layers = 191

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1280
activation=silu

[shift_channels]
from=194

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=197

[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


================================================
FILE: asone/detectors/yolor/cfg/yolor_p6.cfg
================================================
[net]
batch=64
subdivisions=8
width=1280
height=1280
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1

mosaic=1


# ============ Backbone ============ #

# Stem 

# P1

# Downsample

# 0
[reorg]

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu


# P2

# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first
#
#[convolutional]
#batch_normalize=1
#filters=64
#size=1
#stride=1
#pad=1
#activation=silu

# Merge [-1, -(3k+3)]

[route]
layers = -1,-12

# Transition last

# 16 (previous+6+3k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu


# P3

# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first
#
#[convolutional]
#batch_normalize=1
#filters=128
#size=1
#stride=1
#pad=1
#activation=silu

# Merge [-1, -(3k+3)]

[route]
layers = -1,-24

# Transition last

# 43 (previous+6+3k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu


# P4

# Downsample

[convolutional]
batch_normalize=1
filters=384
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first
#
#[convolutional]
#batch_normalize=1
#filters=192
#size=1
#stride=1
#pad=1
#activation=silu

# Merge [-1, -(3k+3)]

[route]
layers = -1,-24

# Transition last

# 70 (previous+6+3k)
[convolutional]
batch_normalize=1
filters=384
size=1
stride=1
pad=1
activation=silu


# P5

# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first
#
#[convolutional]
#batch_normalize=1
#filters=256
#size=1
#stride=1
#pad=1
#activation=silu

# Merge [-1, -(3k+3)]

[route]
layers = -1,-12

# Transition last

# 85 (previous+6+3k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu


# P6

# Downsample

[convolutional]
batch_normalize=1
filters=640
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first
#
#[convolutional]
#batch_normalize=1
#filters=320
#size=1
#stride=1
#pad=1
#activation=silu

# Merge [-1, -(3k+3)]

[route]
layers = -1,-12

# Transition last

# 100 (previous+6+3k)
[convolutional]
batch_normalize=1
filters=640
size=1
stride=1
pad=1
activation=silu

# ============ End of Backbone ============ #

# ============ Neck ============ #

# CSPSPP

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6
### End SPP ###

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[route]
layers = -1, -13

# 115 (previous+6+5+2k)
[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# End of CSPSPP


# FPN-5

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 85

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -8

# Transition last

# 131 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu


# FPN-4

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 70

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -8

# Transition last

# 147 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu


# FPN-3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 43

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -8

# Transition last

# 163 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu


# PAN-4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=192
activation=silu

[route]
layers = -1, 147

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=192
activation=silu

[route]
layers = -1,-8

# Transition last

# 176 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=192
size=1
stride=1
pad=1
activation=silu


# PAN-5

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=silu

[route]
layers = -1, 131

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[route]
layers = -1,-8

# Transition last

# 189 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu


# PAN-6

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=320
activation=silu

[route]
layers = -1, 115

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=320
activation=silu

[route]
layers = -1,-8

# Transition last

# 202 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=320
size=1
stride=1
pad=1
activation=silu

# ============ End of Neck ============ #

# 203
[implicit_add]
filters=256

# 204
[implicit_add]
filters=384

# 205
[implicit_add]
filters=512

# 206
[implicit_add]
filters=640

# 207
[implicit_mul]
filters=255

# 208
[implicit_mul]
filters=255

# 209
[implicit_mul]
filters=255

# 210
[implicit_mul]
filters=255

# ============ Head ============ #

# YOLO-3

[route]
layers = 163

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[shift_channels]
from=203

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=207

[yolo]
mask = 0,1,2
anchors = 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-4

[route]
layers = 176

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=384
activation=silu

[shift_channels]
from=204

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=208

[yolo]
mask = 3,4,5
anchors = 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-5

[route]
layers = 189

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[shift_channels]
from=205

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=209

[yolo]
mask = 6,7,8
anchors = 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6


# YOLO-6

[route]
layers = 202

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=640
activation=silu

[shift_channels]
from=206

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=210

[yolo]
mask = 9,10,11
anchors = 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792
classes=80
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

# ============ End of Head ============ #


================================================
FILE: asone/detectors/yolor/models/__init__.py
================================================



================================================
FILE: asone/detectors/yolor/models/common.py
================================================
# This file contains modules common to various models

import math

import numpy as np
import torch
import torch.nn as nn
from PIL import Image, ImageDraw

from asone.detectors.yolor.utils.datasets import letterbox
from asone.detectors.yolor.utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from asone.detectors.yolor.utils.plots import color_list

try:
    from pytorch_wavelets import DWTForward, DWTInverse

    class DWT(nn.Module):
        """Wavelet downsample: (b, c, w, h) -> (b, 4c, w/2, h/2).

        Single-level Haar ('db1') forward DWT; the low-pass band is halved and
        the three high-pass bands are rescaled (x/2 + 0.5) before concatenation
        along the channel dimension.
        """

        def __init__(self):
            super(DWT, self).__init__()
            self.xfm = DWTForward(J=1, wave='db1', mode='zero')

        def forward(self, x):
            b, c, w, h = x.shape
            yl, yh = self.xfm(x)
            # yl: low-pass band; yh[0]: the three high-pass bands, flattened per channel.
            return torch.cat([yl/2., yh[0].view(b, -1, w//2, h//2)/2.+.5], 1)
except ImportError:
    # pytorch_wavelets is an optional dependency; fall back to a plain
    # space-to-depth reorg that yields the same output shape (b, 4c, w/2, h/2).
    # Catch only ImportError so real bugs are not silently swallowed.
    class DWT(nn.Module):  # use ReOrg instead
        def __init__(self):
            super(DWT, self).__init__()

        def forward(self, x):
            return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)


class ImplicitA(nn.Module):
    """Learned additive implicit knowledge (YOLOR).

    Holds a (1, channel, 1, 1) parameter initialized ~N(0, 0.02) and adds it
    to the input, broadcasting over batch and spatial dimensions.
    """

    def __init__(self, channel):
        super(ImplicitA, self).__init__()
        self.channel = channel
        bias = torch.zeros(1, channel, 1, 1)
        nn.init.normal_(bias, std=.02)
        self.implicit = nn.Parameter(bias)

    def forward(self, x):
        # Broadcast the learned per-channel offset across the whole feature map.
        return x + self.implicit.expand_as(x)


class ImplicitM(nn.Module):
    """Learned multiplicative implicit knowledge (YOLOR).

    Holds a (1, channel, 1, 1) parameter initialized ~N(1, 0.02) and scales
    the input by it, broadcasting over batch and spatial dimensions.
    """

    def __init__(self, channel):
        super(ImplicitM, self).__init__()
        self.channel = channel
        scale = torch.ones(1, channel, 1, 1)
        nn.init.normal_(scale, mean=1., std=.02)
        self.implicit = nn.Parameter(scale)

    def forward(self, x):
        # Broadcast the learned per-channel gain across the whole feature map.
        return x * self.implicit.expand_as(x)
    
    
class ReOrg(nn.Module):
    """Space-to-depth reorg: (b, c, w, h) -> (b, 4c, w/2, h/2).

    Each 2x2 spatial neighborhood is split into four channel groups
    (even/even, odd/even, even/odd, odd/odd sampling), halving resolution.
    """

    def __init__(self):
        super(ReOrg, self).__init__()

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        even_even = x[..., ::2, ::2]
        odd_even = x[..., 1::2, ::2]
        even_odd = x[..., ::2, 1::2]
        odd_odd = x[..., 1::2, 1::2]
        return torch.cat((even_even, odd_even, even_odd, odd_odd), dim=1)

def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel size k when p is not explicitly given.

    k may be an int or a per-dimension sequence; an explicit p wins unchanged.
    """
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [v // 2 for v in k]  # per-dimension auto-pad


def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise-style convolution: a Conv whose group count is gcd(c1, c2)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)


class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> SiLU (or identity).

    Args mirror nn.Conv2d: ch_in, ch_out, kernel, stride, padding, groups;
    padding defaults to 'same' via autopad. act=False disables the activation.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(Conv, self).__init__()
        padding = autopad(k, p)
        self.conv = nn.Conv2d(c1, c2, k, s, padding, groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act else nn.Identity()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)

    def fuseforward(self, x):
        # Used after conv+bn fusion: batch norm is folded into the conv weights.
        return self.act(self.conv(x))


class ConvSig(nn.Module):
    """Convolution followed by a sigmoid gate (no batch norm).

    Args mirror nn.Conv2d: ch_in, ch_out, kernel, stride, padding, groups;
    act=False replaces the sigmoid with identity.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSig, self).__init__()
        padding = autopad(k, p)
        self.conv = nn.Conv2d(c1, c2, k, s, padding, groups=g, bias=False)
        self.act = nn.Sigmoid() if act else nn.Identity()

    def forward(self, x):
        return self.act(self.conv(x))

    def fuseforward(self, x):
        # Same as forward: there is no bn to fuse, kept for API symmetry.
        return self.act(self.conv(x))


class ConvSqu(nn.Module):
    """Convolution followed by SiLU, without batch norm (squeeze-style conv).

    Args mirror nn.Conv2d: ch_in, ch_out, kernel, stride, padding, groups;
    act=False replaces the activation with identity.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(ConvSqu, self).__init__()
        padding = autopad(k, p)
        self.conv = nn.Conv2d(c1, c2, k, s, padding, groups=g, bias=False)
        self.act = nn.SiLU() if act else nn.Identity()

    def forward(self, x):
        return self.act(self.conv(x))

    def fuseforward(self, x):
        # Same as forward: there is no bn to fuse, kept for API symmetry.
        return self.act(self.conv(x))


class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add.

    The residual is added only when shortcut is requested and c1 == c2.
    e is the hidden-channel expansion ratio; g is the 3x3 conv group count.
    """

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out


class BottleneckG(nn.Module):
    """Grouped bottleneck: like Bottleneck but the 1x1 conv is also grouped.

    The residual is added only when shortcut is requested and c1 == c2.
    e is the hidden-channel expansion ratio; g is the group count.
    """

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super(BottleneckG, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1, g=g)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out


class BottleneckCSP(nn.Module):
    """CSP bottleneck: a deep branch of n Bottlenecks and a 1x1 skip branch,
    concatenated, normalized, activated, then fused by a final 1x1 Conv.

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super(BottleneckCSP, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        deep = self.cv3(self.m(self.cv1(x)))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.cv4(self.act(self.bn(merged)))


class BottleneckCSPF(nn.Module):
    """CSP bottleneck variant without the 1x1 conv (cv3) on the deep branch.

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super(BottleneckCSPF, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        #self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        deep = self.m(self.cv1(x))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.cv4(self.act(self.bn(merged)))


class BottleneckCSPL(nn.Module):
    """CSP bottleneck variant without the final fuse conv (cv4): the output is
    the activated, batch-normed concatenation with 2 * int(c2 * e) channels.

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super(BottleneckCSPL, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        #self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        deep = self.cv3(self.m(self.cv1(x)))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.act(self.bn(merged))


class BottleneckCSPLG(nn.Module):
    """Grouped CSP bottleneck without a final fuse conv: the deep branch runs
    at g * hidden channels through grouped BottleneckG blocks; output is the
    activated, batch-normed concatenation with (1 + g) * int(c2 * e) channels.

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=3, e=0.25):
        super(BottleneckCSPLG, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, g * hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(g * hidden, g * hidden, 1, 1, groups=g, bias=False)
        #self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d((1 + g) * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(BottleneckG(g * hidden, g * hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        deep = self.cv3(self.m(self.cv1(x)))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.act(self.bn(merged))


class BottleneckCSPSE(nn.Module):
    """CSP bottleneck with a squeeze-and-excitation gate applied to the input
    before the CSP split (input is rescaled, not residually combined).

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super(BottleneckCSPSE, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.cs = ConvSqu(c1, c1 // 8, 1, 1)
        self.cvsig = ConvSig(c1 // 8, c1, 1, 1)
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        # Squeeze-and-excitation: reweight channels by a gated global descriptor.
        gate = self.cvsig(self.cs(self.avg_pool(x)))
        x = x * gate.expand_as(x)
        deep = self.cv3(self.m(self.cv1(x)))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.cv4(self.act(self.bn(merged)))


class BottleneckCSPSEA(nn.Module):
    """CSP bottleneck with an additive squeeze-and-excitation gate: the gated
    input is added back to the original (x + x * gate) before the CSP split.

    https://github.com/WongKinYiu/CrossStagePartialNetworks
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super(BottleneckCSPSEA, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.cs = ConvSqu(c1, c1 // 8, 1, 1)
        self.cvsig = ConvSig(c1 // 8, c1, 1, 1)
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        # Residual squeeze-and-excitation: keep the input and add its gated copy.
        gate = self.cvsig(self.cs(self.avg_pool(x)))
        x = x + x * gate.expand_as(x)
        deep = self.cv3(self.m(self.cv1(x)))
        skip = self.cv2(x)
        merged = torch.cat((deep, skip), dim=1)
        return self.cv4(self.act(self.bn(merged)))


class BottleneckCSPSAM(nn.Module):
    # CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    # with a multiplicative spatial-attention gate on the input.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cvsig = ConvSig(c1, c1, 1, 1)              # sigmoid-activated attention gate
        self.cv1 = Conv(c1, c_, 1, 1)                   # entry conv of the dense branch
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass (partial) branch
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # exit conv of the dense branch
        self.cv4 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        gated = x * self.cvsig(x)  # attention-gate the input
        dense = self.cv3(self.m(self.cv1(gated)))
        bypass = self.cv2(gated)
        return self.cv4(self.act(self.bn(torch.cat((dense, bypass), dim=1))))


class BottleneckCSPSAMA(nn.Module):
    # CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    # with an *additive* spatial-attention gate on the input (x + x * gate).
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cvsig = ConvSig(c1, c1, 1, 1)              # sigmoid-activated attention gate
        self.cv1 = Conv(c1, c_, 1, 1)                   # entry conv of the dense branch
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass (partial) branch
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # exit conv of the dense branch
        self.cv4 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        gated = x + x * self.cvsig(x)  # keep identity, add gated signal
        dense = self.cv3(self.m(self.cv1(gated)))
        bypass = self.cv2(gated)
        return self.cv4(self.act(self.bn(torch.cat((dense, bypass), dim=1))))


class BottleneckCSPSAMB(nn.Module):
    # CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    # with the spatial-attention gate applied to the *output* (c2 channels).
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cvsig = ConvSig(c2, c2, 1, 1)              # gate over the fused output
        self.cv1 = Conv(c1, c_, 1, 1)                   # entry conv of the dense branch
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass (partial) branch
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # exit conv of the dense branch
        self.cv4 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        dense = self.cv3(self.m(self.cv1(x)))
        bypass = self.cv2(x)
        fused = self.cv4(self.act(self.bn(torch.cat((dense, bypass), dim=1))))
        return fused * self.cvsig(fused)  # gate computed on the fused output


class BottleneckCSPGC(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    # augmented with a global-context post-block: the fused CSP output gets a
    # residual channel correction computed by softmax-attention spatial pooling.
    # NOTE(review): structure appears to follow GCNet's context block — confirm.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSPGC, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

        # Channel transform applied to the pooled (N, C, 1, 1) context vector
        # before it is broadcast-added back onto the CSP output.
        self.channel_add_conv = nn.Sequential(
            nn.Conv2d(c2, c2, kernel_size=1),
            nn.LayerNorm([c2, 1, 1]),
            nn.ReLU(inplace=True),  # yapf: disable
            nn.Conv2d(c2, c2, kernel_size=1))

        self.conv_mask = nn.Conv2d(c2, 1, kernel_size=1)  # spatial attention logits (one per position)
        self.softmax = nn.Softmax(dim=2)  # normalizes over the flattened H*W positions

    def spatial_pool(self, x):
        """Softmax-attention global pooling: return a (N, C, 1, 1) context vector."""
        batch, channel, height, width = x.size()

        input_x = x
        # [N, C, H * W]
        input_x = input_x.view(batch, channel, height * width)
        # [N, 1, C, H * W]
        input_x = input_x.unsqueeze(1)
        # [N, 1, H, W]
        context_mask = self.conv_mask(x)
        # [N, 1, H * W]
        context_mask = context_mask.view(batch, 1, height * width)
        # [N, 1, H * W]  (attention weights over spatial positions)
        context_mask = self.softmax(context_mask)
        # [N, 1, H * W, 1]
        context_mask = context_mask.unsqueeze(-1)
        # [N, 1, C, 1]  (attention-weighted sum of features over space)
        context = torch.matmul(input_x, context_mask)
        # [N, C, 1, 1]
        context = context.view(batch, channel, 1, 1)

        return context

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))  # dense branch
        y2 = self.cv2(x)  # bypass branch
        y = self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))  # CSP fusion

        # Residual global-context correction, broadcast over all positions.
        return y + self.channel_add_conv(self.spatial_pool(y))


class BottleneckCSPDNL(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    # followed by a non-local self-attention refinement of the fused output.
    # NOTE(review): the whitened query/key attention plus the separate softmax
    # mask branch appears to follow Disentangled Non-Local (DNL) — confirm.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSPDNL, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

        # Attention head operating on the fused c2-channel CSP output:
        self.conv_query = nn.Conv2d(c2, c2, kernel_size=1)  # query projection
        self.conv_key = nn.Conv2d(c2, c2, kernel_size=1)  # key projection
        self.conv_value = nn.Conv2d(c2, c2, kernel_size=1, bias=False)  # value projection
        self.conv_out = None  # unused; kept for interface/checkpoint parity
        self.scale = math.sqrt(c2)  # dot-product scaling factor
        self.temperature = 0.05  # extra softmax sharpening
        self.softmax = nn.Softmax(dim=2)
        self.gamma = nn.Parameter(torch.zeros(1))  # learned attention weight, zero at init
        self.conv_mask = nn.Conv2d(c2, 1, kernel_size=1)  # unary (global-context) mask branch

    def forward(self, x):
        # Standard CSP fusion first.
        y1 = self.cv3(self.m(self.cv1(x)))  # dense branch
        y2 = self.cv2(x)  # bypass branch
        y = self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))

        # [N, C, T, H, W]
        residual = y
        # [N, C, T, H', W']
        input_x = y
        # [N, C', T, H, W]
        query = self.conv_query(y)
        # [N, C', T, H', W']
        key = self.conv_key(input_x)
        value = self.conv_value(input_x)
        # [N, C', H x W]
        query = query.view(query.size(0), query.size(1), -1)
        # [N, C', H' x W']
        key = key.view(key.size(0), key.size(1), -1)
        value = value.view(value.size(0), value.size(1), -1)
        # channel whitening: zero-mean over spatial positions (in-place on conv outputs)
        key_mean = key.mean(2).unsqueeze(2)
        query_mean = query.mean(2).unsqueeze(2)
        key -= key_mean
        query -= query_mean
        # [N, T x H x W, T x H' x W']  pairwise similarity of whitened features
        sim_map = torch.bmm(query.transpose(1, 2), key)
        sim_map = sim_map/self.scale
        sim_map = sim_map/self.temperature
        sim_map = self.softmax(sim_map)
        # [N, T x H x W, C']  attention-weighted aggregation of values
        out_sim = torch.bmm(sim_map, value.transpose(1, 2))
        # [N, C', T x H x W]
        out_sim = out_sim.transpose(1, 2)
        # [N, C', T,  H, W]
        out_sim = out_sim.view(out_sim.size(0), out_sim.size(1), *y.size()[2:]).contiguous()
        out_sim = self.gamma * out_sim
        # [N, 1, H', W']  unary branch: one attention weight per position
        mask = self.conv_mask(input_x)
        # [N, 1, H'x W']
        mask = mask.view(mask.size(0), mask.size(1), -1)
        mask = self.softmax(mask)
        # [N, C, 1, 1]  global context vector, broadcast-added below
        out_gc = torch.bmm(value, mask.permute(0,2,1)).unsqueeze(-1).contiguous()

        return out_sim + out_gc + residual


class BottleneckCSP2(nn.Module):
    # CSP Bottleneck, variant 2: one shared 1x1 stem feeds both branches.
    # (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)                   # shared stem
        self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        stem = self.cv1(x)
        dense = self.m(stem)
        bypass = self.cv2(stem)
        return self.cv3(self.act(self.bn(torch.cat((dense, bypass), dim=1))))


class BottleneckCSP2SAM(nn.Module):
    # BottleneckCSP2 with a spatial-attention gate applied after the shared stem.
    # (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)                   # shared stem
        self.cvsig = ConvSig(c_, c_, 1, 1)              # sigmoid-activated gate
        self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        stem = self.cv1(x)
        stem = stem * self.cvsig(stem).contiguous()  # gate the stem features
        dense = self.m(stem)
        bypass = self.cv2(stem)
        return self.cv3(self.act(self.bn(torch.cat((dense, bypass), dim=1))))


class VoVCSP(nn.Module):
    # VoV-style CSP: operates on the second half of the input channels only.
    # (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2)  # hidden channels
        self.cv1 = Conv(c1 // 2, c_ // 2, 3, 1)
        self.cv2 = Conv(c_ // 2, c_ // 2, 3, 1)
        self.cv3 = Conv(c_, c2, 1, 1)

    def forward(self, x):
        # Discard the first channel half, chain two 3x3 convs, fuse both stages.
        _, half = x.chunk(2, dim=1)
        first = self.cv1(half)
        second = self.cv2(first)
        return self.cv3(torch.cat((first, second), dim=1))


class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13)):
        """Pool the input at several kernel sizes and fuse the concatenation."""
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        # stride-1 pools with "same" padding keep the spatial size constant
        self.m = nn.ModuleList(nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k)

    def forward(self, x):
        x = self.cv1(x)
        pooled = [pool(x) for pool in self.m]
        return self.cv2(torch.cat([x, *pooled], 1))


class SPPCSP(nn.Module):
    # CSP SPP https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
        super().__init__()
        c_ = int(2 * c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = Conv(c_, c_, 3, 1)
        self.cv4 = Conv(c_, c_, 1, 1)
        self.m = nn.ModuleList(nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k)
        self.cv5 = Conv(4 * c_, c_, 1, 1)
        self.cv6 = Conv(c_, c_, 3, 1)
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.SiLU()
        self.cv7 = Conv(2 * c_, c2, 1, 1)

    def forward(self, x):
        pre = self.cv4(self.cv3(self.cv1(x)))  # pre-SPP conv stack
        pyramid = torch.cat([pre] + [pool(pre) for pool in self.m], 1)
        spp = self.cv6(self.cv5(pyramid))      # post-SPP conv stack
        bypass = self.cv2(x)
        return self.cv7(self.act(self.bn(torch.cat((spp, bypass), dim=1))))


class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        """Space-to-depth stem: pack each 2x2 pixel patch into channels, then conv."""
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        # The four 2x2 sub-grids, stacked along the channel axis in fixed order.
        patches = (x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2])
        return self.conv(torch.cat(patches, 1))


class MP(nn.Module):
    """Max-pool downsampling: kernel size and stride are both k (default 2)."""

    def __init__(self, k=2):
        super().__init__()
        self.m = nn.MaxPool2d(kernel_size=k, stride=k)

    def forward(self, x):
        return self.m(x)


class DownD(nn.Module):
    # Downsampling block: strided conv path plus an average-pool shortcut, summed.
    def __init__(self, c1, c2, n=1, k=2):
        super().__init__()
        c_ = int(c1)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c_, 3, k)  # strided 3x3 performs the downsampling
        self.cv3 = Conv(c_, c2, 1, 1)
        self.cv4 = Conv(c1, c2, 1, 1)  # shortcut projection
        self.ap = nn.AvgPool2d(kernel_size=k, stride=k)

    def forward(self, x):
        main = self.cv3(self.cv2(self.cv1(x)))
        shortcut = self.cv4(self.ap(x))
        return main + shortcut


class DownC(nn.Module):
    # Downsampling block: strided conv half plus max-pooled half, concatenated.
    def __init__(self, c1, c2, n=1, k=2):
        super().__init__()
        c_ = int(c1)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2 // 2, 3, k)  # strided conv half of the output
        self.cv3 = Conv(c1, c2 // 2, 1, 1)  # pooled half of the output
        self.mp = nn.MaxPool2d(kernel_size=k, stride=k)

    def forward(self, x):
        conv_half = self.cv2(self.cv1(x))
        pool_half = self.cv3(self.mp(x))
        return torch.cat((conv_half, pool_half), dim=1)


class DNL(nn.Module):
    # Non-local self-attention block followed by a convolution.
    # NOTE(review): the whitened query/key attention plus the separate softmax
    # mask branch appears to follow Disentangled Non-Local (DNL) — confirm.
    def __init__(self, c1, c2, k=3, s=1):
        super(DNL, self).__init__()
        c_ = int(c1)  # hidden channels

        # Attention projections (all 1x1 convs over the c1 input channels).
        self.conv_query = nn.Conv2d(c1, c_, kernel_size=1)
        self.conv_key = nn.Conv2d(c1, c_, kernel_size=1)

        self.conv_value = nn.Conv2d(c1, c1, kernel_size=1, bias=False)
        self.conv_out = None  # unused; kept for interface/checkpoint parity

        self.scale = math.sqrt(c_)  # dot-product scaling factor
        self.temperature = 0.05  # extra softmax sharpening

        self.softmax = nn.Softmax(dim=2)

        self.gamma = nn.Parameter(torch.zeros(1))  # learned attention weight, zero at init

        self.conv_mask = nn.Conv2d(c1, 1, kernel_size=1)  # unary (global-context) mask branch

        self.cv = Conv(c1, c2, k, s)  # output convolution

    def forward(self, x):

        # [N, C, T, H, W]
        residual = x

        # [N, C, T, H', W']
        input_x = x

        # [N, C', T, H, W]
        query = self.conv_query(x)

        # [N, C', T, H', W']
        key = self.conv_key(input_x)
        value = self.conv_value(input_x)

        # [N, C', H x W]
        query = query.view(query.size(0), query.size(1), -1)

        # [N, C', H' x W']
        key = key.view(key.size(0), key.size(1), -1)
        value = value.view(value.size(0), value.size(1), -1)

        # channel whitening: zero-mean over spatial positions (in-place on conv outputs)
        key_mean = key.mean(2).unsqueeze(2)
        query_mean = query.mean(2).unsqueeze(2)
        key -= key_mean
        query -= query_mean

        # [N, T x H x W, T x H' x W']  pairwise similarity of whitened features
        sim_map = torch.bmm(query.transpose(1, 2), key)
        sim_map = sim_map/self.scale
        sim_map = sim_map/self.temperature
        sim_map = self.softmax(sim_map)

        # [N, T x H x W, C']  attention-weighted aggregation of values
        out_sim = torch.bmm(sim_map, value.transpose(1, 2))

        # [N, C', T x H x W]
        out_sim = out_sim.transpose(1, 2)

        # [N, C', T,  H, W]
        out_sim = out_sim.view(out_sim.size(0), out_sim.size(1), *x.size()[2:])
        out_sim = self.gamma * out_sim

        # [N, 1, H', W']  unary branch: one attention weight per position
        mask = self.conv_mask(input_x)
        # [N, 1, H'x W']
        mask = mask.view(mask.size(0), mask.size(1), -1)
        mask = self.softmax(mask)
        # [N, C, 1, 1]  global context vector, broadcast-added below
        out_gc = torch.bmm(value, mask.permute(0,2,1)).unsqueeze(-1)
        out_sim = out_sim+out_gc

        return self.cv(out_sim + residual)


class GC(nn.Module):
    # Global-context block (softmax-attention pooling + channel transform,
    # added residually) followed by a convolution.
    # NOTE(review): structure appears to follow GCNet's context block — confirm.
    def __init__(self, c1, c2, k=3, s=1):
        super(GC, self).__init__()
        c_ = int(c1)  # hidden channels

        # Channel transform applied to the pooled (N, C, 1, 1) context vector
        # before the residual add (here c_ == c1, so no bottleneck).
        self.channel_add_conv = nn.Sequential(
            nn.Conv2d(c1, c_, kernel_size=1),
            nn.LayerNorm([c_, 1, 1]),
            nn.ReLU(inplace=True),  # yapf: disable
            nn.Conv2d(c_, c1, kernel_size=1))

        self.conv_mask = nn.Conv2d(c_, 1, kernel_size=1)  # spatial attention logits
        self.softmax = nn.Softmax(dim=2)  # normalizes over the flattened H*W positions

        self.cv = Conv(c1, c2, k, s)  # output convolution


    def spatial_pool(self, x):
        """Softmax-attention global pooling: return a (N, C, 1, 1) context vector."""
        batch, channel, height, width = x.size()

        input_x = x
        # [N, C, H * W]
        input_x = input_x.view(batch, channel, height * width)
        # [N, 1, C, H * W]
        input_x = input_x.unsqueeze(1)
        # [N, 1, H, W]
        context_mask = self.conv_mask(x)
        # [N, 1, H * W]
        context_mask = context_mask.view(batch, 1, height * width)
        # [N, 1, H * W]  (attention weights over spatial positions)
        context_mask = self.softmax(context_mask)
        # [N, 1, H * W, 1]
        context_mask = context_mask.unsqueeze(-1)
        # [N, 1, C, 1]  (attention-weighted sum of features over space)
        context = torch.matmul(input_x, context_mask)
        # [N, C, 1, 1]
        context = context.view(batch, channel, 1, 1)

        return context

    def forward(self, x):
        # Residual global-context correction, then the output conv.
        return self.cv(x + self.channel_add_conv(self.spatial_pool(x)))


class SAM(nn.Module):
    # Spatial attention (sigmoid conv gate) applied to the input, then a conv.
    def __init__(self, c1, c2, k=3, s=1):
        super().__init__()
        self.cvsig = ConvSig(c1, c1, 1, 1)  # sigmoid-activated gate
        self.cv = Conv(c1, c2, k, s)

    def forward(self, x):
        gate = self.cvsig(x)
        return self.cv(x * gate)


class SAMA(nn.Module):
    # Additive spatial attention (x + x * gate) applied to the input, then a conv.
    def __init__(self, c1, c2, k=3, s=1):
        super().__init__()
        self.cvsig = ConvSig(c1, c1, 1, 1)  # sigmoid-activated gate
        self.cv = Conv(c1, c2, k, s)

    def forward(self, x):
        gate = self.cvsig(x)
        return self.cv(x + x * gate)


class SAMB(nn.Module):
    # Spatial attention applied *after* the conv (gate computed on the output).
    def __init__(self, c1, c2, k=3, s=1):
        super().__init__()
        self.cv = Conv(c1, c2, k, s)
        self.cvsig = ConvSig(c2, c2, 1, 1)  # gate over the conv output

    def forward(self, x):
        y = self.cv(x)
        return y * self.cvsig(y)


class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension  # concatenation axis (1 = channels for NCHW)

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    """Standalone non-maximum-suppression layer applied to model output x[0]."""
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super().__init__()

    def forward(self, x):
        preds = x[0]  # inference predictions tensor
        return non_max_suppression(preds, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)


class autoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    # NOTE(review): forward() reads `self.stride` and `self.names`, which are never set
    # here — they must be copied onto this wrapper from the wrapped model by the caller; confirm.
    img_size = 640  # inference size (pixels)
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()  # wrapped model, switched to inference mode

    def forward(self, imgs, size=640, augment=False, profile=False):
        # supports inference from various sources. For height=720, width=1280, RGB images example inputs are:
        #   opencv:     imgs = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(720,1280,3)
        #   PIL:        imgs = Image.open('image.jpg')  # HWC x(720,1280,3)
        #   numpy:      imgs = np.zeros((720,1280,3))  # HWC
        #   torch:      imgs = torch.zeros(16,3,720,1280)  # BCHW
        #   multiple:   imgs = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            # Tensor input is assumed already preprocessed: run the model directly, no NMS.
            return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process
        if not isinstance(imgs, list):
            imgs = [imgs]
        shape0, shape1 = [], []  # image and inference shapes
        batch = range(len(imgs))  # batch size  (actually the batch indices)
        for i in batch:
            imgs[i] = np.array(imgs[i])  # to numpy
            imgs[i] = imgs[i][:, :, :3] if imgs[i].ndim == 3 else np.tile(imgs[i][:, :, None], 3)  # enforce 3ch input
            s = imgs[i].shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
        # One common inference shape for the whole batch, rounded to a stride multiple.
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(imgs[i], new_shape=shape1, auto=False)[0] for i in batch]  # pad
        # NOTE(review): `batch[-1]` raises IndexError on an empty input list; falsy (0) means single image.
        x = np.stack(x, 0) if batch[-1] else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32

        # Inference
        with torch.no_grad():
            y = self.model(x, augment, profile)[0]  # forward
        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS

        # Post-process
        for i in batch:
            if y[i] is not None:
                y[i][:, :4] = scale_coords(shape1, y[i][:, :4], shape0[i])  # boxes back to original image scale

        return Detections(imgs, y, self.names)


class Detections:
    # Container for YOLO inference results: one image / prediction pair per entry,
    # with boxes exposed in pixel and normalized xyxy/xywh forms.
    def __init__(self, imgs, pred, names=None):
        super(Detections, self).__init__()
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        gn = [torch.Tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.]) for im in imgs]  # normalization gains
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized

    def display(self, pprint=False, show=False, save=False):
        """Render, print, show and/or save each image's detections.

        Fixes vs. the reference implementation: the summary variable no longer
        shadows the builtin `str`, and the ndarray->PIL conversion happens
        before the save/show branches so saving/showing also works for images
        with no detections (pred is None) instead of raising AttributeError.
        """
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if show or save:
                # Convert up front so img.save()/img.show() below always get a PIL image.
                img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    s += f'{n} {self.names[int(c)]}s, '  # add to string
                if show or save:
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10])  # plot
            if save:
                f = f'results{i}.jpg'
                s += f"saved to '{f}'"
                img.save(f)  # save
            if show:
                img.show(f'Image {i}')  # show
            if pprint:
                print(s)

    def print(self):
        self.display(pprint=True)  # print results

    def show(self):
        self.display(show=True)  # show results

    def save(self):
        self.display(save=True)  # save results


class Flatten(nn.Module):
    # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
    @staticmethod
    def forward(x):
        """Collapse everything after the batch axis: (b, ...) -> (b, prod(...))."""
        return x.view(x.size(0), -1)


class Classify(nn.Module):
    # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  # to x(b,c2,1,1)
        self.flat = Flatten()

    def forward(self, x):
        inputs = x if isinstance(x, list) else [x]
        pooled = torch.cat([self.aap(t) for t in inputs], 1)  # pool each input, cat on channels
        return self.flat(self.conv(pooled))  # flatten to x(b,c2)
    
    
class TransformerLayer(nn.Module):
    def __init__(self, c, num_heads):
        super().__init__()

        self.ln1 = nn.LayerNorm(c)
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.ln2 = nn.LayerNorm(c)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        x_ = self.ln1(x)
        x = self.ma(self.q(x_), self.k(x_), self.v(x_))[0] + x
        x = self.ln2(x)
        x = self.fc2(self.fc1(x)) + x
        return x


class TransformerBlock(nn.Module):
    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()

        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)
        self.linear = nn.Linear(c2, c2)
        self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
        self.c2 = c2

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        p = x.flatten(2)
        p = p.unsqueeze(0)
        p = p.transpose(0, 3)
        p = p.squeeze(3)
        e = self.linear(p)
        x = p + e

        x = self.tr(x)
        x = x.unsqueeze(3)
        x = x.transpose(0, 3)
        x = x.reshape(b, self.c2, w, h)
        return x


        
class BottleneckCSPTR(nn.Module):
    # CSP Bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    # with a TransformerBlock in place of the usual Bottleneck stack.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = TransformerBlock(c_, c_, 4, n)  # 4 attention heads, n layers

    def forward(self, x):
        dense = self.cv3(self.m(self.cv1(x)))
        bypass = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((dense, bypass), dim=1))))

class BottleneckCSP2TR(nn.Module):
    # BottleneckCSP2 with a TransformerBlock as the dense branch.
    # (https://github.com/WongKinYiu/CrossStagePartialNetworks)
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)                   # shared stem
        self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = Conv(2 * c_, c2, 1, 1)               # fuse both branches
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.SiLU()
        self.m = TransformerBlock(c_, c_, 4, n)  # 4 attention heads, n layers

    def forward(self, x):
        stem = self.cv1(x)
        dense = self.m(stem)
        bypass = self.cv2(stem)
        return self.cv3(self.act(self.bn(torch.cat((dense, bypass), dim=1))))


class SPPCSPTR(nn.Module):
    # CSP SPP (https://github.com/WongKinYiu/CrossStagePartialNetworks) with the
    # post-SPP 3x3 conv replaced by a single-layer TransformerBlock.
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
        super().__init__()
        c_ = int(2 * c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = Conv(c_, c_, 3, 1)
        self.cv4 = Conv(c_, c_, 1, 1)
        self.m = nn.ModuleList(nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k)
        self.cv5 = Conv(4 * c_, c_, 1, 1)
        self.cv6 = TransformerBlock(c_, c_, 4, 1)  # 4 heads, 1 layer
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.SiLU()
        self.cv7 = Conv(2 * c_, c2, 1, 1)

    def forward(self, x):
        pre = self.cv4(self.cv3(self.cv1(x)))  # pre-SPP conv stack
        pyramid = torch.cat([pre] + [pool(pre) for pool in self.m], 1)
        attended = self.cv6(self.cv5(pyramid))
        bypass = self.cv2(x)
        return self.cv7(self.act(self.bn(torch.cat((attended, bypass), dim=1))))
    
class TR(BottleneckCSPTR):
    # Short alias of BottleneckCSPTR; rebuilds the transformer stage with the
    # same arguments the parent already used (kept for config-name parity).
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)
        self.m = TransformerBlock(hidden, hidden, 4, n)

================================================
FILE: asone/detectors/yolor/models/export.py
================================================
import argparse

import torch

from asone.detectors.yolor.utils.google_utils import attempt_download

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov4.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)

    # Input
    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection

    # Load PyTorch model
    attempt_download(opt.weights)
    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
    model.eval()
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts =
Download .txt
gitextract_lrbvjsnv/

├── .dockerignore
├── .gitignore
├── Dockerfile
├── LICENCE
├── README.md
├── asone/
│   ├── __init__.py
│   ├── asone.py
│   ├── demo_detector.py
│   ├── demo_ocr.py
│   ├── demo_pose_estimator.py
│   ├── demo_segmentor.py
│   ├── demo_tracker.py
│   ├── detectors/
│   │   ├── __init__.py
│   │   ├── detector.py
│   │   ├── easyocr_detector/
│   │   │   ├── __init__.py
│   │   │   └── text_detector.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── cfg_path.py
│   │   │   ├── coreml_utils.py
│   │   │   ├── exp_name.py
│   │   │   └── weights_path.py
│   │   ├── yolonas/
│   │   │   ├── __init__.py
│   │   │   └── yolonas.py
│   │   ├── yolor/
│   │   │   ├── __init__.py
│   │   │   ├── cfg/
│   │   │   │   ├── yolor_csp.cfg
│   │   │   │   ├── yolor_csp_x.cfg
│   │   │   │   └── yolor_p6.cfg
│   │   │   ├── models/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── common.py
│   │   │   │   ├── export.py
│   │   │   │   └── models.py
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── activations.py
│   │   │   │   ├── autoanchor.py
│   │   │   │   ├── datasets.py
│   │   │   │   ├── export.py
│   │   │   │   ├── general.py
│   │   │   │   ├── google_utils.py
│   │   │   │   ├── layers.py
│   │   │   │   ├── loss.py
│   │   │   │   ├── metrics.py
│   │   │   │   ├── parse_config.py
│   │   │   │   ├── plots.py
│   │   │   │   ├── torch_utils.py
│   │   │   │   └── yolor_utils.py
│   │   │   └── yolor_detector.py
│   │   ├── yolov5/
│   │   │   ├── __init__.py
│   │   │   ├── yolov5/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── export.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   ├── general.py
│   │   │   │   │   ├── tf.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── activations.py
│   │   │   │       ├── augmentations.py
│   │   │   │       ├── dataloaders.py
│   │   │   │       ├── downloads.py.py
│   │   │   │       ├── general.py
│   │   │   │       ├── metrics.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov5_utils.py
│   │   │   └── yolov5_detector.py
│   │   ├── yolov6/
│   │   │   ├── __init__.py
│   │   │   ├── yolov6/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── assigners/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── anchor_generator.py
│   │   │   │   │   ├── assigner_utils.py
│   │   │   │   │   ├── atss_assigner.py
│   │   │   │   │   ├── iou2d_calculator.py
│   │   │   │   │   └── tal_assigner.py
│   │   │   │   ├── layers/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   └── dbb_transforms.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── efficientrep.py
│   │   │   │   │   ├── effidehead.py
│   │   │   │   │   ├── end2end.py
│   │   │   │   │   ├── loss.py
│   │   │   │   │   ├── loss_distill.py
│   │   │   │   │   ├── reppan.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── checkpoint.py
│   │   │   │       ├── events.py
│   │   │   │       ├── figure_iou.py
│   │   │   │       ├── general.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov6_utils.py
│   │   │   └── yolov6_detector.py
│   │   ├── yolov7/
│   │   │   ├── __init__.py
│   │   │   ├── yolov7/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       └── yolov7_utils.py
│   │   │   └── yolov7_detector.py
│   │   ├── yolov8/
│   │   │   ├── __init__.py
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   └── yolov8_utils.py
│   │   │   └── yolov8_detector.py
│   │   ├── yolov9/
│   │   │   ├── __init__.py
│   │   │   ├── export.py
│   │   │   ├── yolov9/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── models/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common.py
│   │   │   │   │   ├── experimental.py
│   │   │   │   │   ├── tf.py
│   │   │   │   │   └── yolo.py
│   │   │   │   └── utils/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── activations.py
│   │   │   │       ├── augmentations.py
│   │   │   │       ├── autoanchor.py
│   │   │   │       ├── autobatch.py
│   │   │   │       ├── callbacks.py
│   │   │   │       ├── coco_utils.py
│   │   │   │       ├── dataloaders.py
│   │   │   │       ├── downloads.py
│   │   │   │       ├── general.py
│   │   │   │       ├── lion.py
│   │   │   │       ├── loss.py
│   │   │   │       ├── loss_tal.py
│   │   │   │       ├── loss_tal_dual.py
│   │   │   │       ├── loss_tal_triple.py
│   │   │   │       ├── metrics.py
│   │   │   │       ├── plots.py
│   │   │   │       ├── segment/
│   │   │   │       │   ├── __init__.py
│   │   │   │       │   ├── augmentations.py
│   │   │   │       │   ├── dataloaders.py
│   │   │   │       │   ├── general.py
│   │   │   │       │   ├── loss.py
│   │   │   │       │   ├── loss_tal.py
│   │   │   │       │   ├── loss_tal_dual.py
│   │   │   │       │   ├── metrics.py
│   │   │   │       │   ├── plots.py
│   │   │   │       │   └── tal/
│   │   │   │       │       ├── __init__.py
│   │   │   │       │       ├── anchor_generator.py
│   │   │   │       │       └── assigner.py
│   │   │   │       ├── tal/
│   │   │   │       │   ├── __init__.py
│   │   │   │       │   ├── anchor_generator.py
│   │   │   │       │   └── assigner.py
│   │   │   │       ├── torch_utils.py
│   │   │   │       ├── triton.py
│   │   │   │       └── yolov9_utils.py
│   │   │   └── yolov9_detector.py
│   │   └── yolox/
│   │       ├── __init__.py
│   │       ├── exps/
│   │       │   ├── __init__.py
│   │       │   ├── yolov3.py
│   │       │   ├── yolox_l.py
│   │       │   ├── yolox_m.py
│   │       │   ├── yolox_nano.py
│   │       │   ├── yolox_s.py
│   │       │   ├── yolox_tiny.py
│   │       │   └── yolox_x.py
│   │       ├── yolox/
│   │       │   ├── __init__.py
│   │       │   ├── core/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── launch.py
│   │       │   │   └── trainer.py
│   │       │   ├── data/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── data_augment.py
│   │       │   │   ├── data_prefetcher.py
│   │       │   │   ├── dataloading.py
│   │       │   │   ├── datasets/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── coco.py
│   │       │   │   │   ├── coco_classes.py
│   │       │   │   │   ├── datasets_wrapper.py
│   │       │   │   │   ├── mosaicdetection.py
│   │       │   │   │   ├── voc.py
│   │       │   │   │   └── voc_classes.py
│   │       │   │   └── samplers.py
│   │       │   ├── evaluators/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── coco_evaluator.py
│   │       │   │   ├── voc_eval.py
│   │       │   │   └── voc_evaluator.py
│   │       │   ├── exp/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── base_exp.py
│   │       │   │   ├── build.py
│   │       │   │   ├── default/
│   │       │   │   │   └── __init__.py
│   │       │   │   └── yolox_base.py
│   │       │   ├── models/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── build.py
│   │       │   │   ├── darknet.py
│   │       │   │   ├── losses.py
│   │       │   │   ├── network_blocks.py
│   │       │   │   ├── yolo_fpn.py
│   │       │   │   ├── yolo_head.py
│   │       │   │   ├── yolo_pafpn.py
│   │       │   │   └── yolox.py
│   │       │   └── utils/
│   │       │       ├── __init__.py
│   │       │       ├── allreduce_norm.py
│   │       │       ├── boxes.py
│   │       │       ├── checkpoint.py
│   │       │       ├── compat.py
│   │       │       ├── demo_utils.py
│   │       │       ├── dist.py
│   │       │       ├── ema.py
│   │       │       ├── logger.py
│   │       │       ├── lr_scheduler.py
│   │       │       ├── metric.py
│   │       │       ├── model_utils.py
│   │       │       ├── setup_env.py
│   │       │       └── visualize.py
│   │       ├── yolox_detector.py
│   │       └── yolox_utils.py
│   ├── linux/
│   │   ├── Instructions/
│   │   │   ├── Benchmarking.md
│   │   │   ├── Demo-Detectron2.md
│   │   │   ├── Docker-Setup.md
│   │   │   ├── Driver-Installations.md
│   │   │   ├── Manual-Build.md
│   │   │   └── Manual-Installation.md
│   │   ├── README.md
│   │   ├── docker-installation.sh
│   │   └── main.py
│   ├── pose_estimator.py
│   ├── pose_estimators/
│   │   ├── __init__.py
│   │   ├── yolov7_pose/
│   │   │   ├── __init__.py
│   │   │   ├── main.py
│   │   │   ├── models/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── common.py
│   │   │   │   ├── experimental.py
│   │   │   │   └── yolo.py
│   │   │   ├── requirements.txt
│   │   │   ├── utils/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── activations.py
│   │   │   │   ├── add_nms.py
│   │   │   │   ├── autoanchor.py
│   │   │   │   ├── datasets.py
│   │   │   │   ├── general.py
│   │   │   │   ├── google_utils.py
│   │   │   │   ├── loss.py
│   │   │   │   ├── metrics.py
│   │   │   │   ├── plots.py
│   │   │   │   ├── torch_utils.py
│   │   │   │   ├── wandb_logging/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── log_dataset.py
│   │   │   │   │   └── wandb_utils.py
│   │   │   │   └── yolov7_pose_utils.py
│   │   │   └── yolov7.py
│   │   └── yolov8_pose/
│   │       ├── __init__.py
│   │       ├── plots.py
│   │       └── yolov8.py
│   ├── recognizers/
│   │   ├── __init__.py
│   │   ├── easyocr_recognizer/
│   │   │   ├── __init__.py
│   │   │   └── easyocr_recognizer.py
│   │   ├── recognizer.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       └── recognizer_name.py
│   ├── schemas/
│   │   └── output_schemas.py
│   ├── segmentors/
│   │   ├── __init__.py
│   │   ├── segment_anything/
│   │   │   ├── __init__.py
│   │   │   └── sam.py
│   │   ├── segmentor.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       └── weights_path.py
│   ├── trackers/
│   │   ├── __init__.py
│   │   ├── byte_track/
│   │   │   ├── __init__.py
│   │   │   ├── bytetracker.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── basetrack.py
│   │   │       ├── byte_tracker.py
│   │   │       ├── kalman_filter.py
│   │   │       └── matching.py
│   │   ├── deep_sort/
│   │   │   ├── __init__.py
│   │   │   ├── deepsort.py
│   │   │   └── tracker/
│   │   │       ├── .gitignore
│   │   │       ├── README.md
│   │   │       ├── __init__.py
│   │   │       ├── deep/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── evaluate.py
│   │   │       │   ├── feature_extractor.py
│   │   │       │   ├── model.py
│   │   │       │   ├── original_model.py
│   │   │       │   ├── test.py
│   │   │       │   └── train.py
│   │   │       ├── deep_sort.py
│   │   │       ├── parser.py
│   │   │       └── sort/
│   │   │           ├── __init__.py
│   │   │           ├── detection.py
│   │   │           ├── iou_matching.py
│   │   │           ├── kalman_filter.py
│   │   │           ├── linear_assignment.py
│   │   │           ├── nn_matching.py
│   │   │           ├── preprocessing.py
│   │   │           ├── track.py
│   │   │           └── tracker.py
│   │   ├── motpy/
│   │   │   ├── __init__.py
│   │   │   └── motpy.py
│   │   ├── nor_fair/
│   │   │   ├── __init__.py
│   │   │   └── norfair.py
│   │   ├── oc_sort/
│   │   │   ├── __init__.py
│   │   │   ├── ocsort.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── association.py
│   │   │       ├── kalmanfilter.py
│   │   │       └── ocsort.py
│   │   ├── strong_sort/
│   │   │   ├── __init__.py
│   │   │   ├── strongsort.py
│   │   │   └── tracker/
│   │   │       ├── __init__.py
│   │   │       ├── configs/
│   │   │       │   └── strong_sort.yaml
│   │   │       ├── deep/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── checkpoint/
│   │   │       │   │   └── .gitkeep
│   │   │       │   ├── reid/
│   │   │       │   │   ├── .flake8
│   │   │       │   │   ├── .gitignore
│   │   │       │   │   ├── .isort.cfg
│   │   │       │   │   ├── .style.yapf
│   │   │       │   │   ├── LICENSE
│   │   │       │   │   ├── README.rst
│   │   │       │   │   ├── configs/
│   │   │       │   │   │   ├── im_osnet_ain_x1_0_softmax_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   ├── im_osnet_ibn_x1_0_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_25_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_5_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x0_75_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x1_0_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   ├── im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   ├── im_r50_softmax_256x128_amsgrad.yaml
│   │   │       │   │   │   └── im_r50fc512_softmax_256x128_amsgrad.yaml
│   │   │       │   │   ├── docs/
│   │   │       │   │   │   ├── AWESOME_REID.md
│   │   │       │   │   │   ├── MODEL_ZOO.md
│   │   │       │   │   │   ├── Makefile
│   │   │       │   │   │   ├── conf.py
│   │   │       │   │   │   ├── datasets.rst
│   │   │       │   │   │   ├── evaluation.rst
│   │   │       │   │   │   ├── index.rst
│   │   │       │   │   │   ├── pkg/
│   │   │       │   │   │   │   ├── data.rst
│   │   │       │   │   │   │   ├── engine.rst
│   │   │       │   │   │   │   ├── losses.rst
│   │   │       │   │   │   │   ├── metrics.rst
│   │   │       │   │   │   │   ├── models.rst
│   │   │       │   │   │   │   ├── optim.rst
│   │   │       │   │   │   │   └── utils.rst
│   │   │       │   │   │   └── user_guide.rst
│   │   │       │   │   ├── linter.sh
│   │   │       │   │   ├── projects/
│   │   │       │   │   │   ├── DML/
│   │   │       │   │   │   │   ├── README.md
│   │   │       │   │   │   │   ├── default_config.py
│   │   │       │   │   │   │   ├── dml.py
│   │   │       │   │   │   │   ├── im_osnet_x1_0_dml_256x128_amsgrad_cosine.yaml
│   │   │       │   │   │   │   └── main.py
│   │   │       │   │   │   ├── OSNet_AIN/
│   │   │       │   │   │   │   ├── README.md
│   │   │       │   │   │   │   ├── default_config.py
│   │   │       │   │   │   │   ├── main.py
│   │   │       │   │   │   │   ├── nas.yaml
│   │   │       │   │   │   │   ├── osnet_child.py
│   │   │       │   │   │   │   ├── osnet_search.py
│   │   │       │   │   │   │   └── softmax_nas.py
│   │   │       │   │   │   ├── README.md
│   │   │       │   │   │   └── attribute_recognition/
│   │   │       │   │   │       ├── README.md
│   │   │       │   │   │       ├── datasets/
│   │   │       │   │   │       │   ├── __init__.py
│   │   │       │   │   │       │   ├── dataset.py
│   │   │       │   │   │       │   └── pa100k.py
│   │   │       │   │   │       ├── default_parser.py
│   │   │       │   │   │       ├── main.py
│   │   │       │   │   │       ├── models/
│   │   │       │   │   │       │   ├── __init__.py
│   │   │       │   │   │       │   └── osnet.py
│   │   │       │   │   │       └── train.sh
│   │   │       │   │   ├── scripts/
│   │   │       │   │   │   ├── default_config.py
│   │   │       │   │   │   └── main.py
│   │   │       │   │   ├── setup.py
│   │   │       │   │   ├── tools/
│   │   │       │   │   │   ├── compute_mean_std.py
│   │   │       │   │   │   ├── parse_test_res.py
│   │   │       │   │   │   └── visualize_actmap.py
│   │   │       │   │   └── torchreid/
│   │   │       │   │       ├── __init__.py
│   │   │       │   │       ├── data/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── datamanager.py
│   │   │       │   │       │   ├── datasets/
│   │   │       │   │       │   │   ├── __init__.py
│   │   │       │   │       │   │   ├── dataset.py
│   │   │       │   │       │   │   ├── image/
│   │   │       │   │       │   │   │   ├── __init__.py
│   │   │       │   │       │   │   │   ├── cuhk01.py
│   │   │       │   │       │   │   │   ├── cuhk02.py
│   │   │       │   │       │   │   │   ├── cuhk03.py
│   │   │       │   │       │   │   │   ├── cuhksysu.py
│   │   │       │   │       │   │   │   ├── dukemtmcreid.py
│   │   │       │   │       │   │   │   ├── grid.py
│   │   │       │   │       │   │   │   ├── ilids.py
│   │   │       │   │       │   │   │   ├── market1501.py
│   │   │       │   │       │   │   │   ├── msmt17.py
│   │   │       │   │       │   │   │   ├── prid.py
│   │   │       │   │       │   │   │   ├── sensereid.py
│   │   │       │   │       │   │   │   ├── university1652.py
│   │   │       │   │       │   │   │   └── viper.py
│   │   │       │   │       │   │   └── video/
│   │   │       │   │       │   │       ├── __init__.py
│   │   │       │   │       │   │       ├── dukemtmcvidreid.py
│   │   │       │   │       │   │       ├── ilidsvid.py
│   │   │       │   │       │   │       ├── mars.py
│   │   │       │   │       │   │       └── prid2011.py
│   │   │       │   │       │   ├── sampler.py
│   │   │       │   │       │   └── transforms.py
│   │   │       │   │       ├── engine/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── engine.py
│   │   │       │   │       │   ├── image/
│   │   │       │   │       │   │   ├── __init__.py
│   │   │       │   │       │   │   ├── softmax.py
│   │   │       │   │       │   │   └── triplet.py
│   │   │       │   │       │   └── video/
│   │   │       │   │       │       ├── __init__.py
│   │   │       │   │       │       ├── softmax.py
│   │   │       │   │       │       └── triplet.py
│   │   │       │   │       ├── losses/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── cross_entropy_loss.py
│   │   │       │   │       │   └── hard_mine_triplet_loss.py
│   │   │       │   │       ├── metrics/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── accuracy.py
│   │   │       │   │       │   ├── distance.py
│   │   │       │   │       │   ├── rank.py
│   │   │       │   │       │   └── rank_cylib/
│   │   │       │   │       │       ├── Makefile
│   │   │       │   │       │       ├── __init__.py
│   │   │       │   │       │       ├── rank_cy.pyx
│   │   │       │   │       │       ├── setup.py
│   │   │       │   │       │       └── test_cython.py
│   │   │       │   │       ├── models/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── densenet.py
│   │   │       │   │       │   ├── hacnn.py
│   │   │       │   │       │   ├── inceptionresnetv2.py
│   │   │       │   │       │   ├── inceptionv4.py
│   │   │       │   │       │   ├── mlfn.py
│   │   │       │   │       │   ├── mobilenetv2.py
│   │   │       │   │       │   ├── mudeep.py
│   │   │       │   │       │   ├── nasnet.py
│   │   │       │   │       │   ├── osnet.py
│   │   │       │   │       │   ├── osnet_ain.py
│   │   │       │   │       │   ├── pcb.py
│   │   │       │   │       │   ├── resnet.py
│   │   │       │   │       │   ├── resnet_ibn_a.py
│   │   │       │   │       │   ├── resnet_ibn_b.py
│   │   │       │   │       │   ├── resnetmid.py
│   │   │       │   │       │   ├── senet.py
│   │   │       │   │       │   ├── shufflenet.py
│   │   │       │   │       │   ├── shufflenetv2.py
│   │   │       │   │       │   ├── squeezenet.py
│   │   │       │   │       │   └── xception.py
│   │   │       │   │       ├── optim/
│   │   │       │   │       │   ├── __init__.py
│   │   │       │   │       │   ├── lr_scheduler.py
│   │   │       │   │       │   ├── optimizer.py
│   │   │       │   │       │   └── radam.py
│   │   │       │   │       └── utils/
│   │   │       │   │           ├── GPU-Re-Ranking/
│   │   │       │   │           │   ├── README.md
│   │   │       │   │           │   ├── extension/
│   │   │       │   │           │   │   ├── adjacency_matrix/
│   │   │       │   │           │   │   │   ├── build_adjacency_matrix.cpp
│   │   │       │   │           │   │   │   ├── build_adjacency_matrix_kernel.cu
│   │   │       │   │           │   │   │   └── setup.py
│   │   │       │   │           │   │   ├── make.sh
│   │   │       │   │           │   │   └── propagation/
│   │   │       │   │           │   │       ├── gnn_propagate.cpp
│   │   │       │   │           │   │       ├── gnn_propagate_kernel.cu
│   │   │       │   │           │   │       └── setup.py
│   │   │       │   │           │   ├── gnn_reranking.py
│   │   │       │   │           │   ├── main.py
│   │   │       │   │           │   └── utils.py
│   │   │       │   │           ├── __init__.py
│   │   │       │   │           ├── avgmeter.py
│   │   │       │   │           ├── feature_extractor.py
│   │   │       │   │           ├── loggers.py
│   │   │       │   │           ├── model_complexity.py
│   │   │       │   │           ├── reidtools.py
│   │   │       │   │           ├── rerank.py
│   │   │       │   │           ├── tools.py
│   │   │       │   │           └── torchtools.py
│   │   │       │   └── reid_model_factory.py
│   │   │       ├── sort/
│   │   │       │   ├── __init__.py
│   │   │       │   ├── detection.py
│   │   │       │   ├── iou_matching.py
│   │   │       │   ├── kalman_filter.py
│   │   │       │   ├── linear_assignment.py
│   │   │       │   ├── nn_matching.py
│   │   │       │   ├── preprocessing.py
│   │   │       │   ├── track.py
│   │   │       │   └── tracker.py
│   │   │       ├── strong_sort.py
│   │   │       └── utils/
│   │   │           ├── __init__.py
│   │   │           ├── asserts.py
│   │   │           ├── draw.py
│   │   │           ├── evaluation.py
│   │   │           ├── io.py
│   │   │           ├── json_logger.py
│   │   │           ├── log.py
│   │   │           ├── parser.py
│   │   │           └── tools.py
│   │   └── tracker.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── classes.py
│   │   ├── colors.py
│   │   ├── counting.py
│   │   ├── default_cfg.py
│   │   ├── download.py
│   │   ├── draw.py
│   │   ├── ponits_conversion.py
│   │   ├── pose_estimators_weights.py
│   │   ├── temp_loader.py
│   │   ├── utils.py
│   │   ├── video_reader.py
│   │   └── weights.json
│   └── windows/
│       ├── README.md
│       ├── cam2ip-1.6-64bit-cv/
│       │   ├── AUTHORS
│       │   └── COPYING
│       ├── enable_feature.bat
│       ├── installation.bat
│       ├── test-display.py
│       └── test-webcam.py
├── docker-compose.yml
├── main.py
├── requirements.txt
└── setup.py
Download .txt
Showing preview only (353K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (4580 symbols across 336 files)

FILE: asone/asone.py
  class ASOne (line 20) | class ASOne:
    method __init__ (line 21) | def __init__(self,
    method get_detector (line 54) | def get_detector(self, detector: int, weights: str, recognizer, num_cl...
    method get_recognizer (line 59) | def get_recognizer(self, recognizer: int, languages):
    method get_tracker (line 67) | def get_tracker(self, tracker: int):
    method get_segmentor (line 72) | def get_segmentor(self, segmentor, segmentor_weights):
    method _update_args (line 76) | def _update_args(self, kwargs):
    method track_stream (line 85) | def track_stream(self,
    method stream_tracker (line 103) | def stream_tracker(self,
    method track_video (line 116) | def track_video(self,
    method video_tracker (line 134) | def video_tracker(self,
    method detect_video (line 146) | def detect_video(self,
    method video_detecter (line 164) | def video_detecter(self,
    method detect (line 176) | def detect(self, source, **kwargs)->np.ndarray:
    method detecter (line 194) | def detecter(self, source, **kwargs):
    method detect_and_track (line 213) | def detect_and_track(self, frame, **kwargs):
    method detect_track_manager (line 227) | def detect_track_manager(self, frame, **kwargs):
    method detect_text (line 243) | def detect_text(self, image):
    method track_webcam (line 251) | def track_webcam(self,
    method webcam_tracker (line 270) | def webcam_tracker(self,
    method _start_tracking (line 284) | def _start_tracking(self,
    method draw (line 377) | def draw(dets, display=False, img=None, **kwargs):
    method draw_masks (line 413) | def draw_masks(dets, display, img=None, **kwargs):
    method read_video (line 457) | def read_video(self, video_path):
    method format_output (line 462) | def format_output(self, bbox_details, frame_details):

FILE: asone/demo_detector.py
  function main (line 8) | def main(args):

FILE: asone/demo_ocr.py
  function main (line 5) | def main(args):

FILE: asone/demo_pose_estimator.py
  function main (line 10) | def main(args):

FILE: asone/demo_segmentor.py
  function main (line 8) | def main(args):

FILE: asone/demo_tracker.py
  function main (line 10) | def main(args):

FILE: asone/detectors/detector.py
  class Detector (line 7) | class Detector:
    method __init__ (line 8) | def __init__(self,
    method _select_detector (line 16) | def _select_detector(self, model_flag, weights, cuda, recognizer, num_...
    method get_detector (line 106) | def get_detector(self):
    method detect (line 109) | def detect(self,

FILE: asone/detectors/easyocr_detector/text_detector.py
  class TextDetector (line 5) | class TextDetector:
    method __init__ (line 6) | def __init__(self, detect_network, languages: list = ['en'], use_cuda=...
    method detect (line 11) | def detect(self, image: list,  freelist: bool=False, return_image=Fals...

FILE: asone/detectors/utils/cfg_path.py
  function get_cfg_path (line 9) | def get_cfg_path(model_flag):

FILE: asone/detectors/utils/coreml_utils.py
  function yolo_to_xyxy (line 4) | def yolo_to_xyxy(bboxes, img_size):
  function generalize_output_format (line 20) | def generalize_output_format(bboxes, confidence_list, conf_thres):
  function scale_bboxes (line 42) | def scale_bboxes(bboxes, org_img_shape, resized_img_shape):

FILE: asone/detectors/utils/exp_name.py
  function get_exp__name (line 15) | def get_exp__name(model_flag):

FILE: asone/detectors/utils/weights_path.py
  function get_weight_path (line 138) | def get_weight_path(model_flag):

FILE: asone/detectors/yolonas/yolonas.py
  class YOLOnasDetector (line 20) | class YOLOnasDetector:
    method __init__ (line 21) | def __init__(self,
    method load_model (line 45) | def load_model(self, weights):
    method detect (line 66) | def detect(self, image: list,

FILE: asone/detectors/yolor/models/common.py
  class DWT (line 17) | class DWT(nn.Module):
    method __init__ (line 18) | def __init__(self):
    method forward (line 22) | def forward(self, x):
    method __init__ (line 29) | def __init__(self):
    method forward (line 32) | def forward(self, x):
  class DWT (line 28) | class DWT(nn.Module): # use ReOrg instead
    method __init__ (line 18) | def __init__(self):
    method forward (line 22) | def forward(self, x):
    method __init__ (line 29) | def __init__(self):
    method forward (line 32) | def forward(self, x):
  class ImplicitA (line 36) | class ImplicitA(nn.Module):
    method __init__ (line 37) | def __init__(self, channel):
    method forward (line 43) | def forward(self, x):
  class ImplicitM (line 47) | class ImplicitM(nn.Module):
    method __init__ (line 48) | def __init__(self, channel):
    method forward (line 54) | def forward(self, x):
  class ReOrg (line 58) | class ReOrg(nn.Module):
    method __init__ (line 59) | def __init__(self):
    method forward (line 62) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  function autopad (line 65) | def autopad(k, p=None):  # kernel, padding
  function DWConv (line 72) | def DWConv(c1, c2, k=1, s=1, act=True):
  class Conv (line 77) | class Conv(nn.Module):
    method __init__ (line 79) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 85) | def forward(self, x):
    method fuseforward (line 88) | def fuseforward(self, x):
  class ConvSig (line 92) | class ConvSig(nn.Module):
    method __init__ (line 94) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 99) | def forward(self, x):
    method fuseforward (line 102) | def fuseforward(self, x):
  class ConvSqu (line 106) | class ConvSqu(nn.Module):
    method __init__ (line 108) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 113) | def forward(self, x):
    method fuseforward (line 116) | def fuseforward(self, x):
  class Bottleneck (line 120) | class Bottleneck(nn.Module):
    method __init__ (line 122) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 129) | def forward(self, x):
  class BottleneckG (line 133) | class BottleneckG(nn.Module):
    method __init__ (line 135) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 142) | def forward(self, x):
  class BottleneckCSP (line 146) | class BottleneckCSP(nn.Module):
    method __init__ (line 148) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 159) | def forward(self, x):
  class BottleneckCSPF (line 165) | class BottleneckCSPF(nn.Module):
    method __init__ (line 167) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 178) | def forward(self, x):
  class BottleneckCSPL (line 184) | class BottleneckCSPL(nn.Module):
    method __init__ (line 186) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 197) | def forward(self, x):
  class BottleneckCSPLG (line 203) | class BottleneckCSPLG(nn.Module):
    method __init__ (line 205) | def __init__(self, c1, c2, n=1, shortcut=True, g=3, e=0.25):  # ch_in,...
    method forward (line 216) | def forward(self, x):
  class BottleneckCSPSE (line 222) | class BottleneckCSPSE(nn.Module):
    method __init__ (line 224) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 238) | def forward(self, x):
  class BottleneckCSPSEA (line 245) | class BottleneckCSPSEA(nn.Module):
    method __init__ (line 247) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 261) | def forward(self, x):
  class BottleneckCSPSAM (line 268) | class BottleneckCSPSAM(nn.Module):
    method __init__ (line 270) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 282) | def forward(self, x):
  class BottleneckCSPSAMA (line 289) | class BottleneckCSPSAMA(nn.Module):
    method __init__ (line 291) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 303) | def forward(self, x):
  class BottleneckCSPSAMB (line 310) | class BottleneckCSPSAMB(nn.Module):
    method __init__ (line 312) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 324) | def forward(self, x):
  class BottleneckCSPGC (line 331) | class BottleneckCSPGC(nn.Module):
    method __init__ (line 333) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method spatial_pool (line 353) | def spatial_pool(self, x):
    method forward (line 377) | def forward(self, x):
  class BottleneckCSPDNL (line 385) | class BottleneckCSPDNL(nn.Module):
    method __init__ (line 387) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 409) | def forward(self, x):
  class BottleneckCSP2 (line 456) | class BottleneckCSP2(nn.Module):
    method __init__ (line 458) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 468) | def forward(self, x):
  class BottleneckCSP2SAM (line 475) | class BottleneckCSP2SAM(nn.Module):
    method __init__ (line 477) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 488) | def forward(self, x):
  class VoVCSP (line 496) | class VoVCSP(nn.Module):
    method __init__ (line 498) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 505) | def forward(self, x):
  class SPP (line 512) | class SPP(nn.Module):
    method __init__ (line 514) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 521) | def forward(self, x):
  class SPPCSP (line 526) | class SPPCSP(nn.Module):
    method __init__ (line 528) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 1...
    method forward (line 542) | def forward(self, x):
  class Focus (line 549) | class Focus(nn.Module):
    method __init__ (line 551) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 555) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class MP (line 559) | class MP(nn.Module):
    method __init__ (line 561) | def __init__(self, k=2):
    method forward (line 565) | def forward(self, x):
  class DownD (line 569) | class DownD(nn.Module):
    method __init__ (line 571) | def __init__(self, c1, c2, n=1, k=2):
    method forward (line 580) | def forward(self, x):
  class DownC (line 584) | class DownC(nn.Module):
    method __init__ (line 586) | def __init__(self, c1, c2, n=1, k=2):
    method forward (line 594) | def forward(self, x):
  class DNL (line 598) | class DNL(nn.Module):
    method __init__ (line 600) | def __init__(self, c1, c2, k=3, s=1):
    method forward (line 622) | def forward(self, x):
  class GC (line 678) | class GC(nn.Module):
    method __init__ (line 680) | def __init__(self, c1, c2, k=3, s=1):
    method spatial_pool (line 697) | def spatial_pool(self, x):
    method forward (line 721) | def forward(self, x):
  class SAM (line 726) | class SAM(nn.Module):
    method __init__ (line 728) | def __init__(self, c1, c2, k=3, s=1):
    method forward (line 734) | def forward(self, x):
  class SAMA (line 739) | class SAMA(nn.Module):
    method __init__ (line 741) | def __init__(self, c1, c2, k=3, s=1):
    method forward (line 747) | def forward(self, x):
  class SAMB (line 752) | class SAMB(nn.Module):
    method __init__ (line 754) | def __init__(self, c1, c2, k=3, s=1):
    method forward (line 760) | def forward(self, x):
  class Concat (line 767) | class Concat(nn.Module):
    method __init__ (line 769) | def __init__(self, dimension=1):
    method forward (line 773) | def forward(self, x):
  class NMS (line 777) | class NMS(nn.Module):
    method __init__ (line 783) | def __init__(self):
    method forward (line 786) | def forward(self, x):
  class autoShape (line 790) | class autoShape(nn.Module):
    method __init__ (line 797) | def __init__(self, model):
    method forward (line 801) | def forward(self, imgs, size=640, augment=False, profile=False):
  class Detections (line 844) | class Detections:
    method __init__ (line 846) | def __init__(self, imgs, pred, names=None):
    method display (line 857) | def display(self, pprint=False, show=False, save=False):
    method print (line 879) | def print(self):
    method show (line 882) | def show(self):
    method save (line 885) | def save(self):
  class Flatten (line 889) | class Flatten(nn.Module):
    method forward (line 892) | def forward(x):
  class Classify (line 896) | class Classify(nn.Module):
    method __init__ (line 898) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, k...
    method forward (line 904) | def forward(self, x):
  class TransformerLayer (line 909) | class TransformerLayer(nn.Module):
    method __init__ (line 910) | def __init__(self, c, num_heads):
    method forward (line 922) | def forward(self, x):
  class TransformerBlock (line 930) | class TransformerBlock(nn.Module):
    method __init__ (line 931) | def __init__(self, c1, c2, num_heads, num_layers):
    method forward (line 941) | def forward(self, x):
  class BottleneckCSPTR (line 960) | class BottleneckCSPTR(nn.Module):
    method __init__ (line 962) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 973) | def forward(self, x):
  class BottleneckCSP2TR (line 978) | class BottleneckCSP2TR(nn.Module):
    method __init__ (line 980) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 990) | def forward(self, x):
  class SPPCSPTR (line 997) | class SPPCSPTR(nn.Module):
    method __init__ (line 999) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 1...
    method forward (line 1013) | def forward(self, x):
  class TR (line 1019) | class TR(BottleneckCSPTR):
    method __init__ (line 1020) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):

FILE: asone/detectors/yolor/models/models.py
  function create_modules (line 9) | def create_modules(module_defs, img_size, cfg):
  class YOLOLayer (line 339) | class YOLOLayer(nn.Module):
    method __init__ (line 340) | def __init__(self, anchors, nc, img_size, yolo_index, layers, stride):
    method create_grids (line 358) | def create_grids(self, ng=(13, 13), device='cpu'):
    method forward (line 371) | def forward(self, p, out):
  class JDELayer (line 432) | class JDELayer(nn.Module):
    method __init__ (line 433) | def __init__(self, anchors, nc, img_size, yolo_index, layers, stride):
    method create_grids (line 451) | def create_grids(self, ng=(13, 13), device='cpu'):
    method forward (line 464) | def forward(self, p, out):
  class Darknet (line 524) | class Darknet(nn.Module):
    method __init__ (line 527) | def __init__(self, cfg, img_size=(416, 416), verbose=False):
    method forward (line 540) | def forward(self, x, augment=False, verbose=False):
    method forward_once (line 570) | def forward_once(self, x, augment=False, verbose=False):
    method fuse (line 627) | def fuse(self):
    method info (line 644) | def info(self, verbose=False):
  function get_yolo_layers (line 648) | def get_yolo_layers(model):
  function load_darknet_weights (line 652) | def load_darknet_weights(self, weights, cutoff=-1):
  function save_weights (line 702) | def save_weights(self, path='model.weights', cutoff=-1):
  function convert (line 728) | def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weight...
  function attempt_download (line 742) | def attempt_download(weights):

FILE: asone/detectors/yolor/utils/activations.py
  class Swish (line 9) | class Swish(nn.Module):  #
    method forward (line 11) | def forward(x):
  class Hardswish (line 15) | class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    method forward (line 17) | def forward(x):
  class MemoryEfficientSwish (line 22) | class MemoryEfficientSwish(nn.Module):
    class F (line 23) | class F(torch.autograd.Function):
      method forward (line 25) | def forward(ctx, x):
      method backward (line 30) | def backward(ctx, grad_output):
    method forward (line 35) | def forward(self, x):
  class Mish (line 40) | class Mish(nn.Module):
    method forward (line 42) | def forward(x):
  class MemoryEfficientMish (line 46) | class MemoryEfficientMish(nn.Module):
    class F (line 47) | class F(torch.autograd.Function):
      method forward (line 49) | def forward(ctx, x):
      method backward (line 54) | def backward(ctx, grad_output):
    method forward (line 60) | def forward(self, x):
  class FReLU (line 65) | class FReLU(nn.Module):
    method __init__ (line 66) | def __init__(self, c1, k=3):  # ch_in, kernel
    method forward (line 71) | def forward(self, x):

FILE: asone/detectors/yolor/utils/autoanchor.py
  function check_anchor_order (line 10) | def check_anchor_order(m):
  function check_anchors (line 21) | def check_anchors(dataset, model, thr=4.0, imgsz=640):
  function kmean_anchors (line 55) | def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0...

FILE: asone/detectors/yolor/utils/datasets.py
  function get_hash (line 40) | def get_hash(files):
  function exif_size (line 45) | def exif_size(img):
  function create_dataloader (line 60) | def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, au...
  function create_dataloader9 (line 86) | def create_dataloader9(path, imgsz, batch_size, stride, opt, hyp=None, a...
  class InfiniteDataLoader (line 112) | class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    method __init__ (line 118) | def __init__(self, *args, **kwargs):
    method __len__ (line 123) | def __len__(self):
    method __iter__ (line 126) | def __iter__(self):
  class _RepeatSampler (line 131) | class _RepeatSampler(object):
    method __init__ (line 138) | def __init__(self, sampler):
    method __iter__ (line 141) | def __iter__(self):
  class LoadImages (line 146) | class LoadImages:  # for inference
    method __init__ (line 147) | def __init__(self, path, img_size=640, auto_size=32):
    method __iter__ (line 176) | def __iter__(self):
    method __next__ (line 180) | def __next__(self):
    method new_video (line 218) | def new_video(self, path):
    method __len__ (line 223) | def __len__(self):
  class LoadWebcam (line 227) | class LoadWebcam:  # for inference
    method __init__ (line 228) | def __init__(self, pipe='0', img_size=640):
    method __iter__ (line 241) | def __iter__(self):
    method __next__ (line 245) | def __next__(self):
    method __len__ (line 280) | def __len__(self):
  class LoadStreams (line 284) | class LoadStreams:  # multiple IP or RTSP cameras
    method __init__ (line 285) | def __init__(self, sources='streams.txt', img_size=640):
    method update (line 318) | def update(self, index, cap):
    method __iter__ (line 330) | def __iter__(self):
    method __next__ (line 334) | def __next__(self):
    method __len__ (line 353) | def __len__(self):
  class LoadImagesAndLabels (line 357) | class LoadImagesAndLabels(Dataset):  # for training/testing
    method __init__ (line 358) | def __init__(self, path, img_size=640, batch_size=16, augment=False, h...
    method cache_labels (line 513) | def cache_labels(self, path='labels.cache3'):
    method __len__ (line 537) | def __len__(self):
    method __getitem__ (line 546) | def __getitem__(self, index):
    method collate_fn (line 633) | def collate_fn(batch):
  class LoadImagesAndLabels9 (line 640) | class LoadImagesAndLabels9(Dataset):  # for training/testing
    method __init__ (line 641) | def __init__(self, path, img_size=640, batch_size=16, augment=False, h...
    method cache_labels (line 796) | def cache_labels(self, path='labels.cache3'):
    method __len__ (line 820) | def __len__(self):
    method __getitem__ (line 829) | def __getitem__(self, index):
    method collate_fn (line 916) | def collate_fn(batch):
  function load_image (line 924) | def load_image(self, index):
  function augment_hsv (line 941) | def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
  function load_mosaic (line 960) | def load_mosaic(self, index):
  function load_mosaic9 (line 1018) | def load_mosaic9(self, index):
  function replicate (line 1092) | def replicate(img, labels):
  function letterbox (line 1109) | def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=Tru...
  function random_perspective (line 1142) | def random_perspective(img, targets=(), degrees=10, translate=.1, scale=...
  function box_candidates (line 1229) | def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # bo...
  function cutout (line 1237) | def cutout(image, labels):
  function create_folder (line 1283) | def create_folder(path='./new'):
  function flatten_recursive (line 1290) | def flatten_recursive(path='../coco128'):

FILE: asone/detectors/yolor/utils/general.py
  function set_logging (line 33) | def set_logging(rank=-1):
  function init_seeds (line 39) | def init_seeds(seed=0):
  function get_latest_run (line 45) | def get_latest_run(search_dir='.'):
  function check_git_status (line 51) | def check_git_status():
  function check_img_size (line 59) | def check_img_size(img_size, s=32):
  function check_file (line 67) | def check_file(file):
  function check_dataset (line 78) | def check_dataset(dict):
  function make_divisible (line 98) | def make_divisible(x, divisor):
  function labels_to_class_weights (line 103) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 122) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 131) | def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index...
  function xyxy2xywh (line 143) | def xyxy2xywh(x):
  function xywh2xyxy (line 153) | def xywh2xyxy(x):
  function scale_coords (line 163) | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  function clip_coords (line 179) | def clip_coords(boxes, img_shape):
  function bbox_iou (line 187) | def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=Fal...
  function box_iou (line 247) | def box_iou(box1, box2):
  function wh_iou (line 272) | def wh_iou(wh1, wh2):
  function non_max_suppression (line 280) | def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge...
  function strip_optimizer (line 359) | def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general im...
  function print_mutation (line 373) | def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
  function apply_classifier (line 404) | def apply_classifier(x, model, img, im0):
  function increment_path (line 439) | def increment_path(path, exist_ok=True, sep=''):

FILE: asone/detectors/yolor/utils/google_utils.py
  function gsutil_getsize (line 13) | def gsutil_getsize(url=''):
  function attempt_download (line 19) | def attempt_download(weights):
  function attempt_load (line 39) | def attempt_load(weights, map_location=None):
  function gdrive_download (line 55) | def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco12...
  function get_token (line 89) | def get_token(cookie="./cookie"):
  class Ensemble (line 96) | class Ensemble(nn.ModuleList):
    method __init__ (line 98) | def __init__(self):
    method forward (line 101) | def forward(self, x, augment=False, profile=False, visualize=False):

FILE: asone/detectors/yolor/utils/layers.py
  class Mish (line 10) | class Mish(nn.Module):  # https://github.com/digantamisra98/Mish
    method forward (line 11) | def forward(self, x):
  class DWT (line 17) | class DWT(nn.Module):
    method __init__ (line 18) | def __init__(self):
    method forward (line 22) | def forward(self, x):
    method forward (line 29) | def forward(self, x):
  class DWT (line 28) | class DWT(nn.Module):
    method __init__ (line 18) | def __init__(self):
    method forward (line 22) | def forward(self, x):
    method forward (line 29) | def forward(self, x):
  class Reorg (line 33) | class Reorg(nn.Module):
    method forward (line 34) | def forward(self, x):
  function make_divisible (line 38) | def make_divisible(v, divisor):
  class Flatten (line 44) | class Flatten(nn.Module):
    method forward (line 46) | def forward(self, x):
  class Concat (line 50) | class Concat(nn.Module):
    method __init__ (line 52) | def __init__(self, dimension=1):
    method forward (line 56) | def forward(self, x):
  class FeatureConcat (line 60) | class FeatureConcat(nn.Module):
    method __init__ (line 61) | def __init__(self, layers):
    method forward (line 66) | def forward(self, x, outputs):
  class FeatureConcat2 (line 70) | class FeatureConcat2(nn.Module):
    method __init__ (line 71) | def __init__(self, layers):
    method forward (line 76) | def forward(self, x, outputs):
  class FeatureConcat3 (line 80) | class FeatureConcat3(nn.Module):
    method __init__ (line 81) | def __init__(self, layers):
    method forward (line 86) | def forward(self, x, outputs):
  class FeatureConcat_l (line 90) | class FeatureConcat_l(nn.Module):
    method __init__ (line 91) | def __init__(self, layers):
    method forward (line 96) | def forward(self, x, outputs):
  class WeightedFeatureFusion (line 100) | class WeightedFeatureFusion(nn.Module):  # weighted sum of 2 or more lay...
    method __init__ (line 101) | def __init__(self, layers, weight=False):
    method forward (line 109) | def forward(self, x, outputs):
  class MixConv2d (line 132) | class MixConv2d(nn.Module):  # MixConv: Mixed Depthwise Convolutional Ke...
    method __init__ (line 133) | def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, b...
    method forward (line 156) | def forward(self, x):
  class SwishImplementation (line 161) | class SwishImplementation(torch.autograd.Function):
    method forward (line 163) | def forward(ctx, x):
    method backward (line 168) | def backward(ctx, grad_output):
  class MishImplementation (line 174) | class MishImplementation(torch.autograd.Function):
    method forward (line 176) | def forward(ctx, x):
    method backward (line 181) | def backward(ctx, grad_output):
  class MemoryEfficientSwish (line 188) | class MemoryEfficientSwish(nn.Module):
    method forward (line 189) | def forward(self, x):
  class MemoryEfficientMish (line 193) | class MemoryEfficientMish(nn.Module):
    method forward (line 194) | def forward(self, x):
  class Swish (line 198) | class Swish(nn.Module):
    method forward (line 199) | def forward(self, x):
  class HardSwish (line 203) | class HardSwish(nn.Module):  # https://arxiv.org/pdf/1905.02244.pdf
    method forward (line 204) | def forward(self, x):
  class DeformConv2d (line 208) | class DeformConv2d(nn.Module):
    method __init__ (line 209) | def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias...
    method _set_lr (line 232) | def _set_lr(module, grad_input, grad_output):
    method forward (line 236) | def forward(self, x):
    method _get_p_n (line 294) | def _get_p_n(self, N, dtype):
    method _get_p_0 (line 304) | def _get_p_0(self, h, w, N, dtype):
    method _get_p (line 314) | def _get_p(self, offset, dtype):
    method _get_x_q (line 324) | def _get_x_q(self, x, q, N):
    method _reshape_x_offset (line 341) | def _reshape_x_offset(x_offset, ks):
  class GAP (line 349) | class GAP(nn.Module):
    method __init__ (line 350) | def __init__(self):
    method forward (line 353) | def forward(self, x):
  class Silence (line 358) | class Silence(nn.Module):
    method __init__ (line 359) | def __init__(self):
    method forward (line 361) | def forward(self, x):
  class ScaleChannel (line 365) | class ScaleChannel(nn.Module):  # weighted sum of 2 or more layers https...
    method __init__ (line 366) | def __init__(self, layers):
    method forward (line 370) | def forward(self, x, outputs):
  class ShiftChannel (line 375) | class ShiftChannel(nn.Module):  # weighted sum of 2 or more layers https...
    method __init__ (line 376) | def __init__(self, layers):
    method forward (line 380) | def forward(self, x, outputs):
  class ShiftChannel2D (line 385) | class ShiftChannel2D(nn.Module):  # weighted sum of 2 or more layers htt...
    method __init__ (line 386) | def __init__(self, layers):
    method forward (line 390) | def forward(self, x, outputs):
  class ControlChannel (line 395) | class ControlChannel(nn.Module):  # weighted sum of 2 or more layers htt...
    method __init__ (line 396) | def __init__(self, layers):
    method forward (line 400) | def forward(self, x, outputs):
  class ControlChannel2D (line 405) | class ControlChannel2D(nn.Module):  # weighted sum of 2 or more layers h...
    method __init__ (line 406) | def __init__(self, layers):
    method forward (line 410) | def forward(self, x, outputs):
  class AlternateChannel (line 415) | class AlternateChannel(nn.Module):  # weighted sum of 2 or more layers h...
    method __init__ (line 416) | def __init__(self, layers):
    method forward (line 420) | def forward(self, x, outputs):
  class AlternateChannel2D (line 425) | class AlternateChannel2D(nn.Module):  # weighted sum of 2 or more layers...
    method __init__ (line 426) | def __init__(self, layers):
    method forward (line 430) | def forward(self, x, outputs):
  class SelectChannel (line 435) | class SelectChannel(nn.Module):  # weighted sum of 2 or more layers http...
    method __init__ (line 436) | def __init__(self, layers):
    method forward (line 440) | def forward(self, x, outputs):
  class SelectChannel2D (line 445) | class SelectChannel2D(nn.Module):  # weighted sum of 2 or more layers ht...
    method __init__ (line 446) | def __init__(self, layers):
    method forward (line 450) | def forward(self, x, outputs):
  class ScaleSpatial (line 455) | class ScaleSpatial(nn.Module):  # weighted sum of 2 or more layers https...
    method __init__ (line 456) | def __init__(self, layers):
    method forward (line 460) | def forward(self, x, outputs):
  class ImplicitA (line 465) | class ImplicitA(nn.Module):
    method __init__ (line 466) | def __init__(self, channel):
    method forward (line 472) | def forward(self):
  class ImplicitC (line 476) | class ImplicitC(nn.Module):
    method __init__ (line 477) | def __init__(self, channel):
    method forward (line 483) | def forward(self):
  class ImplicitM (line 487) | class ImplicitM(nn.Module):
    method __init__ (line 488) | def __init__(self, channel):
    method forward (line 494) | def forward(self):
  class Implicit2DA (line 499) | class Implicit2DA(nn.Module):
    method __init__ (line 500) | def __init__(self, atom, channel):
    method forward (line 506) | def forward(self):
  class Implicit2DC (line 510) | class Implicit2DC(nn.Module):
    method __init__ (line 511) | def __init__(self, atom, channel):
    method forward (line 517) | def forward(self):
  class Implicit2DM (line 521) | class Implicit2DM(nn.Module):
    method __init__ (line 522) | def __init__(self, atom, channel):
    method forward (line 528) | def forward(self):

FILE: asone/detectors/yolor/utils/loss.py
  function smooth_BCE (line 10) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class BCEBlurWithLogitsLoss (line 15) | class BCEBlurWithLogitsLoss(nn.Module):
    method __init__ (line 17) | def __init__(self, alpha=0.05):
    method forward (line 22) | def forward(self, pred, true):
  class FocalLoss (line 32) | class FocalLoss(nn.Module):
    method __init__ (line 34) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 42) | def forward(self, pred, true):
  function compute_loss (line 62) | def compute_loss(p, targets, model):  # predictions, targets, model
  function build_targets (line 127) | def build_targets(p, targets, model):

FILE: asone/detectors/yolor/utils/metrics.py
  function fitness (line 7) | def fitness(x):
  function fitness_p (line 13) | def fitness_p(x):
  function fitness_r (line 19) | def fitness_r(x):
  function fitness_ap50 (line 25) | def fitness_ap50(x):
  function fitness_ap (line 31) | def fitness_ap(x):
  function fitness_f (line 37) | def fitness_f(x):
  function ap_per_class (line 43) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='prec...
  function compute_ap (line 114) | def compute_ap(recall, precision):

FILE: asone/detectors/yolor/utils/parse_config.py
  function parse_model_cfg (line 6) | def parse_model_cfg(path):
  function parse_data_cfg (line 55) | def parse_data_cfg(path):

FILE: asone/detectors/yolor/utils/plots.py
  function color_list (line 26) | def color_list():
  function hist2d (line 34) | def hist2d(x, y, n=100):
  function butter_lowpass_filtfilt (line 43) | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  function plot_one_box (line 54) | def plot_one_box(x, img, color=None, label=None, line_thickness=None):
  function plot_wh_methods (line 68) | def plot_wh_methods():  # from utils.general import *; plot_wh_methods()
  function output_to_target (line 89) | def output_to_target(output, width, height):
  function plot_images (line 111) | def plot_images(images, targets, paths=None, fname='images.jpg', names=N...
  function plot_lr_scheduler (line 186) | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
  function plot_test_txt (line 203) | def plot_test_txt():  # from utils.general import *; plot_test()
  function plot_targets_txt (line 220) | def plot_targets_txt():  # from utils.general import *; plot_targets_txt()
  function plot_study_txt (line 233) | def plot_study_txt(f='study.txt', x=None):  # from utils.general import ...
  function plot_labels (line 265) | def plot_labels(labels, save_dir=''):
  function plot_evolution (line 297) | def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.ge...
  function plot_results_overlay (line 321) | def plot_results_overlay(start=0, stop=0):  # from utils.general import ...
  function plot_results (line 344) | def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=...

FILE: asone/detectors/yolor/utils/torch_utils.py
  function torch_distributed_zero_first (line 20) | def torch_distributed_zero_first(local_rank: int):
  function init_torch_seeds (line 31) | def init_torch_seeds(seed=0):
  function select_device (line 42) | def select_device(device='', batch_size=None):
  function time_synchronized (line 68) | def time_synchronized():
  function is_parallel (line 73) | def is_parallel(model):
  function intersect_dicts (line 77) | def intersect_dicts(da, db, exclude=()):
  function initialize_weights (line 82) | def initialize_weights(model):
  function find_modules (line 94) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 99) | def sparsity(model):
  function prune (line 108) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 119) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 142) | def model_info(model, verbose=False, img_size=640):
  function load_classifier (line 164) | def load_classifier(name='resnet101', n=2):
  function scale_img (line 183) | def scale_img(img, ratio=1.0, same_shape=False):  # img(16,3,256,416), r...
  function copy_attr (line 197) | def copy_attr(a, b, include=(), exclude=()):
  class ModelEMA (line 206) | class ModelEMA:
    method __init__ (line 216) | def __init__(self, model, decay=0.9999, updates=0):
    method update (line 226) | def update(self, model):
    method update_attr (line 238) | def update_attr(self, model, include=(), exclude=('process_group', 're...

FILE: asone/detectors/yolor/utils/yolor_utils.py
  function box_area (line 14) | def box_area(box):
  function box_iou (line 19) | def box_iou(box1, box2, eps=1e-7):
  function xywh2xyxy (line 39) | def xywh2xyxy(x):
  function non_max_suppression (line 48) | def non_max_suppression(prediction,
  function letterbox (line 148) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function scale_coords (line 180) | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  function clip_coords (line 196) | def clip_coords(boxes, shape):

FILE: asone/detectors/yolor/yolor_detector.py
  class YOLOrDetector (line 18) | class YOLOrDetector:
    method __init__ (line 19) | def __init__(self,
    method load_model (line 40) | def load_model(self, use_cuda, weights, cfg, img_size, fp16=False):
    method image_preprocessing (line 60) | def image_preprocessing(self,
    method detect (line 73) | def detect(self, image: list,

FILE: asone/detectors/yolov5/yolov5/export.py
  class iOSModel (line 93) | class iOSModel(torch.nn.Module):
    method __init__ (line 94) | def __init__(self, model, im):
    method forward (line 107) | def forward(self, x):
  function export_formats (line 113) | def export_formats():
  function try_export (line 132) | def try_export(inner_func):
  function export_torchscript (line 151) | def export_torchscript(model, im, file, optimize, prefix=colorstr("Torch...
  function export_onnx (line 169) | def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colors...
  function export_openvino (line 226) | def export_openvino(file, metadata, half, int8, data, prefix=colorstr("O...
  function export_paddle (line 282) | def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePadd...
  function export_coreml (line 299) | def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("Cor...
  function export_engine (line 324) | def export_engine(model, im, file, half, dynamic, simplify, workspace=4,...
  function export_saved_model (line 390) | def export_saved_model(
  function export_pb (line 453) | def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
  function export_tflite (line 470) | def export_tflite(
  function export_edgetpu (line 506) | def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
  function export_tfjs (line 548) | def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
  function add_tflite_metadata (line 585) | def add_tflite_metadata(file, metadata, num_outputs):
  function pipeline_coreml (line 622) | def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML P...
  function run (line 756) | def run(
  function parse_opt (line 892) | def parse_opt(known=False):
  function main (line 928) | def main(opt):

FILE: asone/detectors/yolov5/yolov5/models/common.py
  function autopad (line 28) | def autopad(k, p=None):  # kernel, padding
  class Conv (line 35) | class Conv(nn.Module):
    method __init__ (line 37) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 43) | def forward(self, x):
    method forward_fuse (line 46) | def forward_fuse(self, x):
  class DWConv (line 50) | class DWConv(Conv):
    method __init__ (line 52) | def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kern...
  class DWConvTranspose2d (line 56) | class DWConvTranspose2d(nn.ConvTranspose2d):
    method __init__ (line 58) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, ke...
  class TransformerLayer (line 62) | class TransformerLayer(nn.Module):
    method __init__ (line 64) | def __init__(self, c, num_heads):
    method forward (line 73) | def forward(self, x):
  class TransformerBlock (line 79) | class TransformerBlock(nn.Module):
    method __init__ (line 81) | def __init__(self, c1, c2, num_heads, num_layers):
    method forward (line 90) | def forward(self, x):
  class Bottleneck (line 98) | class Bottleneck(nn.Module):
    method __init__ (line 100) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 107) | def forward(self, x):
  class BottleneckCSP (line 111) | class BottleneckCSP(nn.Module):
    method __init__ (line 113) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 124) | def forward(self, x):
  class CrossConv (line 130) | class CrossConv(nn.Module):
    method __init__ (line 132) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
    method forward (line 140) | def forward(self, x):
  class C3 (line 144) | class C3(nn.Module):
    method __init__ (line 146) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 154) | def forward(self, x):
  class C3x (line 158) | class C3x(C3):
    method __init__ (line 160) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class C3TR (line 166) | class C3TR(C3):
    method __init__ (line 168) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class C3SPP (line 174) | class C3SPP(C3):
    method __init__ (line 176) | def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
  class C3Ghost (line 182) | class C3Ghost(C3):
    method __init__ (line 184) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
  class SPP (line 190) | class SPP(nn.Module):
    method __init__ (line 192) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 199) | def forward(self, x):
  class SPPF (line 206) | class SPPF(nn.Module):
    method __init__ (line 208) | def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
    method forward (line 215) | def forward(self, x):
  class Focus (line 224) | class Focus(nn.Module):
    method __init__ (line 226) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 231) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class GhostConv (line 236) | class GhostConv(nn.Module):
    method __init__ (line 238) | def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out,...
    method forward (line 244) | def forward(self, x):
  class GhostBottleneck (line 249) | class GhostBottleneck(nn.Module):
    method __init__ (line 251) | def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
    method forward (line 261) | def forward(self, x):
  class Contract (line 265) | class Contract(nn.Module):
    method __init__ (line 267) | def __init__(self, gain=2):
    method forward (line 271) | def forward(self, x):
  class Expand (line 279) | class Expand(nn.Module):
    method __init__ (line 281) | def __init__(self, gain=2):
    method forward (line 285) | def forward(self, x):
  class Concat (line 293) | class Concat(nn.Module):
    method __init__ (line 295) | def __init__(self, dimension=1):
    method forward (line 299) | def forward(self, x):
  class DetectMultiBackend (line 303) | class DetectMultiBackend(nn.Module):
    method __init__ (line 305) | def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), d...
    method forward (line 451) | def forward(self, im, augment=False, visualize=False, val=False):
    method warmup (line 517) | def warmup(self, imgsz=(1, 3, 640, 640)):
    method model_type (line 526) | def model_type(p='path/to/model.pt'):
    method _load_metadata (line 538) | def _load_metadata(f='path/to/meta.yaml'):
  class AutoShape (line 545) | class AutoShape(nn.Module):
    method __init__ (line 555) | def __init__(self, model, verbose=True):
    method _apply (line 564) | def _apply(self, fn):
    method forward (line 576) | def forward(self, imgs, size=640, augment=False, profile=False):
  class Detections (line 638) | class Detections:
    method __init__ (line 640) | def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, ...
    method display (line 657) | def display(self, pprint=False, show=False, save=False, crop=False, re...
    method print (line 700) | def print(self):
    method show (line 704) | def show(self, labels=True):
    method save (line 707) | def save(self, labels=True, save_dir='runs/detect/exp'):
    method crop (line 711) | def crop(self, save=True, save_dir='runs/detect/exp'):
    method render (line 715) | def render(self, labels=True):
    method pandas (line 719) | def pandas(self):
    method tolist (line 729) | def tolist(self):
    method __len__ (line 738) | def __len__(self):
    method __str__ (line 741) | def __str__(self):
  class Classify (line 746) | class Classify(nn.Module):
    method __init__ (line 748) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, k...
    method forward (line 754) | def forward(self, x):

FILE: asone/detectors/yolov5/yolov5/models/experimental.py
  class Ensemble (line 12) | class Ensemble(nn.ModuleList):
    method __init__ (line 14) | def __init__(self):
    method forward (line 17) | def forward(self, x, augment=False, profile=False, visualize=False):
  function attempt_load (line 25) | def attempt_load(weights, device=None, inplace=True, fuse=True):

FILE: asone/detectors/yolov5/yolov5/models/general.py
  function is_kaggle (line 55) | def is_kaggle():
  function is_writeable (line 65) | def is_writeable(dir, test=False):
  function set_logging (line 79) | def set_logging(name=None, verbose=VERBOSE):
  function user_config_dir (line 98) | def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
  class Profile (line 114) | class Profile(contextlib.ContextDecorator):
    method __enter__ (line 116) | def __enter__(self):
    method __exit__ (line 119) | def __exit__(self, type, value, traceback):
  class Timeout (line 123) | class Timeout(contextlib.ContextDecorator):
    method __init__ (line 125) | def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors...
    method _timeout_handler (line 130) | def _timeout_handler(self, signum, frame):
    method __enter__ (line 133) | def __enter__(self):
    method __exit__ (line 138) | def __exit__(self, exc_type, exc_val, exc_tb):
  class WorkingDirectory (line 145) | class WorkingDirectory(contextlib.ContextDecorator):
    method __init__ (line 147) | def __init__(self, new_dir):
    method __enter__ (line 151) | def __enter__(self):
    method __exit__ (line 154) | def __exit__(self, exc_type, exc_val, exc_tb):
  function try_except (line 158) | def try_except(func):
  function threaded (line 169) | def threaded(func):
  function methods (line 179) | def methods(instance):
  function print_args (line 184) | def print_args(args: Optional[dict] = None, show_file=True, show_fcn=Fal...
  function init_seeds (line 195) | def init_seeds(seed=0, deterministic=False):
  function intersect_dicts (line 213) | def intersect_dicts(da, db, exclude=()):
  function get_latest_run (line 218) | def get_latest_run(search_dir='.'):
  function is_docker (line 224) | def is_docker() -> bool:
  function is_colab (line 235) | def is_colab():
  function is_pip (line 244) | def is_pip():
  function is_ascii (line 249) | def is_ascii(s=''):
  function is_chinese (line 255) | def is_chinese(s='人工智能'):
  function emojis (line 260) | def emojis(str=''):
  function file_age (line 265) | def file_age(path=__file__):
  function file_date (line 271) | def file_date(path=__file__):
  function file_size (line 277) | def file_size(path):
  function check_online (line 289) | def check_online():
  function git_describe (line 299) | def git_describe(path=ROOT):  # path must be a directory
  function check_git_status (line 310) | def check_git_status(repo='ultralytics/yolov5'):
  function check_python (line 336) | def check_python(minimum='3.7.0'):
  function check_version (line 341) | def check_version(current='0.0.0', minimum='0.0.0', name='version ', pin...
  function check_requirements (line 354) | def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(...
  function check_img_size (line 390) | def check_img_size(imgsz, s=32, floor=0):
  function check_imshow (line 402) | def check_imshow():
  function check_suffix (line 417) | def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
  function check_yaml (line 428) | def check_yaml(file, suffix=('.yaml', '.yml')):
  function check_file (line 433) | def check_file(file, suffix=''):
  function check_font (line 458) | def check_font(font=FONT, progress=False):
  function check_dataset (line 468) | def check_dataset(data, autodownload=True):
  function check_amp (line 528) | def check_amp(model):
  function url2file (line 556) | def url2file(url):
  function download (line 562) | def download(url, dir='.', unzip=True, delete=True, curl=False, threads=...
  function make_divisible (line 608) | def make_divisible(x, divisor):
  function clean_str (line 615) | def clean_str(s):
  function one_cycle (line 620) | def one_cycle(y1=0.0, y2=1.0, steps=100):
  function colorstr (line 625) | def colorstr(*input):
  function labels_to_class_weights (line 651) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 670) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 677) | def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index...
  function xyxy2xywh (line 689) | def xyxy2xywh(x):
  function xywh2xyxy (line 699) | def xywh2xyxy(x):
  function xywhn2xyxy (line 709) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
  function xyxy2xywhn (line 719) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
  function xyn2xy (line 731) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
  function segment2box (line 739) | def segment2box(segment, width=640, height=640):
  function segments2boxes (line 747) | def segments2boxes(segments):
  function resample_segments (line 756) | def resample_segments(segments, n=1000):
  function scale_coords (line 766) | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  function clip_coords (line 782) | def clip_coords(boxes, shape):
  function non_max_suppression (line 794) | def non_max_suppression(prediction,
  function strip_optimizer (line 896) | def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; ...
  function print_mutation (line 912) | def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evol...
  function apply_classifier (line 952) | def apply_classifier(x, model, img, im0):
  function increment_path (line 987) | def increment_path(path, exist_ok=False, sep='', mkdir=False):
  function imread (line 1017) | def imread(path, flags=cv2.IMREAD_COLOR):
  function imwrite (line 1021) | def imwrite(path, im):
  function imshow (line 1029) | def imshow(path, im):

FILE: asone/detectors/yolov5/yolov5/models/tf.py
  class TFBN (line 51) | class TFBN(keras.layers.Layer):
    method __init__ (line 53) | def __init__(self, w=None):
    method call (line 64) | def call(self, inputs):
  class TFPad (line 69) | class TFPad(keras.layers.Layer):
    method __init__ (line 71) | def __init__(self, pad):
    method call (line 84) | def call(self, inputs):
  class TFConv (line 89) | class TFConv(keras.layers.Layer):
    method __init__ (line 91) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 115) | def call(self, inputs):
  class TFDWConv (line 120) | class TFDWConv(keras.layers.Layer):
    method __init__ (line 122) | def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
    method call (line 144) | def call(self, inputs):
  class TFDWConvTranspose2d (line 149) | class TFDWConvTranspose2d(keras.layers.Layer):
    method __init__ (line 151) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
    method call (line 176) | def call(self, inputs):
  class TFFocus (line 181) | class TFFocus(keras.layers.Layer):
    method __init__ (line 183) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 193) | def call(self, inputs):
  class TFBottleneck (line 203) | class TFBottleneck(keras.layers.Layer):
    method __init__ (line 205) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):
    method call (line 218) | def call(self, inputs):
  class TFCrossConv (line 225) | class TFCrossConv(keras.layers.Layer):
    method __init__ (line 227) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
    method call (line 235) | def call(self, inputs):
  class TFConv2d (line 240) | class TFConv2d(keras.layers.Layer):
    method __init__ (line 242) | def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
    method call (line 258) | def call(self, inputs):
  class TFBottleneckCSP (line 263) | class TFBottleneckCSP(keras.layers.Layer):
    method __init__ (line 265) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 282) | def call(self, inputs):
  class TFC3 (line 291) | class TFC3(keras.layers.Layer):
    method __init__ (line 293) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 306) | def call(self, inputs):
  class TFC3x (line 315) | class TFC3x(keras.layers.Layer):
    method __init__ (line 317) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 332) | def call(self, inputs):
  class TFSPP (line 337) | class TFSPP(keras.layers.Layer):
    method __init__ (line 339) | def __init__(self, c1, c2, k=(5, 9, 13), w=None):
    method call (line 347) | def call(self, inputs):
  class TFSPPF (line 353) | class TFSPPF(keras.layers.Layer):
    method __init__ (line 355) | def __init__(self, c1, c2, k=5, w=None):
    method call (line 365) | def call(self, inputs):
  class TFDetect (line 375) | class TFDetect(keras.layers.Layer):
    method __init__ (line 377) | def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):
    method call (line 397) | def call(self, inputs):
    method _make_grid (line 422) | def _make_grid(nx=20, ny=20):
  class TFSegment (line 429) | class TFSegment(TFDetect):
    method __init__ (line 431) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(64...
    method call (line 443) | def call(self, x):
  class TFProto (line 452) | class TFProto(keras.layers.Layer):
    method __init__ (line 453) | def __init__(self, c1, c_=256, c2=32, w=None):
    method call (line 463) | def call(self, inputs):
  class TFUpsample (line 468) | class TFUpsample(keras.layers.Layer):
    method __init__ (line 470) | def __init__(self, size, scale_factor, mode, w=None):
    method call (line 485) | def call(self, inputs):
  class TFConcat (line 490) | class TFConcat(keras.layers.Layer):
    method __init__ (line 492) | def __init__(self, dimension=1, w=None):
    method call (line 498) | def call(self, inputs):
  function parse_model (line 503) | def parse_model(d, ch, model, imgsz):
  class TFModel (line 583) | class TFModel:
    method __init__ (line 585) | def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgs...
    method predict (line 605) | def predict(
    method _xywh2xyxy (line 646) | def _xywh2xyxy(xywh):
  class AgnosticNMS (line 654) | class AgnosticNMS(keras.layers.Layer):
    method call (line 656) | def call(self, input, topk_all, iou_thres, conf_thres):
    method _nms (line 666) | def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):
  function activations (line 701) | def activations(act=nn.SiLU):
  function representative_dataset_gen (line 713) | def representative_dataset_gen(dataset, ncalib=100):
  function run (line 726) | def run(
  function parse_opt (line 751) | def parse_opt():
  function main (line 766) | def main(opt):

FILE: asone/detectors/yolov5/yolov5/models/yolo.py
  class Detect (line 44) | class Detect(nn.Module):
    method __init__ (line 49) | def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detecti...
    method forward (line 61) | def forward(self, x):
    method _make_grid (line 85) | def _make_grid(self, nx=20, ny=20, i=0):
  class DetectionModel (line 99) | class DetectionModel(nn.Module):
    method __init__ (line 101) | def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  ...
    method forward (line 139) | def forward(self, x, augment=False, profile=False, visualize=False):
    method _forward_augment (line 144) | def _forward_augment(self, x):
    method _forward_once (line 158) | def _forward_once(self, x, profile=False, visualize=False):
    method _descale_pred (line 171) | def _descale_pred(self, p, flips, scale, img_size):
    method _clip_augmented (line 188) | def _clip_augmented(self, y):
    method _profile_one_layer (line 199) | def _profile_one_layer(self, m, x, dt):
    method _initialize_biases (line 212) | def _initialize_biases(self, cf=None):  # initialize biases into Detec...
    method _print_biases (line 222) | def _print_biases(self):
    method _print_weights (line 229) | def _print_weights(self):
    method fuse (line 234) | def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
    method info (line 244) | def info(self, verbose=False, img_size=640):  # print model information
    method _apply (line 247) | def _apply(self, fn):
  function parse_model (line 261) | def parse_model(d, ch):  # model_dict, input_channels(3)

FILE: asone/detectors/yolov5/yolov5/utils/activations.py
  class SiLU (line 9) | class SiLU(nn.Module):
    method forward (line 11) | def forward(x):
  class Hardswish (line 20) | class Hardswish(nn.Module):
    method forward (line 22) | def forward(x):
  class Mish (line 31) | class Mish(nn.Module):
    method forward (line 35) | def forward(x):
  class MemoryEfficientMish (line 40) | class MemoryEfficientMish(nn.Module):
    class F (line 41) | class F(torch.autograd.Function):
      method forward (line 43) | def forward(ctx, x):
      method backward (line 49) | def backward(ctx, grad_output):
    method forward (line 56) | def forward(self, x):
  class FReLU (line 61) | class FReLU(nn.Module):
    method __init__ (line 64) | def __init__(self, c1, k=3):  # ch_in, kernel
    method forward (line 70) | def forward(self, x):
  class AconC (line 79) | class AconC(nn.Module):
    method __init__ (line 87) | def __init__(self, c1):
    method forward (line 94) | def forward(self, x):
  class MetaAconC (line 100) | class MetaAconC(nn.Module):
    method __init__ (line 108) | def __init__(self, c1, k=1, s=1, r=16):
    method forward (line 119) | def forward(self, x):

FILE: asone/detectors/yolov5/yolov5/utils/augmentations.py
  class Albumentations (line 20) | class Albumentations:
    method __init__ (line 22) | def __init__(self, size=640):
    method __call__ (line 49) | def __call__(self, im, labels, p=1.0):
  function normalize (line 57) | def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
  function denormalize (line 66) | def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
  function augment_hsv (line 73) | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
  function hist_equalize (line 89) | def hist_equalize(im, clahe=True, bgr=False):
  function replicate (line 100) | def replicate(im, labels):
  function letterbox (line 121) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function random_perspective (line 154) | def random_perspective(
  function copy_paste (line 244) | def copy_paste(im, labels, segments, p=0.5):
  function cutout (line 270) | def cutout(im, labels, p=0.5):
  function mixup (line 301) | def mixup(im, labels, im2, labels2):
  function box_candidates (line 313) | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1...
  function classify_albumentations (line 326) | def classify_albumentations(
  function classify_transforms (line 370) | def classify_transforms(size=224):
  class LetterBox (line 377) | class LetterBox:
    method __init__ (line 379) | def __init__(self, size=(640, 640), auto=False, stride=32):
    method __call__ (line 388) | def __call__(self, im):
  class CenterCrop (line 404) | class CenterCrop:
    method __init__ (line 406) | def __init__(self, size=640):
    method __call__ (line 411) | def __call__(self, im):
  class ToTensor (line 423) | class ToTensor:
    method __init__ (line 425) | def __init__(self, half=False):
    method __call__ (line 430) | def __call__(self, im):

FILE: asone/detectors/yolov5/yolov5/utils/dataloaders.py
  function get_hash (line 75) | def get_hash(paths):
  function exif_size (line 83) | def exif_size(img):
  function exif_transpose (line 93) | def exif_transpose(image):
  function seed_worker (line 120) | def seed_worker(worker_id):
  class SmartDistributedSampler (line 133) | class SmartDistributedSampler(distributed.DistributedSampler):
    method __iter__ (line 134) | def __iter__(self):
  function create_dataloader (line 158) | def create_dataloader(
  class InfiniteDataLoader (line 217) | class InfiniteDataLoader(dataloader.DataLoader):
    method __init__ (line 224) | def __init__(self, *args, **kwargs):
    method __len__ (line 232) | def __len__(self):
    method __iter__ (line 236) | def __iter__(self):
  class _RepeatSampler (line 242) | class _RepeatSampler:
    method __init__ (line 250) | def __init__(self, sampler):
    method __iter__ (line 254) | def __iter__(self):
  class LoadScreenshots (line 260) | class LoadScreenshots:
    method __init__ (line 262) | def __init__(self, source, img_size=640, stride=32, auto=True, transfo...
    method __iter__ (line 296) | def __iter__(self):
    method __next__ (line 300) | def __next__(self):
  class LoadImages (line 317) | class LoadImages:
    method __init__ (line 320) | def __init__(self, path, img_size=640, stride=32, auto=True, transform...
    method __iter__ (line 358) | def __iter__(self):
    method __next__ (line 363) | def __next__(self):
    method _new_video (line 404) | def _new_video(self, path):
    method _cv2_rotate (line 414) | def _cv2_rotate(self, im):
    method __len__ (line 424) | def __len__(self):
  class LoadStreams (line 429) | class LoadStreams:
    method __init__ (line 431) | def __init__(self, sources="file.streams", img_size=640, stride=32, au...
    method update (line 479) | def update(self, i, cap, stream):
    method __iter__ (line 495) | def __iter__(self):
    method __next__ (line 500) | def __next__(self):
    method __len__ (line 519) | def __len__(self):
  function img2label_paths (line 524) | def img2label_paths(img_paths):
  class LoadImagesAndLabels (line 532) | class LoadImagesAndLabels(Dataset):
    method __init__ (line 537) | def __init__(
    method check_cache_ram (line 695) | def check_cache_ram(self, safety_margin=0.1, prefix=""):
    method cache_labels (line 714) | def cache_labels(self, path=Path("./labels.cache"), prefix=""):
    method __len__ (line 754) | def __len__(self):
    method __getitem__ (line 764) | def __getitem__(self, index):
    method load_image (line 841) | def load_image(self, i):
    method cache_images_to_disk (line 866) | def cache_images_to_disk(self, i):
    method load_mosaic (line 872) | def load_mosaic(self, index):
    method load_mosaic9 (line 932) | def load_mosaic9(self, index):
    method collate_fn (line 1014) | def collate_fn(batch):
    method collate_fn4 (line 1022) | def collate_fn4(batch):
  function flatten_recursive (line 1051) | def flatten_recursive(path=DATASETS_DIR / "coco128"):
  function extract_boxes (line 1063) | def extract_boxes(path=DATASETS_DIR / "coco128"):
  function autosplit (line 1102) | def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0...
  function verify_image_label (line 1128) | def verify_image_label(args):
  class HUBDatasetStats (line 1180) | class HUBDatasetStats:
    method __init__ (line 1196) | def __init__(self, path="coco128.yaml", autodownload=False):
    method _find_yaml (line 1217) | def _find_yaml(dir):
    method _unzip (line 1229) | def _unzip(self, path):
    method _hub_ops (line 1239) | def _hub_ops(self, f, max_dim=1920):
    method get_json (line 1257) | def get_json(self, save=False, verbose=False):
    method process_images (line 1295) | def process_images(self):
  class ClassificationDataset (line 1311) | class ClassificationDataset(torchvision.datasets.ImageFolder):
    method __init__ (line 1321) | def __init__(self, root, augment, imgsz, cache=False):
    method __getitem__ (line 1332) | def __getitem__(self, i):
  function create_classification_dataloader (line 1350) | def create_classification_dataloader(

FILE: asone/detectors/yolov5/yolov5/utils/downloads.py
  function is_url (line 13) | def is_url(url, check=True):
  function gsutil_getsize (line 24) | def gsutil_getsize(url=""):
  function url_getsize (line 34) | def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
  function curl_download (line 40) | def curl_download(url, filename, *, silent: bool = False) -> bool:
  function safe_download (line 60) | def safe_download(file, url, url2=None, min_bytes=1e0, error_msg=""):
  function attempt_download (line 88) | def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"):

FILE: asone/detectors/yolov5/yolov5/utils/general.py
  function is_ascii (line 73) | def is_ascii(s=""):
  function is_chinese (line 79) | def is_chinese(s="人工智能"):
  function is_colab (line 84) | def is_colab():
  function is_jupyter (line 89) | def is_jupyter():
  function is_kaggle (line 103) | def is_kaggle():
  function is_docker (line 108) | def is_docker() -> bool:
  function is_writeable (line 119) | def is_writeable(dir, test=False):
  function set_logging (line 136) | def set_logging(name=LOGGING_NAME, verbose=True):
  function user_config_dir (line 170) | def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
  class Profile (line 188) | class Profile(contextlib.ContextDecorator):
    method __init__ (line 190) | def __init__(self, t=0.0, device: torch.device = None):
    method __enter__ (line 196) | def __enter__(self):
    method __exit__ (line 201) | def __exit__(self, type, value, traceback):
    method time (line 206) | def time(self):
  class Timeout (line 213) | class Timeout(contextlib.ContextDecorator):
    method __init__ (line 215) | def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors...
    method _timeout_handler (line 221) | def _timeout_handler(self, signum, frame):
    method __enter__ (line 225) | def __enter__(self):
    method __exit__ (line 231) | def __exit__(self, exc_type, exc_val, exc_tb):
  class WorkingDirectory (line 239) | class WorkingDirectory(contextlib.ContextDecorator):
    method __init__ (line 241) | def __init__(self, new_dir):
    method __enter__ (line 246) | def __enter__(self):
    method __exit__ (line 250) | def __exit__(self, exc_type, exc_val, exc_tb):
  function methods (line 255) | def methods(instance):
  function print_args (line 260) | def print_args(args: Optional[dict] = None, show_file=True, show_func=Fa...
  function init_seeds (line 275) | def init_seeds(seed=0, deterministic=False):
  function intersect_dicts (line 294) | def intersect_dicts(da, db, exclude=()):
  function get_default_args (line 301) | def get_default_args(func):
  function get_latest_run (line 307) | def get_latest_run(search_dir="."):
  function file_age (line 313) | def file_age(path=__file__):
  function file_date (line 319) | def file_date(path=__file__):
  function file_size (line 325) | def file_size(path):
  function check_online (line 337) | def check_online():
  function git_describe (line 354) | def git_describe(path=ROOT):
  function check_git_status (line 369) | def check_git_status(repo="ultralytics/yolov5", branch="master"):
  function check_git_info (line 398) | def check_git_info(path="."):
  function check_python (line 416) | def check_python(minimum="3.8.0"):
  function check_version (line 421) | def check_version(current="0.0.0", minimum="0.0.0", name="version ", pin...
  function check_img_size (line 433) | def check_img_size(imgsz, s=32, floor=0):
  function check_imshow (line 445) | def check_imshow(warn=False):
  function check_suffix (line 461) | def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""):
  function check_yaml (line 472) | def check_yaml(file, suffix=(".yaml", ".yml")):
  function check_file (line 477) | def check_file(file, suffix=""):
  function check_font (line 507) | def check_font(font=FONT, progress=False):
  function check_dataset (line 517) | def check_dataset(data, autodownload=True):
  function check_amp (line 583) | def check_amp(model):
  function yaml_load (line 611) | def yaml_load(file="data.yaml"):
  function yaml_save (line 617) | def yaml_save(file="data.yaml", data={}):
  function unzip_file (line 625) | def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")):
  function url2file (line 637) | def url2file(url):
  function download (line 647) | def download(url, dir=".", unzip=True, delete=True, curl=False, threads=...
  function make_divisible (line 694) | def make_divisible(x, divisor):
  function clean_str (line 701) | def clean_str(s):
  function one_cycle (line 708) | def one_cycle(y1=0.0, y2=1.0, steps=100):
  function colorstr (line 717) | def colorstr(*input):
  function labels_to_class_weights (line 748) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 767) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 774) | def coco80_to_coco91_class():
  function xyxy2xywh (line 868) | def xyxy2xywh(x):
  function xywh2xyxy (line 878) | def xywh2xyxy(x):
  function xywhn2xyxy (line 888) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
  function xyxy2xywhn (line 898) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
  function xyn2xy (line 910) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
  function segment2box (line 918) | def segment2box(segment, width=640, height=640):
  function segments2boxes (line 929) | def segments2boxes(segments):
  function resample_segments (line 938) | def resample_segments(segments, n=1000):
  function scale_boxes (line 948) | def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
  function scale_segments (line 964) | def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, nor...
  function clip_boxes (line 983) | def clip_boxes(boxes, shape):
  function clip_segments (line 995) | def clip_segments(segments, shape):
  function non_max_suppression (line 1005) | def non_max_suppression(
  function strip_optimizer (line 1119) | def strip_optimizer(f="best.pt", s=""):
  function print_mutation (line 1140) | def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr...
  function apply_classifier (line 1195) | def apply_classifier(x, model, img, im0):
  function increment_path (line 1230) | def increment_path(path, exist_ok=False, sep="", mkdir=False):
  function imread (line 1265) | def imread(filename, flags=cv2.IMREAD_COLOR):
  function imwrite (line 1272) | def imwrite(filename, img):
  function imshow (line 1281) | def imshow(path, im):

FILE: asone/detectors/yolov5/yolov5/utils/metrics.py
  function fitness (line 15) | def fitness(x):
  function smooth (line 21) | def smooth(y, f=0.05):
  function ap_per_class (line 29) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir="....
  function compute_ap (line 98) | def compute_ap(recall, precision):
  class ConfusionMatrix (line 126) | class ConfusionMatrix:
    method __init__ (line 128) | def __init__(self, nc, conf=0.25, iou_thres=0.45):
    method process_batch (line 135) | def process_batch(self, detections, labels):
    method tp_fp (line 182) | def tp_fp(self):
    method plot (line 192) | def plot(self, normalize=True, save_dir="", names=()):
    method print (line 224) | def print(self):
  function bbox_iou (line 230) | def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, ...
  function box_iou (line 276) | def box_iou(box1, box2, eps=1e-7):
  function bbox_ioa (line 298) | def bbox_ioa(box1, box2, eps=1e-7):
  function wh_iou (line 324) | def wh_iou(wh1, wh2, eps=1e-7):
  function plot_pr_curve (line 338) | def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
  function plot_mc_curve (line 363) | def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabe...

FILE: asone/detectors/yolov5/yolov5/utils/torch_utils.py
  function smart_DDP (line 37) | def smart_DDP(model):
  function torch_distributed_zero_first (line 49) | def torch_distributed_zero_first(local_rank: int):
  function device_count (line 58) | def device_count():
  function select_device (line 68) | def select_device(device='', batch_size=0, newline=True):
  function time_sync (line 104) | def time_sync():
  function profile (line 111) | def profile(input, ops, n=10, device=None):
  function is_parallel (line 155) | def is_parallel(model):
  function de_parallel (line 160) | def de_parallel(model):
  function initialize_weights (line 165) | def initialize_weights(model):
  function find_modules (line 177) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 182) | def sparsity(model):
  function prune (line 191) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 202) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 225) | def model_info(model, verbose=False, img_size=640):
  function scale_img (line 250) | def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,...
  function copy_attr (line 262) | def copy_attr(a, b, include=(), exclude=()):
  function smart_optimizer (line 271) | def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_d...
  class EarlyStopping (line 301) | class EarlyStopping:
    method __init__ (line 303) | def __init__(self, patience=30):
    method __call__ (line 309) | def __call__(self, epoch, fitness):
  class ModelEMA (line 324) | class ModelEMA:
    method __init__ (line 330) | def __init__(self, model, decay=0.9999, tau=2000, updates=0):
    method update (line 340) | def update(self, model):
    method update_attr (line 352) | def update_attr(self, model, include=(), exclude=('process_group', 're...

FILE: asone/detectors/yolov5/yolov5/utils/yolov5_utils.py
  function box_area (line 12) | def box_area(box):
  function box_iou (line 17) | def box_iou(box1, box2, eps=1e-7):
  function xywh2xyxy (line 37) | def xywh2xyxy(x):
  function non_max_suppression (line 46) | def non_max_suppression(prediction,
  function letterbox (line 146) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function scale_coords (line 180) | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  function clip_coords (line 196) | def clip_coords(boxes, shape):
  function yolov5_in_syspath (line 208) | def yolov5_in_syspath():

FILE: asone/detectors/yolov5/yolov5_detector.py
  class YOLOv5Detector (line 19) | class YOLOv5Detector:
    method __init__ (line 20) | def __init__(self,
    method load_model (line 37) | def load_model(self, use_cuda, weights, fp16=False):
    method image_preprocessing (line 56) | def image_preprocessing(self,
    method detect (line 69) | def detect(self, image: list,

FILE: asone/detectors/yolov6/yolov6/assigners/anchor_generator.py
  function generate_anchors (line 4) | def generate_anchors(feats, fpn_strides, grid_cell_size=5.0, grid_cell_o...

FILE: asone/detectors/yolov6/yolov6/assigners/assigner_utils.py
  function dist_calculator (line 4) | def dist_calculator(gt_bboxes, anchor_bboxes):
  function select_candidates_in_gts (line 25) | def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
  function select_highest_overlaps (line 46) | def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
  function iou_calculator (line 69) | def iou_calculator(box1, box2, eps=1e-9):

FILE: asone/detectors/yolov6/yolov6/assigners/atss_assigner.py
  class ATSSAssigner (line 7) | class ATSSAssigner(nn.Module):
    method __init__ (line 9) | def __init__(self,
    method forward (line 18) | def forward(self,
    method select_topk_candidates (line 88) | def select_topk_candidates(self,
    method thres_calculator (line 117) | def thres_calculator(self,
    method get_targets (line 138) | def get_targets(self,

FILE: asone/detectors/yolov6/yolov6/assigners/iou2d_calculator.py
  function cast_tensor_type (line 7) | def cast_tensor_type(x, scale=1., dtype=None):
  function fp16_clamp (line 14) | def fp16_clamp(x, min=None, max=None):
  function iou2d_calculator (line 22) | def iou2d_calculator(bboxes1, bboxes2, mode='iou', is_aligned=False, sca...
  function bbox_overlaps (line 63) | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e...

FILE: asone/detectors/yolov6/yolov6/assigners/tal_assigner.py
  class TaskAlignedAssigner (line 6) | class TaskAlignedAssigner(nn.Module):
    method __init__ (line 7) | def __init__(self,
    method forward (line 22) | def forward(self,
    method get_pos_mask (line 75) | def get_pos_mask(self,
    method get_box_metrics (line 95) | def get_box_metrics(self,
    method select_topk_candidates (line 113) | def select_topk_candidates(self,
    method get_targets (line 130) | def get_targets(self,

FILE: asone/detectors/yolov6/yolov6/layers/common.py
  class SiLU (line 14) | class SiLU(nn.Module):
    method forward (line 17) | def forward(x):
  class Conv (line 21) | class Conv(nn.Module):
    method __init__ (line 23) | def __init__(self, in_channels, out_channels, kernel_size, stride, gro...
    method forward (line 38) | def forward(self, x):
    method forward_fuse (line 41) | def forward_fuse(self, x):
  class SimConv (line 45) | class SimConv(nn.Module):
    method __init__ (line 47) | def __init__(self, in_channels, out_channels, kernel_size, stride, gro...
    method forward (line 62) | def forward(self, x):
    method forward_fuse (line 65) | def forward_fuse(self, x):
  class ConvWrapper (line 68) | class ConvWrapper(nn.Module):
    method __init__ (line 70) | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,...
    method forward (line 74) | def forward(self, x):
  class SimConvWrapper (line 78) | class SimConvWrapper(nn.Module):
    method __init__ (line 80) | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,...
    method forward (line 84) | def forward(self, x):
  class SimSPPF (line 88) | class SimSPPF(nn.Module):
    method __init__ (line 90) | def __init__(self, in_channels, out_channels, kernel_size=5):
    method forward (line 97) | def forward(self, x):
  class SPPF (line 106) | class SPPF(nn.Module):
    method __init__ (line 108) | def __init__(self, in_channels, out_channels, kernel_size=5):  # equiv...
    method forward (line 115) | def forward(self, x):
  class Transpose (line 124) | class Transpose(nn.Module):
    method __init__ (line 126) | def __init__(self, in_channels, out_channels, kernel_size=2, stride=2):
    method forward (line 136) | def forward(self, x):
  class Concat (line 140) | class Concat(nn.Module):
    method __init__ (line 141) | def __init__(self, dimension=1):
    method forward (line 145) | def forward(self, x):
  function conv_bn (line 149) | def conv_bn(in_channels, out_channels, kernel_size, stride, padding, gro...
  class RepVGGBlock (line 158) | class RepVGGBlock(nn.Module):
    method __init__ (line 162) | def __init__(self, in_channels, out_channels, kernel_size=3,
    method forward (line 206) | def forward(self, inputs):
    method get_equivalent_kernel_bias (line 218) | def get_equivalent_kernel_bias(self):
    method _pad_1x1_to_3x3_tensor (line 224) | def _pad_1x1_to_3x3_tensor(self, kernel1x1):
    method _fuse_bn_tensor (line 230) | def _fuse_bn_tensor(self, branch):
    method switch_to_deploy (line 258) | def switch_to_deploy(self):
  class RealVGGBlock (line 278) | class RealVGGBlock(nn.Module):
    method __init__ (line 280) | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,...
    method forward (line 293) | def forward(self, inputs):
  class ScaleLayer (line 298) | class ScaleLayer(torch.nn.Module):
    method __init__ (line 300) | def __init__(self, num_features, use_bias=True, scale_init=1.0):
    method forward (line 311) | def forward(self, inputs):
  class LinearAddBlock (line 319) | class LinearAddBlock(nn.Module):
    method __init__ (line 321) | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,...
    method forward (line 341) | def forward(self, inputs):
  class DetectBackend (line 349) | class DetectBackend(nn.Module):
    method __init__ (line 350) | def __init__(self, weights='yolov6s.pt', device=None, dnn=True):
    method forward (line 359) | def forward(self, im, val=False):
  class RepBlock (line 366) | class RepBlock(nn.Module):
    method __init__ (line 370) | def __init__(self, in_channels, out_channels, n=1, block=RepVGGBlock, ...
    method forward (line 380) | def forward(self, x):
  class BottleRep (line 387) | class BottleRep(nn.Module):
    method __init__ (line 389) | def __init__(self, in_channels, out_channels, basic_block=RepVGGBlock,...
    method forward (line 402) | def forward(self, x):
  function autopad (line 409) | def autopad(k, p=None):  # kernel, padding
  class Conv_C3 (line 416) | class Conv_C3(nn.Module):
    method __init__ (line 418) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 423) | def forward(self, x):
    method forward_fuse (line 425) | def forward_fuse(self, x):
  class BepC3 (line 429) | class BepC3(nn.Module):
    method __init__ (line 431) | def __init__(self, in_channels, out_channels, n=1, e=0.5, concat=True,...
    method forward (line 447) | def forward(self, x):
  function get_block (line 454) | def get_block(mode):

FILE: asone/detectors/yolov6/yolov6/layers/dbb_transforms.py
  function transI_fusebn (line 6) | def transI_fusebn(kernel, bn):
  function transII_addbranch (line 12) | def transII_addbranch(kernels, biases):
  function transIII_1x1_kxk (line 16) | def transIII_1x1_kxk(k1, b1, k2, b2, groups):
  function transIV_depthconcat (line 35) | def transIV_depthconcat(kernels, biases):
  function transV_avg (line 39) | def transV_avg(channels, kernel_size, groups):
  function transVI_multiscale (line 47) | def transVI_multiscale(kernel, target_kernel_size):

FILE: asone/detectors/yolov6/yolov6/models/efficientrep.py
  class EfficientRep (line 5) | class EfficientRep(nn.Module):
    method __init__ (line 11) | def __init__(
    method forward (line 95) | def forward(self, x):
  class CSPBepBackbone (line 110) | class CSPBepBackbone(nn.Module):
    method __init__ (line 115) | def __init__(
    method forward (line 208) | def forward(self, x):

FILE: asone/detectors/yolov6/yolov6/models/effidehead.py
  class Detect (line 10) | class Detect(nn.Module):
    method __init__ (line 15) | def __init__(self, num_classes=80, anchors=1, num_layers=3, inplace=Tr...
    method initialize_biases (line 53) | def initialize_biases(self):
    method forward (line 75) | def forward(self, x):
  function build_effidehead_layer (line 137) | def build_effidehead_layer(channels_list, num_anchors, num_classes, reg_...

FILE: asone/detectors/yolov6/yolov6/models/end2end.py
  class ORT_NMS (line 6) | class ORT_NMS(torch.autograd.Function):
    method forward (line 9) | def forward(ctx,
    method symbolic (line 26) | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_thresho...
  class TRT8_NMS (line 30) | class TRT8_NMS(torch.autograd.Function):
    method forward (line 33) | def forward(
    method symbolic (line 53) | def symbolic(g,
  class TRT7_NMS (line 77) | class TRT7_NMS(torch.autograd.Function):
    method forward (line 80) | def forward(
    method symbolic (line 104) | def symbolic(g,
  class ONNX_ORT (line 140) | class ONNX_ORT(nn.Module):
    method __init__ (line 142) | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_...
    method forward (line 153) | def forward(self, x):
  class ONNX_TRT7 (line 171) | class ONNX_TRT7(nn.Module):
    method __init__ (line 173) | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_...
    method forward (line 192) | def forward(self, x):
  class ONNX_TRT8 (line 216) | class ONNX_TRT8(nn.Module):
    method __init__ (line 218) | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_...
    method forward (line 230) | def forward(self, x):
  class End2End (line 242) | class End2End(nn.Module):
    method __init__ (line 244) | def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.2...
    method forward (line 254) | def forward(self, x):

FILE: asone/detectors/yolov6/yolov6/models/loss.py
  class ComputeLoss (line 15) | class ComputeLoss:
    method __init__ (line 17) | def __init__(self,
    method __call__ (line 51) | def __call__(
    method preprocess (line 119) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 129) | def bbox_decode(self, anchor_points, pred_dist):
  class VarifocalLoss (line 136) | class VarifocalLoss(nn.Module):
    method __init__ (line 137) | def __init__(self):
    method forward (line 140) | def forward(self, pred_score,gt_score, label, alpha=0.75, gamma=2.0):
  class BboxLoss (line 149) | class BboxLoss(nn.Module):
    method __init__ (line 150) | def __init__(self, num_classes, reg_max, use_dfl=False, iou_type='giou'):
    method forward (line 157) | def forward(self, pred_dist, pred_bboxes, anchor_points,
    method _df_loss (line 196) | def _df_loss(self, pred_dist, target):

FILE: asone/detectors/yolov6/yolov6/models/loss_distill.py
  class ComputeLoss (line 15) | class ComputeLoss:
    method __init__ (line 17) | def __init__(self,
    method __call__ (line 59) | def __call__(
    method distill_loss_cls (line 154) | def distill_loss_cls(self, logits_student, logits_teacher, num_classes...
    method distill_loss_cw (line 165) | def distill_loss_cw(self, s_feats, t_feats,  temperature=1):
    method preprocess (line 189) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 199) | def bbox_decode(self, anchor_points, pred_dist):
  class VarifocalLoss (line 206) | class VarifocalLoss(nn.Module):
    method __init__ (line 207) | def __init__(self):
    method forward (line 210) | def forward(self, pred_score,gt_score, label, alpha=0.75, gamma=2.0):
  class BboxLoss (line 219) | class BboxLoss(nn.Module):
    method __init__ (line 220) | def __init__(self, num_classes, reg_max, use_dfl=False, iou_type='giou'):
    method forward (line 227) | def forward(self, pred_dist, pred_bboxes, t_pred_dist, t_pred_bboxes, ...
    method _df_loss (line 274) | def _df_loss(self, pred_dist, target):
    method distill_loss_dfl (line 287) | def distill_loss_dfl(self, logits_student, logits_teacher, temperature...

FILE: asone/detectors/yolov6/yolov6/models/reppan.py
  class RepPANNeck (line 6) | class RepPANNeck(nn.Module):
    method __init__ (line 12) | def __init__(
    method upsample_enable_quant (line 89) | def upsample_enable_quant(self):
    method forward (line 100) | def forward(self, input):
  class CSPRepPANNeck (line 131) | class CSPRepPANNeck(nn.Module):
    method __init__ (line 136) | def __init__(
    method forward (line 218) | def forward(self, input):

FILE: asone/detectors/yolov6/yolov6/models/yolo.py
  class Model (line 14) | class Model(nn.Module):
    method __init__ (line 19) | def __init__(self, config, channels=3, num_classes=None, anchors=None)...
    method forward (line 37) | def forward(self, x):
    method _apply (line 47) | def _apply(self, fn):
  function make_divisible (line 54) | def make_divisible(x, divisor):
  function build_network (line 59) | def build_network(config, channels, num_classes, anchors, num_layers):
  function build_model (line 112) | def build_model(cfg, num_classes, device):

FILE: asone/detectors/yolov6/yolov6/utils/checkpoint.py
  function load_state_dict (line 11) | def load_state_dict(weights, model, map_location=None):
  function load_checkpoint (line 22) | def load_checkpoint(weights, map_location=None, inplace=True, fuse=True):
  function save_checkpoint (line 35) | def save_checkpoint(ckpt, is_best, save_dir, model_name=""):
  function strip_optimizer (line 46) | def strip_optimizer(ckpt_dir, epoch):

FILE: asone/detectors/yolov6/yolov6/utils/events.py
  function set_logging (line 9) | def set_logging(name=None):
  function load_yaml (line 19) | def load_yaml(file_path):
  function save_yaml (line 27) | def save_yaml(data_dict, save_path):
  function write_tblog (line 33) | def write_tblog(tblogger, epoch, results, losses):
  function write_tbimg (line 47) | def write_tbimg(tblogger, imgs, step, type='train'):

FILE: asone/detectors/yolov6/yolov6/utils/figure_iou.py
  class IOUloss (line 7) | class IOUloss:
    method __init__ (line 10) | def __init__(self, box_format='xywh', iou_type='ciou', reduction='none...
    method __call__ (line 23) | def __call__(self, box1, box2):
  function pairwise_bbox_iou (line 103) | def pairwise_bbox_iou(box1, box2, box_format='xywh'):

FILE: asone/detectors/yolov6/yolov6/utils/general.py
  function increment_name (line 9) | def increment_name(path):
  function find_latest_checkpoint (line 23) | def find_latest_checkpoint(search_dir='.'):
  function dist2bbox (line 29) | def dist2bbox(distance, anchor_points, box_format='xyxy'):
  function bbox2dist (line 43) | def bbox2dist(anchor_points, bbox, reg_max):
  function xywh2xyxy (line 52) | def xywh2xyxy(bboxes):
  function box_iou (line 60) | def box_iou(box1, box2):

FILE: asone/detectors/yolov6/yolov6/utils/torch_utils.py
  function torch_distributed_zero_first (line 20) | def torch_distributed_zero_first(local_rank: int):
  function time_sync (line 31) | def time_sync():
  function initialize_weights (line 38) | def initialize_weights(model):
  function fuse_conv_and_bn (line 50) | def fuse_conv_and_bn(conv, bn):
  function fuse_model (line 85) | def fuse_model(model):
  function get_model_info (line 96) | def get_model_info(model, img_size=640):

FILE: asone/detectors/yolov6/yolov6/utils/yolov6_utils.py
  function nms (line 12) | def nms(boxes, scores, iou_threshold):
  function process_output (line 34) | def process_output(output,  img_height, img_width,
  function compute_iou (line 66) | def compute_iou(box, boxes):
  function xywh2xyxy (line 87) | def xywh2xyxy(x):
  function prepare_input (line 96) | def prepare_input(image, input_width, input_height):
  function process_and_scale_boxes (line 109) | def process_and_scale_boxes(predictions, img_height, img_width,
  function load_pytorch (line 126) | def load_pytorch(weights, map_location=None, inplace=True, fuse=False):
  function fuse_model (line 136) | def fuse_model(model):
  function fuse_conv_and_bn (line 144) | def fuse_conv_and_bn(conv, bn):
  function non_max_suppression (line 179) | def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, cla...

FILE: asone/detectors/yolov6/yolov6_detector.py
  class YOLOv6Detector (line 16) | class YOLOv6Detector:
    method __init__ (line 17) | def __init__(self,
    method load_model (line 40) | def load_model(self, use_cuda, weights, fp16=False):
    method ONNXModel_detail (line 56) | def ONNXModel_detail(self, model):
    method ONNXModel_names (line 66) | def ONNXModel_names(self, model):
    method detect (line 76) | def detect(self, image: list,

FILE: asone/detectors/yolov7/yolov7/models/common.py
  function autopad (line 19) | def autopad(k, p=None):  # kernel, padding
  class MP (line 26) | class MP(nn.Module):
    method __init__ (line 27) | def __init__(self, k=2):
    method forward (line 31) | def forward(self, x):
  class SP (line 35) | class SP(nn.Module):
    method __init__ (line 36) | def __init__(self, k=3, s=1):
    method forward (line 40) | def forward(self, x):
  class ReOrg (line 44) | class ReOrg(nn.Module):
    method __init__ (line 45) | def __init__(self):
    method forward (line 48) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class Concat (line 52) | class Concat(nn.Module):
    method __init__ (line 53) | def __init__(self, dimension=1):
    method forward (line 57) | def forward(self, x):
  class Chuncat (line 61) | class Chuncat(nn.Module):
    method __init__ (line 62) | def __init__(self, dimension=1):
    method forward (line 66) | def forward(self, x):
  class Shortcut (line 76) | class Shortcut(nn.Module):
    method __init__ (line 77) | def __init__(self, dimension=0):
    method forward (line 81) | def forward(self, x):
  class Foldcut (line 85) | class Foldcut(nn.Module):
    method __init__ (line 86) | def __init__(self, dimension=0):
    method forward (line 90) | def forward(self, x):
  class Conv (line 95) | class Conv(nn.Module):
    method __init__ (line 97) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 103) | def forward(self, x):
    method fuseforward (line 106) | def fuseforward(self, x):
  class RobustConv (line 110) | class RobustConv(nn.Module):
    method __init__ (line 112) | def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scal...
    method forward (line 118) | def forward(self, x):
  class RobustConv2 (line 126) | class RobustConv2(nn.Module):
    method __init__ (line 128) | def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scal...
    method forward (line 136) | def forward(self, x):
  function DWConv (line 143) | def DWConv(c1, c2, k=1, s=1, act=True):
  class GhostConv (line 148) | class GhostConv(nn.Module):
    method __init__ (line 150) | def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out,...
    method forward (line 156) | def forward(self, x):
  class Stem (line 161) | class Stem(nn.Module):
    method __init__ (line 163) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 172) | def forward(self, x):
  class DownC (line 177) | class DownC(nn.Module):
    method __init__ (line 179) | def __init__(self, c1, c2, n=1, k=2):
    method forward (line 187) | def forward(self, x):
  class SPP (line 191) | class SPP(nn.Module):
    method __init__ (line 193) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 200) | def forward(self, x):
  class Bottleneck (line 205) | class Bottleneck(nn.Module):
    method __init__ (line 207) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 214) | def forward(self, x):
  class Res (line 218) | class Res(nn.Module):
    method __init__ (line 220) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 228) | def forward(self, x):
  class ResX (line 232) | class ResX(Res):
    method __init__ (line 234) | def __init__(self, c1, c2, shortcut=True, g=32, e=0.5):  # ch_in, ch_o...
  class Ghost (line 239) | class Ghost(nn.Module):
    method __init__ (line 241) | def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
    method forward (line 250) | def forward(self, x):
  class SPPCSPC (line 258) | class SPPCSPC(nn.Module):
    method __init__ (line 260) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 1...
    method forward (line 272) | def forward(self, x):
  class GhostSPPCSPC (line 278) | class GhostSPPCSPC(SPPCSPC):
    method __init__ (line 280) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 1...
  class GhostStem (line 292) | class GhostStem(Stem):
    method __init__ (line 294) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
  class BottleneckCSPA (line 303) | class BottleneckCSPA(nn.Module):
    method __init__ (line 305) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 313) | def forward(self, x):
  class BottleneckCSPB (line 319) | class BottleneckCSPB(nn.Module):
    method __init__ (line 321) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 329) | def forward(self, x):
  class BottleneckCSPC (line 336) | class BottleneckCSPC(nn.Module):
    method __init__ (line 338) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 347) | def forward(self, x):
  class ResCSPA (line 353) | class ResCSPA(BottleneckCSPA):
    method __init__ (line 355) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class ResCSPB (line 361) | class ResCSPB(BottleneckCSPB):
    method __init__ (line 363) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class ResCSPC (line 369) | class ResCSPC(BottleneckCSPC):
    method __init__ (line 371) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class ResXCSPA (line 377) | class ResXCSPA(ResCSPA):
    method __init__ (line 379) | def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5):  # ch_in,...
  class ResXCSPB (line 385) | class ResXCSPB(ResCSPB):
    method __init__ (line 387) | def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5):  # ch_in,...
  class ResXCSPC (line 393) | class ResXCSPC(ResCSPC):
    method __init__ (line 395) | def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5):  # ch_in,...
  class GhostCSPA (line 401) | class GhostCSPA(BottleneckCSPA):
    method __init__ (line 403) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class GhostCSPB (line 409) | class GhostCSPB(BottleneckCSPB):
    method __init__ (line 411) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class GhostCSPC (line 417) | class GhostCSPC(BottleneckCSPC):
    method __init__ (line 419) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class ImplicitA (line 429) | class ImplicitA(nn.Module):
    method __init__ (line 430) | def __init__(self, channel, mean=0., std=.02):
    method forward (line 438) | def forward(self, x):
  class ImplicitM (line 442) | class ImplicitM(nn.Module):
    method __init__ (line 443) | def __init__(self, channel, mean=0., std=.02):
    method forward (line 451) | def forward(self, x):
  class RepConv (line 459) | class RepConv(nn.Module):
    method __init__ (line 463) | def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=Fal...
    method forward (line 494) | def forward(self, inputs):
    method get_equivalent_kernel_bias (line 505) | def get_equivalent_kernel_bias(self):
    method _pad_1x1_to_3x3_tensor (line 514) | def _pad_1x1_to_3x3_tensor(self, kernel1x1):
    method _fuse_bn_tensor (line 520) | def _fuse_bn_tensor(self, branch):
    method repvgg_convert (line 550) | def repvgg_convert(self):
    method fuse_conv_bn (line 557) | def fuse_conv_bn(self, conv, bn):
    method fuse_repvgg_block (line 580) | def fuse_repvgg_block(self):
  class RepBottleneck (line 642) | class RepBottleneck(Bottleneck):
    method __init__ (line 644) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
  class RepBottleneckCSPA (line 650) | class RepBottleneckCSPA(BottleneckCSPA):
    method __init__ (line 652) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class RepBottleneckCSPB (line 658) | class RepBottleneckCSPB(BottleneckCSPB):
    method __init__ (line 660) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
  class RepBottleneckCSPC (line 666) | class RepBottleneckCSPC(BottleneckCSPC):
    method __init__ (line 668) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class RepRes (line 674) | class RepRes(Res):
    method __init__ (line 676) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
  class RepResCSPA (line 682) | class RepResCSPA(ResCSPA):
    method __init__ (line 684) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class RepResCSPB (line 690) | class RepResCSPB(ResCSPB):
    method __init__ (line 692) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
  class RepResCSPC (line 698) | class RepResCSPC(ResCSPC):
    method __init__ (line 700) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
  class RepResX (line 706) | class RepResX(ResX):
    method __init__ (line 708) | def __init__(self, c1, c2, shortcut=True, g=32, e=0.5):  # ch_in, ch_o...
  class RepResXCSPA (line 714) | class RepResXCSPA(ResXCSPA):
    method __init__ (line 716) | def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5):  # ch_in,...
  class RepResXCSPB (line 722) | class RepResXCSPB(ResXCSPB):
    method __init__ (line 724) | def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5):  # ch_in...
  class RepResXCSPC (line 730) | class RepResXCSPC(ResXCSPC):
    method __init__ (line 732) | def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5):  # ch_in,...
  class TransformerLayer (line 742) | class TransformerLayer(nn.Module):
    method __init__ (line 744) | def __init__(self, c, num_heads):
    method forward (line 753) | def forward(self, x):
  class TransformerBlock (line 759) | class TransformerBlock(nn.Module):
    method __init__ (line 761) | def __init__(self, c1, c2, num_heads, num_layers):
    method forward (line 770) | def forward(self, x):
  class Focus (line 792) | class Focus(nn.Module):
    method __init__ (line 794) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in,...
    method forward (line 799) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class SPPF (line 804) | class SPPF(nn.Module):
    method __init__ (line 806) | def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
    method forward (line 813) | def forward(self, x):
  class Contract (line 820) | class Contract(nn.Module):
    method __init__ (line 822) | def __init__(self, gain=2):
    method forward (line 826) | def forward(self, x):
  class Expand (line 834) | class Expand(nn.Module):
    method __init__ (line 836) | def __init__(self, gain=2):
    method forward (line 840) | def forward(self, x):
  class NMS (line 848) | class NMS(nn.Module):
    method __init__ (line 854) | def __init__(self):
    method forward (line 857) | def forward(self, x):
  class autoShape (line 861) | class autoShape(nn.Module):
    method __init__ (line 867) | def __init__(self, model):
    method autoshape (line 871) | def autoshape(self):
    method forward (line 876) | def forward(self, imgs, size=640, augment=False, profile=False):
  class Detections (line 931) | class Detections:
    method __init__ (line 933) | def __init__(self, imgs, pred, files, times=None, names=None, shape=No...
    method display (line 949) | def display(self, pprint=False, show=False, save=False, render=False, ...
    method print (line 973) | def print(self):
    method show (line 977) | def show(self):
    method save (line 980) | def save(self, save_dir='runs/hub/exp'):
    method render (line 985) | def render(self):
    method pandas (line 989) | def pandas(self):
    method tolist (line 999) | def tolist(self):
    method __len__ (line 1007) | def __len__(self):
  class Classify (line 1011) | class Classify(nn.Module):
    method __init__ (line 1013) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, k...
    method forward (line 1019) | def forward(self, x):
  function transI_fusebn (line 1028) | def transI_fusebn(kernel, bn):
  class ConvBN (line 1034) | class ConvBN(nn.Module):
    method __init__ (line 1035) | def __init__(self, in_channels, out_channels, kernel_size,
    method forward (line 1050) | def forward(self, x):
    method switch_to_deploy (line 1056) | def switch_to_deploy(self):
  class OREPA_3x3_RepConv (line 1068) | class OREPA_3x3_RepConv(nn.Module):
    method __init__ (line 1070) | def __init__(self, in_channels, out_channels, kernel_size,
    method fre_init (line 1153) | def fre_init(self):
    method weight_gen (line 1166) | def weight_gen(self):
    method dwsc2full (line 1202) | def dwsc2full(self, weight_dw, weight_pw, groups):
    method forward (line 1214) | def forward(self, inputs):
  class RepConv_OREPA (line 1220) | class RepConv_OREPA(nn.Module):
    method __init__ (line 1222) | def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, ...
    method forward (line 1259) | def forward(self, inputs):
    method get_custom_L2 (line 1286) | def get_custom_L2(self):
    method get_equivalent_kernel_bias (line 1297) | def get_equivalent_kernel_bias(self):
    method _pad_1x1_to_3x3_tensor (line 1303) | def _pad_1x1_to_3x3_tensor(self, kernel1x1):
    method _fuse_bn_tensor (line 1309) | def _fuse_bn_tensor(self, branch):
    method switch_to_deploy (line 1341) | def switch_to_deploy(self):
  class WindowAttention (line 1363) | class WindowAttention(nn.Module):
    method __init__ (line 1365) | def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scal...
    method forward (line 1399) | def forward(self, x, mask=None):
  class Mlp (line 1433) | class Mlp(nn.Module):
    method __init__ (line 1435) | def __init__(self, in_features, hidden_features=None, out_features=Non...
    method forward (line 1444) | def forward(self, x):
  function window_partition (line 1452) | def window_partition(x, window_size):
  function window_reverse (line 1460) | def window_reverse(windows, window_size, H, W):
  class SwinTransformerLayer (line 1468) | class SwinTransformerLayer(nn.Module):
    method __init__ (line 1470) | def __init__(self, dim, num_heads, window_size=8, shift_size=0,
    method create_mask (line 1495) | def create_mask(self, H, W):
    method forward (line 1517) | def forward(self, x):
  class SwinTransformerBlock (line 1580) | class SwinTransformerBlock(nn.Module):
    method __init__ (line 1581) | def __init__(self, c1, c2, num_heads, num_layers, window_size=8):
    method forward (line 1591) | def forward(self, x):
  class STCSPA (line 1598) | class STCSPA(nn.Module):
    method __init__ (line 1600) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 1610) | def forward(self, x):
  class STCSPB (line 1616) | class STCSPB(nn.Module):
    method __init__ (line 1618) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 1628) | def forward(self, x):
  class STCSPC (line 1635) | class STCSPC(nn.Module):
    method __init__ (line 1637) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 1648) | def forward(self, x):
  class WindowAttention_v2 (line 1658) | class WindowAttention_v2(nn.Module):
    method __init__ (line 1660) | def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_dr...
    method forward (line 1719) | def forward(self, x, mask=None):
    method extra_repr (line 1760) | def extra_repr(self) -> str:
    method flops (line 1764) | def flops(self, N):
  class Mlp_v2 (line 1777) | class Mlp_v2(nn.Module):
    method __init__ (line 1778) | def __init__(self, in_features, hidden_features=None, out_features=Non...
    method forward (line 1787) | def forward(self, x):
  function window_partition_v2 (line 1796) | def window_partition_v2(x, window_size):
  function window_reverse_v2 (line 1804) | def window_reverse_v2(windows, window_size, H, W):
  class SwinTransformerLayer_v2 (line 1812) | class SwinTransformerLayer_v2(nn.Module):
    method __init__ (line 1814) | def __init__(self, dim, num_heads, window_size=7, shift_size=0,
    method create_mask (line 1841) | def create_mask(self, H, W):
    method forward (line 1863) | def forward(self, x):
    method extra_repr (line 1923) | def extra_repr(self) -> str:
    method flops (line 1927) | def flops(self):
  class SwinTransformer2Block (line 1942) | class SwinTransformer2Block(nn.Module):
    method __init__ (line 1943) | def __init__(self, c1, c2, num_heads, num_layers, window_size=7):
    method forward (line 1953) | def forward(self, x):
  class ST2CSPA (line 1960) | class ST2CSPA(nn.Module):
    method __init__ (line 1962) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 1972) | def forward(self, x):
  class ST2CSPB (line 1978) | class ST2CSPB(nn.Module):
    method __init__ (line 1980) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):  # ch_in,...
    method forward (line 1990) | def forward(self, x):
  class ST2CSPC (line 1997) | class ST2CSPC(nn.Module):
    method __init__ (line 1999) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 2010) | def forward(self, x):

FILE: asone/detectors/yolov7/yolov7/models/experimental.py
  class Ensemble (line 6) | class Ensemble(nn.ModuleList):
    method __init__ (line 8) | def __init__(self):
    method forward (line 11) | def forward(self, x, augment=False):
  function attempt_load (line 20) | def attempt_load(weights, map_location=None):

FILE: asone/detectors/yolov7/yolov7/models/yolo.py
  class Detect (line 21) | class Detect(nn.Module):
    method __init__ (line 28) | def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
    method forward (line 42) | def forward(self, x):
    method _make_grid (line 86) | def _make_grid(nx=20, ny=20):
    method convert (line 90) | def convert(self, z):
  class IDetect (line 103) | class IDetect(nn.Module):
    method __init__ (line 110) | def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
    method forward (line 127) | def forward(self, x):
    method fuseforward (line 151) | def fuseforward(self, x):
    method fuse (line 195) | def fuse(self):
    method _make_grid (line 211) | def _make_grid(nx=20, ny=20):
    method convert (line 215) | def convert(self, z):
  class IKeypoint (line 228) | class IKeypoint(nn.Module):
    method __init__ (line 232) | def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw...
    method forward (line 270) | def forward(self, x):
    method _make_grid (line 336) | def _make_grid(nx=20, ny=20):
  class IAuxDetect (line 341) | class IAuxDetect(nn.Module):
    method __init__ (line 348) | def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
    method forward (line 368) | def forward(self, x):
    method fuseforward (line 405) | def fuseforward(self, x):
    method fuse (line 447) | def fuse(self):
    method _make_grid (line 463) | def _make_grid(nx=20, ny=20):
    method convert (line 467) | def convert(self, z):
  class IBin (line 480) | class IBin(nn.Module):
    method __init__ (line 484) | def __init__(self, nc=80, anchors=(), ch=(), bin_count=21):  # detecti...
    method forward (line 511) | def forward(self, x):
    method _make_grid (line 557) | def _make_grid(nx=20, ny=20):
  class Model (line 562) | class Model(nn.Module):
    method __init__ (line 564) | def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None):
    method forward (line 644) | def forward(self, x, augment=False, profile=False):
    method forward_once (line 666) | def forward_once(self, x, profile=False):
    method _initialize_biases (line 701) | def _initialize_biases(self, cf=None):
    method _initialize_aux_biases (line 714) | def _initialize_aux_biases(self, cf=None):
    method _initialize_biases_bin (line 733) | def _initialize_biases_bin(self, cf=None):
    method _initialize_biases_kpt (line 751) | def _initialize_biases_kpt(self, cf=None):
    method _print_biases (line 763) | def _print_biases(self):
    method fuse (line 775) | def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
    method nms (line 794) | def nms(self, mode=True):  # add or remove NMS module
    method autoshape (line 808) | def autoshape(self):  # add autoShape module
    method info (line 815) | def info(self, verbose=False, img_size=640):  # print model information
  function parse_model (line 819) | def parse_model(d, ch):  # model_dict, input_channels(3)

FILE: asone/detectors/yolov7/yolov7/utils/torch_utils.py
  function torch_distributed_zero_first (line 28) | def torch_distributed_zero_first(local_rank: int):
  function init_torch_seeds (line 39) | def init_torch_seeds(seed=0):
  function date_modified (line 48) | def date_modified(path=__file__):
  function git_describe (line 54) | def git_describe(path=Path(__file__).parent):  # path must be a directory
  function select_device (line 63) | def select_device(device='', batch_size=None):
  function time_synchronized (line 89) | def time_synchronized():
  function profile (line 96) | def profile(x, ops, n=100, device=None):
  function is_parallel (line 135) | def is_parallel(model):
  function intersect_dicts (line 139) | def intersect_dicts(da, db, exclude=()):
  function initialize_weights (line 144) | def initialize_weights(model):
  function find_modules (line 156) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 161) | def sparsity(model):
  function prune (line 170) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 181) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 204) | def model_info(model, verbose=False, img_size=640):
  function load_classifier (line 228) | def load_classifier(name='resnet101', n=2):
  function scale_img (line 247) | def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,...
  function copy_attr (line 260) | def copy_attr(a, b, include=(), exclude=()):
  class ModelEMA (line 269) | class ModelEMA:
    method __init__ (line 279) | def __init__(self, model, decay=0.9999, updates=0):
    method update (line 289) | def update(self, model):
    method update_attr (line 301) | def update_attr(self, model, include=(), exclude=('process_group', 're...
  class BatchNormXd (line 306) | class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
    method _check_input_dim (line 307) | def _check_input_dim(self, input):
  function revert_sync_batchnorm (line 318) | def revert_sync_batchnorm(module):
  class TracedModel (line 343) | class TracedModel(nn.Module):
    method __init__ (line 345) | def __init__(self, model=None, device=None, img_size=(640,640)):
    method forward (line 371) | def forward(self, x, augment=False, profile=False):

FILE: asone/detectors/yolov7/yolov7/utils/yolov7_utils.py
  function prepare_input (line 7) | def prepare_input(image, input_shape):
  function process_output (line 20) | def process_output(output, ori_shape, input_shape, conf_threshold, iou_t...
  function rescale_boxes (line 71) | def rescale_boxes(boxes, ori_shape, input_shape):
  function nms (line 81) | def nms(boxes, scores, iou_threshold):
  function compute_iou (line 103) | def compute_iou(box, boxes):
  function xywh2xyxy (line 124) | def xywh2xyxy(x):
  function non_max_suppression (line 133) | def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, cla...

FILE: asone/detectors/yolov7/yolov7_detector.py
  function xywh2xyxy (line 19) | def xywh2xyxy(x):
  class YOLOv7Detector (line 29) | class YOLOv7Detector:
    method __init__ (line 30) | def __init__(self,
    method load_model (line 50) | def load_model(self, use_cuda, weights, fp16=False):
    method detect (line 71) | def detect(self, image: list,

FILE: asone/detectors/yolov8/utils/yolov8_utils.py
  function prepare_input (line 7) | def prepare_input(image, input_shape, stride, pt):
  function process_output (line 16) | def process_output(detections,
  function rescale_boxes (line 46) | def rescale_boxes(boxes, ori_shape, input_shape):

FILE: asone/detectors/yolov8/yolov8_detector.py
  class YOLOv8Detector (line 18) | class YOLOv8Detector:
    method __init__ (line 19) | def __init__(self,
    method load_model (line 39) | def load_model(self, use_cuda, weights, fp16=False):
    method detect (line 64) | def detect(self, image: list,

FILE: asone/detectors/yolov9/export.py
  function export_formats (line 34) | def export_formats():
  function try_export (line 53) | def try_export(inner_func):
  function export_torchscript (line 72) | def export_torchscript(model, im, file, optimize, prefix=colorstr('Torch...
  function export_onnx (line 88) | def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colors...
  function export_onnx_end2end (line 144) | def export_onnx_end2end(model, im, file, simplify, topk_all, iou_thres, ...
  function export_openvino (line 202) | def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
  function export_paddle (line 220) | def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePadd...
  function export_coreml (line 235) | def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
  function export_engine (line 258) | def export_engine(model, im, file, half, dynamic, simplify, workspace=4,...
  function export_saved_model (line 320) | def export_saved_model(model,
  function export_pb (line 372) | def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
  function export_tflite (line 389) | def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, ...
  function export_edgetpu (line 420) | def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
  function export_tfjs (line 445) | def export_tfjs(file, prefix=colorstr('TensorFlow.js:')):
  function add_tflite_metadata (line 473) | def add_tflite_metadata(file, metadata, num_outputs):
  function run (line 507) | def run(
  function parse_opt (line 639) | def parse_opt():
  function main (line 679) | def main(opt):

FILE: asone/detectors/yolov9/yolov9/models/common.py
  function autopad (line 34) | def autopad(k, p=None, d=1):  # kernel, padding, dilation
  class Conv (line 43) | class Conv(nn.Module):
    method __init__ (line 47) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
    method forward (line 53) | def forward(self, x):
    method forward_fuse (line 56) | def forward_fuse(self, x):
  class AConv (line 60) | class AConv(nn.Module):
    method __init__ (line 61) | def __init__(self, c1, c2):  # ch_in, ch_out, shortcut, kernels, group...
    method forward (line 65) | def forward(self, x):
  class ADown (line 70) | class ADown(nn.Module):
    method __init__ (line 71) | def __init__(self, c1, c2):  # ch_in, ch_out, shortcut, kernels, group...
    method forward (line 77) | def forward(self, x):
  class RepConvN (line 86) | class RepConvN(nn.Module):
    method __init__ (line 92) | def __init__(self, c1, c2, k=3, s=1, p=1, g=1, d=1, act=True, bn=False...
    method forward_fuse (line 104) | def forward_fuse(self, x):
    method forward (line 108) | def forward(self, x):
    method get_equivalent_kernel_bias (line 113) | def get_equivalent_kernel_bias(self):
    method _avg_to_3x3_tensor (line 119) | def _avg_to_3x3_tensor(self, avgp):
    method _pad_1x1_to_3x3_tensor (line 128) | def _pad_1x1_to_3x3_tensor(self, kernel1x1):
    method _fuse_bn_tensor (line 134) | def _fuse_bn_tensor(self, branch):
    method fuse_convs (line 161) | def fuse_convs(self):
  class SP (line 187) | class SP(nn.Module):
    method __init__ (line 188) | def __init__(self, k=3, s=1):
    method forward (line 192) | def forward(self, x):
  class MP (line 196) | class MP(nn.Module):
    method __init__ (line 198) | def __init__(self, k=2):
    method forward (line 202) | def forward(self, x):
  class ConvTranspose (line 206) | class ConvTranspose(nn.Module):
    method __init__ (line 210) | def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True):
    method forward (line 216) | def forward(self, x):
  class DWConv (line 220) | class DWConv(Conv):
    method __init__ (line 222) | def __init__(self, c1, c2, k=1, s=1, d=1, act=True):  # ch_in, ch_out,...
  class DWConvTranspose2d (line 226) | class DWConvTranspose2d(nn.ConvTranspose2d):
    method __init__ (line 228) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, ke...
  class DFL (line 232) | class DFL(nn.Module):
    method __init__ (line 234) | def __init__(self, c1=17):
    method forward (line 241) | def forward(self, x):
  class BottleneckBase (line 247) | class BottleneckBase(nn.Module):
    method __init__ (line 249) | def __init__(self, c1, c2, shortcut=True, g=1, k=(1, 3), e=0.5):  # ch...
    method forward (line 256) | def forward(self, x):
  class RBottleneckBase (line 260) | class RBottleneckBase(nn.Module):
    method __init__ (line 262) | def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 1), e=0.5):  # ch...
    method forward (line 269) | def forward(self, x):
  class RepNRBottleneckBase (line 273) | class RepNRBottleneckBase(nn.Module):
    method __init__ (line 275) | def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 1), e=0.5):  # ch...
    method forward (line 282) | def forward(self, x):
  class Bottleneck (line 286) | class Bottleneck(nn.Module):
    method __init__ (line 288) | def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):  # ch...
    method forward (line 295) | def forward(self, x):
  class RepNBottleneck (line 299) | class RepNBottleneck(nn.Module):
    method __init__ (line 301) | def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):  # ch...
    method forward (line 308) | def forward(self, x):
  class Res (line 312) | class Res(nn.Module):
    method __init__ (line 314) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 322) | def forward(self, x):
  class RepNRes (line 326) | class RepNRes(nn.Module):
    method __init__ (line 328) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_ou...
    method forward (line 336) | def forward(self, x):
  class BottleneckCSP (line 340) | class BottleneckCSP(nn.Module):
    method __init__ (line 342) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 353) | def forward(self, x):
  class CSP (line 359) | class CSP(nn.Module):
    method __init__ (line 361) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 369) | def forward(self, x):
  class RepNCSP (line 373) | class RepNCSP(nn.Module):
    method __init__ (line 375) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 383) | def forward(self, x):
  class CSPBase (line 387) | class CSPBase(nn.Module):
    method __init__ (line 389) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ...
    method forward (line 397) | def forward(self, x):
  class SPP (line 401) | class SPP(nn.Module):
    method __init__ (line 403) | def __init__(self, c1, c2, k=(5, 9, 13)):
    method forward (line 410) | def forward(self, x):
  class ASPP (line 417) | class ASPP(torch.nn.Module):
    method __init__ (line 419) | def __init__(self, in_channels, out_channels):
    method forward (line 443) | def forward(self, x):
  class SPPCSPC (line 454) | class SPPCSPC(nn.Module):
    method __init__ (line 456) | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 1...
    method forward (line 468) | def forward(self, x):
  class SPPF (line 475) | class SPPF(nn.Module):
    method __init__ (line 477) | def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
    method forward (line 485) | def forward(self, x):
  class ReOrg (line 498) | class ReOrg(nn.Module):
    method __init__ (line 500) | def __init__(self):
    method forward (line 503) | def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
  class Contract (line 507) | class Contract(nn.Module):
    method __init__ (line 509) | def __init__(self, gain=2):
    method forward (line 513) | def forward(self, x):
  class Expand (line 521) | class Expand(nn.Module):
    method __init__ (line 523) | def __init__(self, gain=2):
    method forward (line 527) | def forward(self, x):
  class Concat (line 535) | class Concat(nn.Module):
    method __init__ (line 537) | def __init__(self, dimension=1):
    method forward (line 541) | def forward(self, x):
  class Shortcut (line 545) | class Shortcut(nn.Module):
    method __init__ (line 546) | def __init__(self, dimension=0):
    method forward (line 550) | def forward(self, x):
  class Silence (line 554) | class Silence(nn.Module):
    method __init__ (line 555) | def __init__(self):
    method forward (line 557) | def forward(self, x):
  class SPPELAN (line 563) | class SPPELAN(nn.Module):
    method __init__ (line 565) | def __init__(self, c1, c2, c3):  # ch_in, ch_out, number, shortcut, gr...
    method forward (line 574) | def forward(self, x):
  class RepNCSPELAN4 (line 580) | class RepNCSPELAN4(nn.Module):
    method __init__ (line 582) | def __init__(self, c1, c2, c3, c4, c5=1):  # ch_in, ch_out, number, sh...
    method forward (line 590) | def forward(self, x):
    method forward_split (line 595) | def forward_split(self, x):
  class ImplicitA (line 605) | class ImplicitA(nn.Module):
    method __init__ (line 606) | def __init__(self, channel):
    method forward (line 612) | def forward(self, x):
  class ImplicitM (line 616) | class ImplicitM(nn.Module):
    method __init__ (line 617) | def __init__(self, channel):
    method forward (line 623) | def forward(self, x):
  class CBLinear (line 631) | class CBLinear(nn.Module):
    method __init__ (line 632) | def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):  # ch_in, ch_outs,...
    method forward (line 637) | def forward(self, x):
  class CBFuse (line 641) | class CBFuse(nn.Module):
    method __init__ (line 642) | def __init__(self, idx):
    method forward (line 646) | def forward(self, xs):
  class DetectMultiBackend (line 655) | class DetectMultiBackend(nn.Module):
    method __init__ (line 657) | def __init__(self, weights='yolo.pt', device=torch.device('cpu'), dnn=...
    method forward (line 845) | def forward(self, im, augment=False, visualize=False):
    method from_numpy (line 927) | def from_numpy(self, x):
    method warmup (line 930) | def warmup(self, imgsz=(1, 3, 640, 640)):
    method _model_type (line 939) | def _model_type(p='path/to/model.pt'):
    method _load_metadata (line 954) | def _load_metadata(f=Path('path/to/meta.yaml')):
  class AutoShape (line 962) | class AutoShape(nn.Module):
    method __init__ (line 972) | def __init__(self, model, verbose=True):
    method _apply (line 985) | def _apply(self, fn):
    method forward (line 998) | def forward(self, ims, size=640, augment=False, profile=False):
  class Detections (line 1062) | class Detections:
    method __init__ (line 1064) | def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shap...
    method _run (line 1081) | def _run(self, pprint=False, show=False, save=False, crop=False, rende...
    method show (line 1127) | def show(self, labels=True):
    method save (line 1130) | def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
    method crop (line 1134) | def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
    method render (line 1138) | def render(self, labels=True):
    method pandas (line 1142) | def pandas(self):
    method tolist (line 1152) | def tolist(self):
    method print (line 1161) | def print(self):
    method __len__ (line 1164) | def __len__(self):  # override len(results)
    method __str__ (line 1167) | def __str__(self):  # override print(results)
    method __repr__ (line 1170) | def __repr__(self):
  class Proto (line 1174) | class Proto(nn.Module):
    method __init__ (line 1176) | def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, num...
    method forward (line 1183) | def forward(self, x):
  class UConv (line 1187) | class UConv(nn.Module):
    method __init__ (line 1188) | def __init__(self, c1, c_=256, c2=256):  # ch_in, number of protos, nu...
    method forward (line 1195) | def forward(self, x):
  class Classify (line 1199) | class Classify(nn.Module):
    method __init__ (line 1201) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, k...
    method forward (line 1209) | def forward(self, x):

FILE: asone/detectors/yolov9/yolov9/models/experimental.py
  class Sum (line 11) | class Sum(nn.Module):
    method __init__ (line 13) | def __init__(self, n, weight=False):  # n: number of inputs
    method forward (line 20) | def forward(self, x):
  class MixConv2d (line 32) | class MixConv2d(nn.Module):
    method __init__ (line 34) | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch...
    method forward (line 53) | def forward(self, x):
  class Ensemble (line 57) | class Ensemble(nn.ModuleList):
    method __init__ (line 59) | def __init__(self):
    method forward (line 62) | def forward(self, x, augment=False, profile=False, visualize=False):
  class ORT_NMS (line 70) | class ORT_NMS(torch.autograd.Function):
    method forward (line 73) | def forward(ctx,
    method symbolic (line 90) | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_thresho...
  class TRT_NMS (line 94) | class TRT_NMS(torch.autograd.Function):
    method forward (line 97) | def forward(
    method symbolic (line 118) | def symbolic(g,
  class ONNX_ORT (line 143) | class ONNX_ORT(nn.Module):
    method __init__ (line 145) | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_...
    method forward (line 157) | def forward(self, x):
  class ONNX_TRT (line 185) | class ONNX_TRT(nn.Module):
    method __init__ (line 187) | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_...
    method forward (line 200) | def forward(self, x):
  class End2End (line 220) | class End2End(nn.Module):
    method __init__ (line 222) | def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.2...
    method forward (line 232) | def forward(self, x):
  function attempt_load (line 238) | def attempt_load(weights, device=None, inplace=True, fuse=True):

FILE: asone/detectors/yolov9/yolov9/models/tf.py
  class TFBN (line 26) | class TFBN(keras.layers.Layer):
    method __init__ (line 28) | def __init__(self, w=None):
    method call (line 37) | def call(self, inputs):
  class TFPad (line 41) | class TFPad(keras.layers.Layer):
    method __init__ (line 43) | def __init__(self, pad):
    method call (line 50) | def call(self, inputs):
  class TFConv (line 54) | class TFConv(keras.layers.Layer):
    method __init__ (line 56) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 74) | def call(self, inputs):
  class TFDWConv (line 78) | class TFDWConv(keras.layers.Layer):
    method __init__ (line 80) | def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
    method call (line 96) | def call(self, inputs):
  class TFDWConvTranspose2d (line 100) | class TFDWConvTranspose2d(keras.layers.Layer):
    method __init__ (line 102) | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
    method call (line 119) | def call(self, inputs):
  class TFFocus (line 123) | class TFFocus(keras.layers.Layer):
    method __init__ (line 125) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
    method call (line 130) | def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
  class TFBottleneck (line 136) | class TFBottleneck(keras.layers.Layer):
    method __init__ (line 138) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_i...
    method call (line 145) | def call(self, inputs):
  class TFCrossConv (line 149) | class TFCrossConv(keras.layers.Layer):
    method __init__ (line 151) | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
    method call (line 158) | def call(self, inputs):
  class TFConv2d (line 162) | class TFConv2d(keras.layers.Layer):
    method __init__ (line 164) | def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
    method call (line 176) | def call(self, inputs):
  class TFBottleneckCSP (line 180) | class TFBottleneckCSP(keras.layers.Layer):
    method __init__ (line 182) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 194) | def call(self, inputs):
  class TFC3 (line 200) | class TFC3(keras.layers.Layer):
    method __init__ (line 202) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 211) | def call(self, inputs):
  class TFC3x (line 215) | class TFC3x(keras.layers.Layer):
    method __init__ (line 217) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
    method call (line 227) | def call(self, inputs):
  class TFSPP (line 231) | class TFSPP(keras.layers.Layer):
    method __init__ (line 233) | def __init__(self, c1, c2, k=(5, 9, 13), w=None):
    method call (line 240) | def call(self, inputs):
  class TFSPPF (line 245) | class TFSPPF(keras.layers.Layer):
    method __init__ (line 247) | def __init__(self, c1, c2, k=5, w=None):
    method call (line 254) | def call(self, inputs):
  class TFDetect (line 261) | class TFDetect(keras.layers.Layer):
    method __init__ (line 263) | def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None)...
    method call (line 280) | def call(self, inputs):
    method _make_grid (line 304) | def _make_grid(nx=20, ny=20):
  class TFSegment (line 311) | class TFSegment(TFDetect):
    method __init__ (line 313) | def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(64...
    method call (line 322) | def call(self, x):
  class TFProto (line 330) | class TFProto(keras.layers.Layer):
    method __init__ (line 332) | def __init__(self, c1, c_=256, c2=32, w=None):
    method call (line 339) | def call(self, inputs):
  class TFUpsample (line 343) | class TFUpsample(keras.layers.Layer):
    method __init__ (line 345) | def __init__(self, size, scale_factor, mode, w=None):  # warning: all ...
    method call (line 354) | def call(self, inputs):
  class TFConcat (line 358) | class TFConcat(keras.layers.Layer):
    method __init__ (line 360) | def __init__(self, dimension=1, w=None):
    method call (line 365) | def call(self, inputs):
  function parse_model (line 369) | def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
  class TFModel (line 425) | class TFModel:
    method __init__ (line 427) | def __init__(self, cfg='yolo.yaml', ch=3, nc=None, model=None, imgsz=(...
    method predict (line 443) | def predict(self,
    method _xywh2xyxy (line 486) | def _xywh2xyxy(xywh):
  class AgnosticNMS (line 492) | class AgnosticNMS(keras.layers.Layer):
    method call (line 494) | def call(self, input, topk_all, iou_thres, conf_thres):
    method _nms (line 502) | def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnosti...
  function activations (line 530) | def activations(act=nn.SiLU):
  function representative_dataset_gen (line 542) | def representative_dataset_gen(dataset, ncalib=100):
  function run (line 553) | def run(
  function parse_opt (line 578) | def parse_opt():
  function main (line 590) | def main(opt):

FILE: asone/detectors/yolov9/yolov9/models/yolo.py
  class Detect (line 29) | class Detect(nn.Module):
    method __init__ (line 37) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 53) | def forward(self, x):
    method bias_init (line 68) | def bias_init(self):
  class DDetect (line 78) | class DDetect(nn.Module):
    method __init__ (line 86) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 102) | def forward(self, x):
    method bias_init (line 117) | def bias_init(self):
  class DualDetect (line 127) | class DualDetect(nn.Module):
    method __init__ (line 135) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 157) | def forward(self, x):
    method bias_init (line 177) | def bias_init(self):
  class DualDDetect (line 190) | class DualDDetect(nn.Module):
    method __init__ (line 198) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 220) | def forward(self, x):
    method bias_init (line 246) | def bias_init(self):
  class TripleDetect (line 259) | class TripleDetect(nn.Module):
    method __init__ (line 267) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 295) | def forward(self, x):
    method bias_init (line 319) | def bias_init(self):
  class TripleDDetect (line 335) | class TripleDDetect(nn.Module):
    method __init__ (line 343) | def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
    method forward (line 377) | def forward(self, x):
    method bias_init (line 403) | def bias_init(self):
  class Segment (line 419) | class Segment(Detect):
    method __init__ (line 421) | def __init__(self, nc=80, nm=32, npr=256, ch=(), inplace=True):
    method forward (line 431) | def forward(self, x):
  class Panoptic (line 442) | class Panoptic(Detect):
    method __init__ (line 444) | def __init__(self, nc=80, sem_nc=93, nm=32, npr=256, ch=(), inplace=Tr...
    method forward (line 457) | def forward(self, x):
  class BaseModel (line 469) | class BaseModel(nn.Module):
    method forward (line 471) | def forward(self, x, profile=False, visualize=False):
    method _forward_once (line 474) | def _forward_once(self, x, profile=False, visualize=False):
    method _profile_one_layer (line 487) | def _profile_one_layer(self, m, x, dt):
    method fuse (line 500) | def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
    method info (line 513) | def info(self, verbose=False, img_size=640):  # print model information
    method _apply (line 516) | def _apply(self, fn):
  class DetectionModel (line 528) | class DetectionModel(BaseModel):
    method __init__ (line 530) | def __init__(self, cfg='yolo.yaml', ch=3, nc=None, anchors=None):  # m...
    method forward (line 579) | def forward(self, x, augment=False, profile=False, visualize=False):
    method _forward_augment (line 584) | def _forward_augment(self, x):
    method _descale_pred (line 598) | def _descale_pred(self, p, flips, scale, img_size):
    method _clip_augmented (line 615) | def _clip_augmented(self, y):
  class SegmentationModel (line 630) | class SegmentationModel(DetectionModel):
    method __init__ (line 632) | def __init__(self, cfg='yolo-seg.yaml', ch=3, nc=None, anchors=None):
  class ClassificationModel (line 636) | class ClassificationModel(BaseModel):
    method __init__ (line 638) | def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml,...
    method _from_detection_model (line 642) | def _from_detection_model(self, model, nc=1000, cutoff=10):
    method _from_yaml (line 657) | def _from_yaml(self, cfg):
  function parse_model (line 662) | def parse_model(d, ch):  # model_dict, input_channels(3)

FILE: asone/detectors/yolov9/yolov9/utils/__init__.py
  function emojis (line 6) | def emojis(str=''):
  class TryExcept (line 11) | class TryExcept(contextlib.ContextDecorator):
    method __init__ (line 13) | def __init__(self, msg=''):
    method __enter__ (line 16) | def __enter__(self):
    method __exit__ (line 19) | def __exit__(self, exc_type, value, traceback):
  function threaded (line 25) | def threaded(func):
  function join_threads (line 35) | def join_threads(verbose=False):
  function notebook_init (line 45) | def notebook_init(verbose=True):

FILE: asone/detectors/yolov9/yolov9/utils/activations.py
  class SiLU (line 6) | class SiLU(nn.Module):
    method forward (line 9) | def forward(x):
  class Hardswish (line 13) | class Hardswish(nn.Module):
    method forward (line 16) | def forward(x):
  class Mish (line 21) | class Mish(nn.Module):
    method forward (line 24) | def forward(x):
  class MemoryEfficientMish (line 28) | class MemoryEfficientMish(nn.Module):
    class F (line 30) | class F(torch.autograd.Function):
      method forward (line 33) | def forward(ctx, x):
      method backward (line 38) | def backward(ctx, grad_output):
    method forward (line 44) | def forward(self, x):
  class FReLU (line 48) | class FReLU(nn.Module):
    method __init__ (line 50) | def __init__(self, c1, k=3):  # ch_in, kernel
    method forward (line 55) | def forward(self, x):
  class AconC (line 59) | class AconC(nn.Module):
    method __init__ (line 65) | def __init__(self, c1):
    method forward (line 71) | def forward(self, x):
  class MetaAconC (line 76) | class MetaAconC(nn.Module):
    method __init__ (line 82) | def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
    method forward (line 92) | def forward(self, x):

FILE: asone/detectors/yolov9/yolov9/utils/augmentations.py
  class Albumentations (line 17) | class Albumentations:
    method __init__ (line 19) | def __init__(self, size=640):
    method __call__ (line 43) | def __call__(self, im, labels, p=1.0):
  function normalize (line 50) | def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
  function denormalize (line 55) | def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
  function augment_hsv (line 62) | def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
  function hist_equalize (line 78) | def hist_equalize(im, clahe=True, bgr=False):
  function replicate (line 89) | def replicate(im, labels):
  function letterbox (line 106) | def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True...
  function random_perspective (line 139) | def random_perspective(im,
  function copy_paste (line 235) | def copy_paste(im, labels, segments, p=0.5):
  function cutout (line 260) | def cutout(im, labels, p=0.5):
  function mixup (line 287) | def mixup(im, labels, im2, labels2):
  function box_candidates (line 295) | def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1...
  function classify_albumentations (line 303) | def classify_albumentations(
  function classify_transforms (line 345) | def classify_transforms(size=224):
  class LetterBox (line 352) | class LetterBox:
    method __init__ (line 354) | def __init__(self, size=(640, 640), auto=False, stride=32):
    method __call__ (line 360) | def __call__(self, im):  # im = np.array HWC
  class CenterCrop (line 371) | class CenterCrop:
    method __init__ (line 373) | def __init__(self, size=640):
    method __call__ (line 377) | def __call__(self, im):  # im = np.array HWC
  class ToTensor (line 384) | class ToTensor:
    method __init__ (line 386) | def __init__(self, half=False):
    method __call__ (line 390) | def __call__(self, im):  # im = np.array HWC in BGR order

FILE: asone/detectors/yolov9/yolov9/utils/autoanchor.py
  function check_anchor_order (line 14) | def check_anchor_order(m):
  function check_anchors (line 25) | def check_anchors(dataset, model, thr=4.0, imgsz=640):
  function kmean_anchors (line 62) | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=...

FILE: asone/detectors/yolov9/yolov9/utils/autobatch.py
  function check_train_batch_size (line 10) | def check_train_batch_size(model, imgsz=640, amp=True):
  function autobatch (line 16) | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):

FILE: asone/detectors/yolov9/yolov9/utils/callbacks.py
  class Callbacks (line 4) | class Callbacks:
    method __init__ (line 9) | def __init__(self):
    method register_action (line 33) | def register_action(self, hook, name='', callback=None):
    method get_registered_actions (line 46) | def get_registered_actions(self, hook=None):
    method run (line 55) | def run(self, hook, *args, thread=False, **kwargs):

FILE: asone/detectors/yolov9/yolov9/utils/coco_utils.py
  function getCocoIds (line 53) | def getCocoIds(name = 'semantic'):
  function getMappingId (line 63) | def getMappingId(index, name = 'semantic'):
  function getMappingIndex (line 67) | def getMappingIndex(id, name = 'semantic'):
  function annToRLE (line 72) | def annToRLE(ann, img_size):
  function annToMask (line 89) | def annToMask(ann, img_size):
  function convert_to_polys (line 95) | def convert_to_polys(mask):

FILE: asone/detectors/yolov9/yolov9/utils/dataloaders.py
  function get_hash (line 47) | def get_hash(paths):
  function exif_size (line 55) | def exif_size(img):
  function exif_transpose (line 65) | def exif_transpose(image):
  function seed_worker (line 91) | def seed_worker(worker_id):
  function create_dataloader (line 98) | def create_dataloader(path,
  class InfiniteDataLoader (line 154) | class InfiniteDataLoader(dataloader.DataLoader):
    method __init__ (line 160) | def __init__(self, *args, **kwargs):
    method __len__ (line 165) | def __len__(self):
    method __iter__ (line 168) | def __iter__(self):
  class _RepeatSampler (line 173) | class _RepeatSampler:
    method __init__ (line 180) | def __init__(self, sampler):
    method __iter__ (line 183) | def __iter__(self):
  class LoadScreenshots (line 188) | class LoadScreenshots:
    method __init__ (line 190) | def __init__(self, source, img_size=640, stride=32, auto=True, transfo...
    method __iter__ (line 219) | def __iter__(self):
    method __next__ (line 222) | def __next__(self):
  class LoadImages (line 237) | class LoadImages:
    method __init__ (line 239) | def __init__(self, path, img_size=640, stride=32, auto=True, transform...
    method __iter__ (line 272) | def __iter__(self):
    method __next__ (line 276) | def __next__(self):
    method _new_video (line 316) | def _new_video(self, path):
    method _cv2_rotate (line 324) | def _cv2_rotate(self, im):
    method __len__ (line 334) | def __len__(self):
  class LoadStreams (line 338) | class LoadStreams:
    method __init__ (line 340) | def __init__(self, sources='streams.txt', img_size=640, stride=32, aut...
    method update (line 384) | def update(self, i, cap, stream):
    method __iter__ (line 400) | def __iter__(self):
    method __next__ (line 404) | def __next__(self):
    method __len__ (line 420) | def __len__(self):
  function img2label_paths (line 424) | def img2label_paths(img_paths):
  class LoadImagesAndLabels (line 430) | class LoadImagesAndLabels(Dataset):
    method __init__ (line 435) | def __init__(self,
    method check_cache_ram (line 585) | def check_cache_ram(self, safety_margin=0.1, prefix=''):
    method cache_labels (line 602) | def cache_labels(self, path=Path('./labels.cache'), prefix=''):
    method __len__ (line 640) | def __len__(self):
    method __getitem__ (line 649) | def __getitem__(self, index):
    method load_image (line 723) | def load_image(self, i):
    method cache_images_to_disk (line 740) | def cache_images_to_disk(self, i):
    method load_mosaic (line 746) | def load_mosaic(self, index):
    method load_mosaic9 (line 804) | def load_mosaic9(self, index):
    method collate_fn (line 882) | def collate_fn(batch):
    method collate_fn4 (line 889) | def collate_fn4(batch):
  function flatten_recursive (line 916) | def flatten_recursive(path=DATASETS_DIR / 'coco128'):
  function extract_boxes (line 926) | def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataload...
  function autosplit (line 960) | def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0...
  function verify_image_label (line 986) | def verify_image_label(args):
  class HUBDatasetStats (line 1038) | class HUBDatasetStats():
    method __init__ (line 1053) | def __init__(self, path='coco128.yaml', autodownload=False):
    method _find_yaml (line 1072) | def _find_yaml(dir):
    method _unzip (line 1082) | def _unzip(self, path):
    method _hub_ops (line 1092) | def _hub_ops(self, f, max_dim=1920):
    method get_json (line 1110) | def get_json(self, save=False, verbose=False):
    method process_images (line 1145) | def process_images(self):
  class ClassificationDataset (line 1159) | class ClassificationDataset(torchvision.datasets.ImageFolder):
    method __init__ (line 1168) | def __init__(self, root, augment, imgsz, cache=False):
    method __getitem__ (line 1176) | def __getitem__(self, i):
  function create_classification_dataloader (line 1193) | def create_classification_dataloader(path,

FILE: asone/detectors/yolov9/yolov9/utils/downloads.py
  function is_url (line 11) | def is_url(url, check=True):
  function gsutil_getsize (line 22) | def gsutil_getsize(url=''):
  function url_getsize (line 28) | def url_getsize(url='https://ultralytics.com/images/bus.jpg'):
  function safe_download (line 34) | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
  function attempt_download (line 57) | def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'):

FILE: asone/detectors/yolov9/yolov9/utils/general.py
  function is_ascii (line 58) | def is_ascii(s=''):
  function is_chinese (line 64) | def is_chinese(s='人工智能'):
  function is_colab (line 69) | def is_colab():
  function is_notebook (line 74) | def is_notebook():
  function is_kaggle (line 80) | def is_kaggle():
  function is_docker (line 85) | def is_docker() -> bool:
  function is_writeable (line 96) | def is_writeable(dir, test=False):
  function set_logging (line 113) | def set_logging(name=LOGGING_NAME, verbose=True):
  function user_config_dir (line 142) | def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
  class Profile (line 158) | class Profile(contextlib.ContextDecorator):
    method __init__ (line 160) | def __init__(self, t=0.0):
    method __enter__ (line 164) | def __enter__(self):
    method __exit__ (line 168) | def __exit__(self, type, value, traceback):
    method time (line 172) | def time(self):
  class Timeout (line 178) | class Timeout(contextlib.ContextDecorator):
    method __init__ (line 180) | def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors...
    method _timeout_handler (line 185) | def _timeout_handler(self, signum, frame):
    method __enter__ (line 188) | def __enter__(self):
    method __exit__ (line 193) | def __exit__(self, exc_type, exc_val, exc_tb):
  class WorkingDirectory (line 200) | class WorkingDirectory(contextlib.ContextDecorator):
    method __init__ (line 202) | def __init__(self, new_dir):
    method __enter__ (line 206) | def __enter__(self):
    method __exit__ (line 209) | def __exit__(self, exc_type, exc_val, exc_tb):
  function methods (line 213) | def methods(instance):
  function print_args (line 218) | def print_args(args: Optional[dict] = None, show_file=True, show_func=Fa...
  function init_seeds (line 233) | def init_seeds(seed=0, deterministic=False):
  function intersect_dicts (line 248) | def intersect_dicts(da, db, exclude=()):
  function get_default_args (line 253) | def get_default_args(func):
  function get_latest_run (line 259) | def get_latest_run(search_dir='.'):
  function file_age (line 265) | def file_age(path=__file__):
  function file_date (line 271) | def file_date(path=__file__):
  function file_size (line 277) | def file_size(path):
  function check_online (line 289) | def check_online():
  function git_describe (line 304) | def git_describe(path=ROOT):  # path must be a directory
  function check_git_status (line 315) | def check_git_status(repo='WongKinYiu/yolov9', branch='main'):
  function check_git_info (line 342) | def check_git_info(path='.'):
  function check_python (line 359) | def check_python(minimum='3.7.0'):
  function check_version (line 364) | def check_version(current='0.0.0', minimum='0.0.0', name='version ', pin...
  function check_requirements (line 377) | def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(...
  function check_img_size (line 411) | def check_img_size(imgsz, s=32, floor=0):
  function check_imshow (line 423) | def check_imshow(warn=False):
  function check_suffix (line 439) | def check_suffix(file='yolo.pt', suffix=('.pt',), msg=''):
  function check_yaml (line 450) | def check_yaml(file, suffix=('.yaml', '.yml')):
  function check_file (line 455) | def check_file(file, suffix=''):
  function check_font (line 483) | def check_font(font=FONT, progress=False):
  function check_dataset (line 493) | def check_dataset(data, autodownload=True):
  function check_amp (line 559) | def check_amp(model):
  function yaml_load (line 587) | def yaml_load(file='data.yaml'):
  function yaml_save (line 593) | def yaml_save(file='data.yaml', data={}):
  function unzip_file (line 599) | def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):
  function url2file (line 609) | def url2file(url):
  function download (line 615) | def download(url, dir='.', unzip=True, delete=True, curl=False, threads=...
  function make_divisible (line 664) | def make_divisible(x, divisor):
  function clean_str (line 671) | def clean_str(s):
  function one_cycle (line 676) | def one_cycle(y1=0.0, y2=1.0, steps=100):
  function one_flat_cycle (line 681) | def one_flat_cycle(y1=0.0, y2=1.0, steps=100):
  function colorstr (line 687) | def colorstr(*input):
  function labels_to_class_weights (line 713) | def labels_to_class_weights(labels, nc=80):
  function labels_to_image_weights (line 732) | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  function coco80_to_coco91_class (line 739) | def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index...
  function xyxy2xywh (line 751) | def xyxy2xywh(x):
  function xywh2xyxy (line 761) | def xywh2xyxy(x):
  function xywhn2xyxy (line 771) | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
  function xyxy2xywhn (line 781) | def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
  function xyn2xy (line 793) | def xyn2xy(x, w=640, h=640, padw=0, padh=0):
  function segment2box (line 801) | def segment2box(segment, width=640, height=640):
  function segments2boxes (line 809) | def segments2boxes(segments):
  function resample_segments (line 818) | def resample_segments(segments, n=1000):
  function scale_boxes (line 828) | def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
  function scale_segments (line 844) | def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, nor...
  function clip_boxes (line 863) | def clip_boxes(boxes, shape):
  function clip_segments (line 875) | def clip_segments(segments, shape):
  function non_max_suppression (line 885) | def non_max_suppression(
  function strip_optimizer (line 997) | def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; ...
  function print_mutation (line 1013) | def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr...
  function apply_classifier (line 1052) | def apply_classifier(x, model, img, im0):
  function increment_path (line 1087) | def increment_path(path, exist_ok=False, sep='', mkdir=False):
  function imread (line 1117) | def imread(path, flags=cv2.IMREAD_COLOR):
  function imwrite (line 1121) | def imwrite(path, im):
  function imshow (line 1129) | def imshow(path, im):

FILE: asone/detectors/yolov9/yolov9/utils/lion.py
  class Lion (line 6) | class Lion(Optimizer):
    method __init__ (line 9) | def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):
    method step (line 30) | def step(self, closure=None):

FILE: asone/detectors/yolov9/yolov9/utils/loss.py
  function smooth_BCE (line 9) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class BCEBlurWithLogitsLoss (line 14) | class BCEBlurWithLogitsLoss(nn.Module):
    method __init__ (line 16) | def __init__(self, alpha=0.05):
    method forward (line 21) | def forward(self, pred, true):
  class FocalLoss (line 31) | class FocalLoss(nn.Module):
    method __init__ (line 33) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 41) | def forward(self, pred, true):
  class QFocalLoss (line 61) | class QFocalLoss(nn.Module):
    method __init__ (line 63) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 71) | def forward(self, pred, true):
  class ComputeLoss (line 87) | class ComputeLoss:
    method __init__ (line 91) | def __init__(self, model, autobalance=False):
    method __call__ (line 116) | def __call__(self, p, targets):  # predictions, targets
    method build_targets (line 171) | def build_targets(self, p, targets):
  class ComputeLoss_NEW (line 228) | class ComputeLoss_NEW:
    method __init__ (line 232) | def __init__(self, model, autobalance=False):
    method __call__ (line 258) | def __call__(self, p, targets):  # predictions, targets
    method build_targets (line 303) | def build_targets(self, p, targets):

FILE: asone/detectors/yolov9/yolov9/utils/loss_tal.py
  function smooth_BCE (line 14) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class VarifocalLoss (line 19) | class VarifocalLoss(nn.Module):
    method __init__ (line 21) | def __init__(self):
    method forward (line 24) | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
  class FocalLoss (line 32) | class FocalLoss(nn.Module):
    method __init__ (line 34) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 42) | def forward(self, pred, true):
  class BboxLoss (line 62) | class BboxLoss(nn.Module):
    method __init__ (line 63) | def __init__(self, reg_max, use_dfl=False):
    method forward (line 68) | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes...
    method _df_loss (line 94) | def _df_loss(self, pred_dist, target):
  class ComputeLoss (line 106) | class ComputeLoss:
    method __init__ (line 108) | def __init__(self, model, use_dfl=True):
    method preprocess (line 142) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 157) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 165) | def __call__(self, p, targets, img=None, epoch=0):

FILE: asone/detectors/yolov9/yolov9/utils/loss_tal_dual.py
  function smooth_BCE (line 14) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class VarifocalLoss (line 19) | class VarifocalLoss(nn.Module):
    method __init__ (line 21) | def __init__(self):
    method forward (line 24) | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
  class FocalLoss (line 32) | class FocalLoss(nn.Module):
    method __init__ (line 34) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 42) | def forward(self, pred, true):
  class BboxLoss (line 62) | class BboxLoss(nn.Module):
    method __init__ (line 63) | def __init__(self, reg_max, use_dfl=False):
    method forward (line 68) | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes...
    method _df_loss (line 94) | def _df_loss(self, pred_dist, target):
  class ComputeLoss (line 106) | class ComputeLoss:
    method __init__ (line 108) | def __init__(self, model, use_dfl=True):
    method preprocess (line 147) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 162) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 170) | def __call__(self, p, targets, img=None, epoch=0):
  class ComputeLossLH (line 254) | class ComputeLossLH:
    method __init__ (line 256) | def __init__(self, model, use_dfl=True):
    method preprocess (line 290) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 305) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 313) | def __call__(self, p, targets, img=None, epoch=0):

FILE: asone/detectors/yolov9/yolov9/utils/loss_tal_triple.py
  function smooth_BCE (line 14) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class VarifocalLoss (line 19) | class VarifocalLoss(nn.Module):
    method __init__ (line 21) | def __init__(self):
    method forward (line 24) | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
  class FocalLoss (line 32) | class FocalLoss(nn.Module):
    method __init__ (line 34) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 42) | def forward(self, pred, true):
  class BboxLoss (line 62) | class BboxLoss(nn.Module):
    method __init__ (line 63) | def __init__(self, reg_max, use_dfl=False):
    method forward (line 68) | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes...
    method _df_loss (line 94) | def _df_loss(self, pred_dist, target):
  class ComputeLoss (line 106) | class ComputeLoss:
    method __init__ (line 108) | def __init__(self, model, use_dfl=True):
    method preprocess (line 152) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 167) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 175) | def __call__(self, p, targets, img=None, epoch=0):

FILE: asone/detectors/yolov9/yolov9/utils/metrics.py
  function fitness (line 12) | def fitness(x):
  function smooth (line 18) | def smooth(y, f=0.05):
  function ap_per_class (line 26) | def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='....
  function compute_ap (line 93) | def compute_ap(recall, precision):
  class ConfusionMatrix (line 121) | class ConfusionMatrix:
    method __init__ (line 123) | def __init__(self, nc, conf=0.25, iou_thres=0.45):
    method process_batch (line 129) | def process_batch(self, detections, labels):
    method matrix (line 175) | def matrix(self):
    method tp_fp (line 178) | def tp_fp(self):
    method plot (line 185) | def plot(self, normalize=True, save_dir='', names=()):
    method print (line 215) | def print(self):
  class WIoU_Scale (line 220) | class WIoU_Scale:
    method __init__ (line 233) | def __init__(self, iou):
    method _update (line 238) | def _update(cls, self):
    method _scaled_loss (line 243) | def _scaled_loss(cls, self, gamma=1.9, delta=3):
  function bbox_iou (line 254) | def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, ...
  function box_iou (line 300) | def box_iou(box1, box2, eps=1e-7):
  function bbox_ioa (line 321) | def bbox_ioa(box1, box2, eps=1e-7):
  function wh_iou (line 343) | def wh_iou(wh1, wh2, eps=1e-7):
  function plot_pr_curve (line 355) | def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
  function plot_mc_curve (line 378) | def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabe...

FILE: asone/detectors/yolov9/yolov9/utils/plots.py
  class Colors (line 29) | class Colors:
    method __init__ (line 31) | def __init__(self):
    method __call__ (line 38) | def __call__(self, i, bgr=False):
    method hex2rgb (line 43) | def hex2rgb(h):  # rgb order (PIL)
  function check_pil_font (line 50) | def check_pil_font(font=FONT, size=10):
  class Annotator (line 66) | class Annotator:
    method __init__ (line 68) | def __init__(self, im, line_width=None, font_size=None, font='Arial.tt...
    method box_label (line 81) | def box_label(self, box, label='', color=(128, 128, 128), txt_color=(2...
    method masks (line 112) | def masks(self, masks, colors, im_gpu=None, alpha=0.5):
    method rectangle (line 158) | def rectangle(self, xy, fill=None, outline=None, width=1):
    method text (line 162) | def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
    method fromarray (line 169) | def fromarray(self, im):
    method result (line 174) | def result(self):
  function feature_visualization (line 179) | def feature_visualization(x, module_type, stage, n=32, save_dir=Path('ru...
  function hist2d (line 207) | def hist2d(x, y, n=100):
  function butter_lowpass_filtfilt (line 216) | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  function output_to_target (line 229) | def output_to_target(output, max_det=300):
  function plot_images (line 240) | def plot_images(images, targets, paths=None, fname='images.jpg', names=N...
  function plot_lr_scheduler (line 304) | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
  function plot_val_txt (line 321) | def plot_val_txt():  # from utils.plots import *; plot_val()
  function plot_targets_txt (line 338) | def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
  function plot_val_study (line 351) | def plot_val_study(file='', dir='', x=None):  # from utils.plots import ...
  function plot_labels (line 397) | def plot_labels(labels, names=(), save_dir=Path('')):
  function imshow_cls (line 442) | def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=...
  function plot_evolve (line 471) | def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots im...
  function plot_results (line 498) | def plot_results(file='path/to/results.csv', dir=''):
  function profile_idetection (line 524) | def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
  function save_one_box (line 555) | def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, squar...

FILE: asone/detectors/yolov9/yolov9/utils/segment/augmentations.py
  function mixup (line 11) | def mixup(im, labels, segments, im2, labels2, segments2):
  function random_perspective (line 20) | def random_perspective(im,

FILE: asone/detectors/yolov9/yolov9/utils/segment/dataloaders.py
  function create_dataloader (line 18) | def create_dataloader(path,
  class LoadImagesAndLabelsAndMasks (line 78) | class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/...
    method __init__ (line 80) | def __init__(
    method __getitem__ (line 103) | def __getitem__(self, index):
    method load_mosaic (line 204) | def load_mosaic(self, index):
    method collate_fn (line 263) | def collate_fn(batch):
  function polygon2mask (line 271) | def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
  function polygons2masks (line 291) | def polygons2masks(img_size, polygons, color, downsample_ratio=1):
  function polygons2masks_overlap (line 306) | def polygons2masks_overlap(img_size, segments, downsample_ratio=1):

FILE: asone/detectors/yolov9/yolov9/utils/segment/general.py
  function crop_mask (line 7) | def crop_mask(masks, boxes):
  function process_mask_upsample (line 25) | def process_mask_upsample(protos, masks_in, bboxes, shape):
  function process_mask (line 43) | def process_mask(protos, masks_in, bboxes, shape, upsample=False):
  function scale_image (line 70) | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
  function mask_iou (line 98) | def mask_iou(mask1, mask2, eps=1e-7):
  function masks_iou (line 111) | def masks_iou(mask1, mask2, eps=1e-7):
  function masks2segments (line 124) | def masks2segments(masks, strategy='largest'):

FILE: asone/detectors/yolov9/yolov9/utils/segment/loss.py
  class ComputeLoss (line 12) | class ComputeLoss:
    method __init__ (line 14) | def __init__(self, model, autobalance=False, overlap=False):
    method __call__ (line 44) | def __call__(self, preds, targets, masks):  # predictions, targets, model
    method single_mask_loss (line 112) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
    method build_targets (line 118) | def build_targets(self, p, targets):

FILE: asone/detectors/yolov9/yolov9/utils/segment/loss_tal.py
  function smooth_BCE (line 17) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class VarifocalLoss (line 22) | class VarifocalLoss(nn.Module):
    method __init__ (line 24) | def __init__(self):
    method forward (line 27) | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
  class FocalLoss (line 35) | class FocalLoss(nn.Module):
    method __init__ (line 37) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 45) | def forward(self, pred, true):
  class BboxLoss (line 65) | class BboxLoss(nn.Module):
    method __init__ (line 66) | def __init__(self, reg_max, use_dfl=False):
    method forward (line 71) | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes...
    method _df_loss (line 97) | def _df_loss(self, pred_dist, target):
  class ComputeLoss (line 109) | class ComputeLoss:
    method __init__ (line 111) | def __init__(self, model, use_dfl=True, overlap=True):
    method preprocess (line 147) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 162) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 170) | def __call__(self, p, targets, masks, img=None, epoch=0):
    method single_mask_loss (line 246) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):

FILE: asone/detectors/yolov9/yolov9/utils/segment/loss_tal_dual.py
  function smooth_BCE (line 17) | def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues...
  class VarifocalLoss (line 22) | class VarifocalLoss(nn.Module):
    method __init__ (line 24) | def __init__(self):
    method forward (line 27) | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
  class FocalLoss (line 35) | class FocalLoss(nn.Module):
    method __init__ (line 37) | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
    method forward (line 45) | def forward(self, pred, true):
  class BboxLoss (line 65) | class BboxLoss(nn.Module):
    method __init__ (line 66) | def __init__(self, reg_max, use_dfl=False):
    method forward (line 71) | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes...
    method _df_loss (line 97) | def _df_loss(self, pred_dist, target):
  class ComputeLoss (line 109) | class ComputeLoss:
    method __init__ (line 111) | def __init__(self, model, use_dfl=True, overlap=True):
    method preprocess (line 152) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 167) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 175) | def __call__(self, p, targets, masks, img=None, epoch=0):
    method single_mask_loss (line 311) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
  class ComputeLossLH (line 326) | class ComputeLossLH:
    method __init__ (line 328) | def __init__(self, model, use_dfl=True, overlap=True):
    method preprocess (line 364) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 379) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 387) | def __call__(self, p, targets, masks, img=None, epoch=0):
    method single_mask_loss (line 513) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
  class ComputeLossLH0 (line 528) | class ComputeLossLH0:
    method __init__ (line 530) | def __init__(self, model, use_dfl=True, overlap=True):
    method preprocess (line 566) | def preprocess(self, targets, batch_size, scale_tensor):
    method bbox_decode (line 581) | def bbox_decode(self, anchor_points, pred_dist):
    method __call__ (line 589) | def __call__(self, p, targets, masks, img=None, epoch=0):
    method single_mask_loss (line 715) | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):

FILE: asone/detectors/yolov9/yolov9/utils/segment/metrics.py
  function fitness (line 6) | def fitness(x):
  function ap_per_class_box_and_mask (line 12) | def ap_per_class_box_and_mask(
  class Metric (line 61) | class Metric:
    method __init__ (line 63) | def __init__(self) -> None:
    method ap50 (line 71) | def ap50(self):
    method ap (line 79) | def ap(self):
    method mp (line 87) | def mp(self):
    method mr (line 95) | def mr(self):
    method map50 (line 103) | def map50(self):
    method map (line 111) | def map(self):
    method mean_results (line 118) | def mean_results(self):
    method class_result (line 122) | def class_result(self, i):
    method get_maps (line 126) | def get_maps(self, nc):
    method update (line 132) | def update(self, results):
  class Metrics (line 145) | class Metrics:
    method __init__ (line 148) | def __init__(self) -> None:
    method update (line 152) | def update(self, results):
    method mean_results (line 160) | def mean_results(self):
    method class_result (line 163) | def class_result(self, i):
    method get_maps (line 166) | def get_maps(self, nc):
    method ap_class_index (line 170) | def ap_class_index(self):

FILE: asone/detectors/yolov9/yolov9/utils/segment/plots.py
  function plot_images_and_masks (line 17) | def plot_images_and_masks(images, targets, masks, paths=None, fname='ima...
  function plot_results_with_masks (line 111) | def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):

FILE: asone/detectors/yolov9/yolov9/utils/segment/tal/anchor_generator.py
  function make_anchors (line 8) | def make_anchors(feats, strides, grid_cell_offset=0.5):
  function dist2bbox (line 23) | def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
  function bbox2dist (line 35) | def bbox2dist(anchor_points, bbox, reg_max):

FILE: asone/detectors/yolov9/yolov9/utils/segment/tal/assigner.py
  function select_candidates_in_gts (line 8) | def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
  function select_highest_overlaps (line 25) | def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
  class TaskAlignedAssigner (line 51) | class TaskAlignedAssigner(nn.Module):
    method __init__ (line 52) | def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1...
    method forward (line 62) | def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bbox...
    method get_pos_mask (line 107) | def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc...
    method get_box_metrics (line 121) | def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
    method select_topk_candidates (line 134) | def select_topk_candidates(self, metrics, largest=True, topk_mask=None):
    method get_targets (line 157) | def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):

FILE: asone/detectors/yolov9/yolov9/utils/tal/anchor_generator.py
  function make_anchors (line 8) | def make_anchors(feats, strides, grid_cell_offset=0.5):
  function dist2bbox (line 23) | def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
  function bbox2dist (line 35) | def bbox2dist(anchor_points, bbox, reg_max):

FILE: asone/detectors/yolov9/yolov9/utils/tal/assigner.py
  function select_candidates_in_gts (line 8) | def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
  function select_highest_overlaps (line 25) | def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
  class TaskAlignedAssigner (line 51) | class TaskAlignedAssigner(nn.Module):
    method __init__ (line 52) | def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1...
    method forward (line 62) | def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bbox...
    method get_pos_mask (line 106) | def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc...
    method get_box_metrics (line 120) | def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
    method select_topk_candidates (line 133) | def select_topk_candidates(self, metrics, largest=True, topk_mask=None):
    method get_targets (line 156) | def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):

FILE: asone/detectors/yolov9/yolov9/utils/torch_utils.py
  function smart_inference_mode (line 34) | def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9...
  function smartCrossEntropyLoss (line 42) | def smartCrossEntropyLoss(label_smoothing=0.0):
  function smart_DDP (line 51) | def smart_DDP(model):
  function reshape_classifier_output (line 62) | def reshape_classifier_output(model, n=1000):
  function torch_distributed_zero_first (line 85) | def torch_distributed_zero_first(local_rank: int):
  function device_count (line 94) | def device_count():
  function select_device (line 104) | def select_device(device='', batch_size=0, newline=True):
  function time_sync (line 140) | def time_sync():
  function profile (line 147) | def profile(input, ops, n=10, device=None):
  function is_parallel (line 198) | def is_parallel(model):
  function de_parallel (line 203) | def de_parallel(model):
  function initialize_weights (line 208) | def initialize_weights(model):
  function find_modules (line 220) | def find_modules(model, mclass=nn.Conv2d):
  function sparsity (line 225) | def sparsity(model):
  function prune (line 234) | def prune(model, amount=0.3):
  function fuse_conv_and_bn (line 244) | def fuse_conv_and_bn(conv, bn):
  function model_info (line 268) | def model_info(model, verbose=False, imgsz=640):
  function scale_img (line 293) | def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,...
  function copy_attr (line 305) | def copy_attr(a, b, include=(), exclude=()):
  function smart_optimizer (line 314) | def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e...
  function smart_hub_load (line 446) | def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs):
  function smart_resume (line 458) | def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs...
  class EarlyStopping (line 478) | class EarlyStopping:
    method __init__ (line 480) | def __init__(self, patience=30):
    method __call__ (line 486) | def __call__(self, epoch, fitness):
  class ModelEMA (line 501) | class ModelEMA:
    method __init__ (line 507) | def __init__(self, model, decay=0.9999, tau=2000, updates=0):
    method update (line 515) | def update(self, model):
    method update_attr (line 527) | def update_attr(self, model, include=(), exclude=('process_group', 're...

FILE: asone/detectors/yolov9/yolov9/utils/triton.py
  class TritonRemoteModel (line 7) | class TritonRemoteModel:
    method __init__ (line 13) | def __init__(self, url: str):
    method runtime (line 47) | def runtime(self):
    method __call__ (line 51) | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typi...
    method _create_inputs (line 64) | def _create_inputs(self, *args, **kwargs):

FILE: asone/detectors/yolov9/yolov9/utils/yolov9_utils.py
  function prepare_input (line 9) | def prepare_input(image, input_shape):
  function process_output (line 22) | def process_output(output, ori_shape, input_shape, conf_threshold, iou_t...
  function rescale_boxes (line 73) | def rescale_boxes(boxes, ori_shape, input_shape):
  function nms (line 83) | def nms(boxes, scores, iou_threshold):
  function compute_iou (line 105) | def compute_iou(box, boxes):
  function xywh2xyxy (line 126) | def xywh2xyxy(x):
  function non_max_suppression (line 135) | def non_max_suppression(

FILE: asone/detectors/yolov9/yolov9_detector.py
  function xywh2xyxy (line 20) | def xywh2xyxy(x):
  class YOLOv9Detector (line 31) | class YOLOv9Detector:
    method __init__ (line 32) | def __init__(self,
    method load_model (line 52) | def load_model(self, use_cuda, weights, fp16=False):
    method detect (line 73) | def detect(self, image: list,

FILE: asone/detectors/yolox/exps/yolov3.py
  class Exp (line 12) | class Exp(MyExp):
    method __init__ (line 13) | def __init__(self):
    method get_model (line 19) | def get_model(self, sublinear=False):

FILE: asone/detectors/yolox/exps/yolox_l.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: asone/detectors/yolox/exps/yolox_m.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: asone/detectors/yolox/exps/yolox_nano.py
  class Exp (line 12) | class Exp(MyExp):
    method __init__ (line 13) | def __init__(self):
    method get_model (line 25) | def get_model(self, sublinear=False):

FILE: asone/detectors/yolox/exps/yolox_s.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: asone/detectors/yolox/exps/yolox_tiny.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: asone/detectors/yolox/exps/yolox_x.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: asone/detectors/yolox/yolox/core/launch.py
  function _find_free_port (line 24) | def _find_free_port():
  function launch (line 39) | def launch(
  function _distributed_worker (line 101) | def _distributed_worker(

FILE: asone/detectors/yolox/yolox/core/trainer.py
  class Trainer (line 36) | class Trainer:
    method __init__ (line 37) | def __init__(self, exp: Exp, args):
    method train (line 73) | def train(self):
    method train_in_epoch (line 82) | def train_in_epoch(self):
    method train_in_iter (line 88) | def train_in_iter(self):
    method train_one_iter (line 94) | def train_one_iter(self):
    method before_train (line 129) | def before_train(self):
    method after_train (line 194) | def after_train(self):
    method before_epoch (line 202) | def before_epoch(self):
    method after_epoch (line 217) | def after_epoch(self):
    method before_iter (line 224) | def before_iter(self):
    method after_iter (line 227) | def after_iter(self):
    method progress_in_iter (line 289) | def progress_in_iter(self):
    method resume_train (line 292) | def resume_train(self, model):
    method evaluate_and_save_model (line 327) | def evaluate_and_save_model(self):
    method save_ckpt (line 361) | def save_ckpt(self, ckpt_name, update_best_ckpt=False, ap=None):

FILE: asone/detectors/yolox/yolox/data/data_augment.py
  function augment_hsv (line 21) | def augment_hsv(img, hgain=5, sgain=30, vgain=30):
  function get_aug_params (line 34) | def get_aug_params(value, center=0):
  function get_affine_matrix (line 46) | def get_affine_matrix(
  function apply_affine_to_bboxes (line 82) | def apply_affine_to_bboxes(targets, target_size, M, scale):
  function random_affine (line 114) | def random_affine(
  function _mirror (line 134) | def _mirror(image, boxes, prob=0.5):
  function preproc (line 142) | def preproc(img, input_size, swap=(2, 0, 1)):
  class TrainTransform (line 161) | class TrainTransform:
    method __init__ (line 162) | def __init__(self, max_labels=50, flip_prob=0.5, hsv_prob=1.0):
    method __call__ (line 167) | def __call__(self, image, targets, input_dim):
  class ValTransform (line 213) | class ValTransform:
    method __init__ (line 231) | def __init__(self, swap=(2, 0, 1), legacy=False):
    method __call__ (line 236) | def __call__(self, img, res, input_size):

FILE: asone/detectors/yolox/yolox/data/data_prefetcher.py
  class DataPrefetcher (line 8) | class DataPrefetcher:
    method __init__ (line 16) | def __init__(self, loader):
    method preload (line 23) | def preload(self):
    method next (line 35) | def next(self):
    method _input_cuda_for_image (line 46) | def _input_cuda_for_image(self):
    method _record_stream_for_image (line 50) | def _record_stream_for_image(input):

FILE: asone/detectors/yolox/yolox/data/dataloading.py
  function get_yolox_datadir (line 18) | def get_yolox_datadir():
  class DataLoader (line 32) | class DataLoader(torchDataLoader):
    method __init__ (line 40) | def __init__(self, *args, **kwargs):
    method close_mosaic (line 88) | def close_mosaic(self):
  function list_collate (line 92) | def list_collate(batch):
  function worker_init_reset_seed (line 109) | def worker_init_reset_seed(worker_id):

FILE: asone/detectors/yolox/yolox/data/datasets/coco.py
  function remove_useless_info (line 15) | def remove_useless_info(coco):
  class COCODataset (line 34) | class COCODataset(CacheDataset):
    method __init__ (line 39) | def __init__(
    method __len__ (line 86) | def __len__(self):
    method _load_coco_annotations (line 89) | def _load_coco_annotations(self):
    method load_anno_from_ids (line 92) | def load_anno_from_ids(self, id_):
    method load_anno (line 130) | def load_anno(self, index):
    method load_resized_img (line 133) | def load_resized_img(self, index):
    method load_image (line 143) | def load_image(self, index):
    method read_img (line 154) | def read_img(self, index):
    method pull_item (line 157) | def pull_item(self, index):
    method __getitem__ (line 165) | def __getitem__(self, index):

FILE: asone/detectors/yolox/yolox/data/datasets/datasets_wrapper.py
  class ConcatDataset (line 22) | class ConcatDataset(torchConcatDataset):
    method __init__ (line 23) | def __init__(self, datasets):
    method pull_item (line 29) | def pull_item(self, idx):
  class MixConcatDataset (line 44) | class MixConcatDataset(torchConcatDataset):
    method __init__ (line 45) | def __init__(self, datasets):
    method __getitem__ (line 51) | def __getitem__(self, index):
  class Dataset (line 72) | class Dataset(torchDataset):
    method __init__ (line 80) | def __init__(self, input_dimension, mosaic=True):
    method input_dim (line 86) | def input_dim(self):
    method mosaic_getitem (line 100) | def mosaic_getitem(getitem_fn):
  class CacheDataset (line 127) | class CacheDataset(Dataset, metaclass=ABCMeta):
    method __init__ (line 147) | def __init__(
    method __del__ (line 176) | def __del__(self):
    method read_img (line 181) | def read_img(self, index):
    method cache_images (line 190) | def cache_images(
    method cal_cache_occupy (line 262) | def cal_cache_occupy(self, num_imgs):
  function cache_read_img (line 272) | def cache_read_img(use_cache=True):

FILE: asone/detectors/yolox/yolox/data/datasets/mosaicdetection.py
  function get_mosaic_coordinate (line 16) | def get_mosaic_coordinate(mosaic_image, mosaic_index, xc, yc, w, h, inpu...
  class MosaicDetection (line 37) | class MosaicDetection(Dataset):
    method __init__ (line 40) | def __init__(
    method __len__ (line 75) | def __len__(self):
    method __getitem__ (line 79) | def __getitem__(self, idx):
    method mixup (line 162) | def mixup(self, origin_img, origin_labels, input_dim):

FILE: asone/detectors/yolox/yolox/data/datasets/voc.py
  class AnnotationTransform (line 23) | class AnnotationTransform(object):
    method __init__ (line 37) | def __init__(self, class_to_ind=None, keep_difficult=True):
    method __call__ (line 43) | def __call__(self, target):
  class VOCDetection (line 82) | class VOCDetection(CacheDataset):
    method __init__ (line 101) | def __init__(
    method __len__ (line 151) | def __len__(self):
    method _load_coco_annotations (line 154) | def _load_coco_annotations(self):
    method load_anno_from_ids (line 157) | def load_anno_from_ids(self, index):
    method load_anno (line 171) | def load_anno(self, index):
    method load_resized_img (line 174) | def load_resized_img(self, index):
    method load_image (line 185) | def load_image(self, index):
    method read_img (line 193) | def read_img(self, index):
    method pull_item (line 196) | def pull_item(self, index):
    method __getitem__ (line 213) | def __getitem__(self, index):
    method evaluate_detections (line 221) | def evaluate_detections(self, all_boxes, output_dir=None):
    method _get_voc_results_file_template (line 245) | def _get_voc_results_file_template(self):
    method _write_voc_results_file (line 253) | def _write_voc_results_file(self, all_boxes):
    method _do_python_eval (line 278) | def _do_python_eval(self, output_dir="output", iou=0.5):

FILE: asone/detectors/yolox/yolox/data/samplers.py
  class YoloBatchSampler (line 14) | class YoloBatchSampler(torchBatchSampler):
    method __init__ (line 21) | def __init__(self, *args, mosaic=True, **kwargs):
    method __iter__ (line 25) | def __iter__(self):
  class InfiniteSampler (line 30) | class InfiniteSampler(Sampler):
    method __init__ (line 41) | def __init__(
    method __iter__ (line 69) | def __iter__(self):
    method _infinite_indices (line 75) | def _infinite_indices(self):
    method __len__ (line 84) | def __len__(self):

FILE: asone/detectors/yolox/yolox/evaluators/coco_evaluator.py
  function per_class_AR_table (line 31) | def per_class_AR_table(coco_eval, class_names=COCO_CLASSES, headers=["cl...
  function per_class_AP_table (line 54) | def per_class_AP_table(coco_eval, class_names=COCO_CLASSES, headers=["cl...
  class COCOEvaluator (line 79) | class COCOEvaluator:
    method __init__ (line 85) | def __init__(
    method evaluate (line 116) | def evaluate(
    method convert_to_coco_format (line 207) | def convert_to_coco_format(self, outputs, info_imgs, ids, return_outpu...
    method evaluate_prediction (line 255) | def evaluate_prediction(self, data_dict, statistics):

FILE: asone/detectors/yolox/yolox/evaluators/voc_eval.py
  function parse_rec (line 14) | def parse_rec(filename):
  function voc_ap (line 36) | def voc_ap(rec, prec, use_07_metric=False):
  function voc_eval (line 70) | def voc_eval(

FILE: asone/detectors/yolox/yolox/evaluators/voc_evaluator.py
  class VOCEvaluator (line 19) | class VOCEvaluator:
    method __init__ (line 24) | def __init__(self, dataloader, img_size, confthre, nmsthre, num_classes):
    method evaluate (line 41) | def evaluate(
    method convert_to_voc_format (line 120) | def convert_to_voc_format(self, outputs, info_imgs, ids):
    method evaluate_prediction (line 140) | def evaluate_prediction(self, data_dict, statistics):

FILE: asone/detectors/yolox/yolox/exp/base_exp.py
  class BaseExp (line 18) | class BaseExp(metaclass=ABCMeta):
    method __init__ (line 21) | def __init__(self):
    method get_model (line 28) | def get_model(self) -> Module:
    method get_data_loader (line 32) | def get_data_loader(
    method get_optimizer (line 38) | def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:
    method get_lr_scheduler (line 42) | def get_lr_scheduler(
    method get_evaluator (line 48) | def get_evaluator(self):
    method eval (line 52) | def eval(self, model, evaluator, weights):
    method __repr__ (line 55) | def __repr__(self):
    method merge (line 64) | def merge(self, cfg_list):

FILE: asone/detectors/yolox/yolox/exp/build.py
  function get_exp_by_file (line 10) | def get_exp_by_file(exp_file):
  function get_exp_by_name (line 23) | def get_exp_by_name(exp_name):
  function get_exp (line 30) | def get_exp(exp_file=None, exp_name=None):

FILE: asone/detectors/yolox/yolox/exp/default/__init__.py
  class _ExpFinder (line 17) | class _ExpFinder(importlib.abc.MetaPathFinder):
    method find_spec (line 19) | def find_spec(self, name, path, target=None):

FILE: asone/detectors/yolox/yolox/exp/yolox_base.py
  class Exp (line 15) | class Exp(BaseExp):
    method __init__ (line 16) | def __init__(self):
    method get_model (line 110) | def get_model(self):
    method get_data_loader (line 130) | def get_data_loader(self, batch_size, is_distributed, no_aug=False, ca...
    method random_resize (line 197) | def random_resize(self, data_loader, epoch, rank, is_distributed):
    method preprocess (line 218) | def preprocess(self, inputs, targets, tsize):
    method get_optimizer (line 229) | def get_optimizer(self, batch_size):
    method get_lr_scheduler (line 257) | def get_lr_scheduler(self, lr, iters_per_epoch):
    method get_eval_loader (line 272) | def get_eval_loader(self, batch_size, is_distributed, testdev=False, l...
    method get_evaluator (line 301) | def get_evaluator(self, batch_size, is_distributed, testdev=False, leg...
    method get_trainer (line 315) | def get_trainer(self, args):
    method eval (line 321) | def eval(self, model, evaluator, is_distributed, half=False, return_ou...

FILE: asone/detectors/yolox/yolox/models/build.py
  function create_yolox_model (line 32) | def create_yolox_model(name: str, pretrained: bool = True, num_classes: ...
  function yolox_nano (line 82) | def yolox_nano(pretrained: bool = True, num_classes: int = 80, device: s...
  function yolox_tiny (line 86) | def yolox_tiny(pretrained: bool = True, num_classes: int = 80, device: s...
  function yolox_s (line 90) | def yolox_s(pretrained: bool = True, num_classes: int = 80, device: str ...
  function yolox_m (line 94) | def yolox_m(pretrained: bool = True, num_classes: int = 80, device: str ...
  function yolox_l (line 98) | def yolox_l(pretrained: bool = True, num_classes: int = 80, device: str ...
  function yolox_x (line 102) | def yolox_x(pretrained: bool = True, num_classes: int = 80, device: str ...
  function yolov3 (line 106) | def yolov3(pretrained: bool = True, num_classes: int = 80, device: str =...
  function yolox_custom (line 110) | def yolox_custom(ckpt_path: str = None, exp_path: str = None, device: st...

FILE: asone/detectors/yolox/yolox/models/darknet.py
  class Darknet (line 10) | class Darknet(nn.Module):
    method __init__ (line 14) | def __init__(
    method make_group_layer (line 59) | def make_group_layer(self, in_channels: int, num_blocks: int, stride: ...
    method make_spp_block (line 66) | def make_spp_block(self, filters_list, in_filters):
    method forward (line 82) | def forward(self, x):
  class CSPDarknet (line 97) | class CSPDarknet(nn.Module):
    method __init__ (line 98) | def __init__(
    method forward (line 167) | def forward(self, x):

FILE: asone/detectors/yolox/yolox/models/losses.py
  class IOUloss (line 9) | class IOUloss(nn.Module):
    method __init__ (line 10) | def __init__(self, reduction="none", loss_type="iou"):
    method forward (line 15) | def forward(self, pred, target):

FILE: asone/detectors/yolox/yolox/models/network_blocks.py
  class SiLU (line 9) | class SiLU(nn.Module):
    method forward (line 13) | def forward(x):
  function get_activation (line 17) | def get_activation(name="silu", inplace=True):
  class BaseConv (line 29) | class BaseConv(nn.Module):
    method __init__ (line 32) | def __init__(
    method forward (line 50) | def forward(self, x):
    method fuseforward (line 53) | def fuseforward(self, x):
  class DWConv (line 57) | class DWConv(nn.Module):
    method __init__ (line 60) | def __init__(self, in_channels, out_channels, ksize, stride=1, act="si...
    method forward (line 74) | def forward(self, x):
  class Bottleneck (line 79) | class Bottleneck(nn.Module):
    method __init__ (line 81) | def __init__(
    method forward (line 97) | def forward(self, x):
  class ResLayer (line 104) | class ResLayer(nn.Module):
    method __init__ (line 107) | def __init__(self, in_channels: int):
    method forward (line 117) | def forward(self, x):
  class SPPBottleneck (line 122) | class SPPBottleneck(nn.Module):
    method __init__ (line 125) | def __init__(
    method forward (line 140) | def forward(self, x):
  class CSPLayer (line 147) | class CSPLayer(nn.Module):
    method __init__ (line 150) | def __init__(
    method forward (line 180) | def forward(self, x):
  class Focus (line 188) | class Focus(nn.Module):
    method __init__ (line 191) | def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="...
    method forward (line 195) | def forward(self, x):

FILE: asone/detectors/yolox/yolox/models/yolo_fpn.py
  class YOLOFPN (line 12) | class YOLOFPN(nn.Module):
    method __init__ (line 17) | def __init__(
    method _make_cbl (line 38) | def _make_cbl(self, _in, _out, ks):
    method _make_embedding (line 41) | def _make_embedding(self, filters_list, in_filters):
    method load_pretrained_model (line 53) | def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"):
    method forward (line 59) | def forward(self, inputs):

FILE: asone/detectors/yolox/yolox/models/yolo_head.py
  class YOLOXHead (line 18) | class YOLOXHead(nn.Module):
    method __init__ (line 19) | def __init__(
    method initialize_biases (line 132) | def initialize_biases(self, prior_prob):
    method forward (line 143) | def forward(self, xin, labels=None, imgs=None):
    method get_output_and_grid (line 216) | def get_output_and_grid(self, output, k, stride, dtype):
    method decode_outputs (line 236) | def decode_outputs(self, outputs, dtype):
    method get_losses (line 253) | def get_losses(
    method get_l1_target (line 421) | def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps...
    method get_assignments (line 429) | def get_assignments(
    method get_in_boxes_info (line 526) | def get_in_boxes_info(
    method dynamic_k_matching (line 611) | def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,...

FILE: asone/detectors/yolox/yolox/models/yolo_pafpn.py
  class YOLOPAFPN (line 12) | class YOLOPAFPN(nn.Module):
    method __init__ (line 17) | def __init__(
    method forward (line 83) | def forward(self, input):

FILE: asone/detectors/yolox/yolox/models/yolox.py
  class YOLOX (line 11) | class YOLOX(nn.Module):
    method __init__ (line 18) | def __init__(self, backbone=None, head=None):
    method forward (line 28) | def forward(self, x, targets=None):

FILE: asone/detectors/yolox/yolox/utils/allreduce_norm.py
  function get_async_norm_states (line 32) | def get_async_norm_states(module):
  function pyobj2tensor (line 41) | def pyobj2tensor(pyobj, device="cuda"):
  function tensor2pyobj (line 47) | def tensor2pyobj(tensor):
  function _get_reduce_op (line 52) | def _get_reduce_op(op_name):
  function all_reduce (line 59) | def all_reduce(py_dict, op="sum", group=None):
  function all_reduce_norm (line 97) | def all_reduce_norm(module):

FILE: asone/detectors/yolox/yolox/utils/boxes.py
  function filter_box (line 21) | def filter_box(output, scale_range):
  function postprocess (line 32) | def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45, c...
  function bboxes_iou (line 80) | def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
  function matrix_iou (line 106) | def matrix_iou(a, b):
  function adjust_box_anns (line 119) | def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
  function xyxy2xywh (line 125) | def xyxy2xywh(bboxes):
  function xyxy2cxcywh (line 131) | def xyxy2cxcywh(bboxes):

FILE: asone/detectors/yolox/yolox/utils/checkpoint.py
  function load_ckpt (line 11) | def load_ckpt(model, ckpt):
  function save_checkpoint (line 36) | def save_checkpoint(state, is_best, save_dir, model_name=""):

FILE: asone/detectors/yolox/yolox/utils/compat.py
  function meshgrid (line 11) | def meshgrid(*tensors):

FILE: asone/detectors/yolox/yolox/utils/demo_utils.py
  function mkdir (line 12) | def mkdir(path):
  function nms (line 17) | def nms(boxes, scores, nms_thr):
  function multiclass_nms (line 47) | def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
  function multiclass_nms_class_aware (line 56) | def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
  function multiclass_nms_class_agnostic (line 80) | def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
  function demo_postprocess (line 99) | def demo_postprocess(outputs, img_size, p6=False):

FILE: asone/detectors/yolox/yolox/utils/dist.py
  function get_num_devices (line 41) | def get_num_devices():
  function wait_for_the_master (line 52) | def wait_for_the_master(local_rank: int = None):
  function synchronize (line 75) | def synchronize():
  function get_world_size (line 89) | def get_world_size() -> int:
  function get_rank (line 97) | def get_rank() -> int:
  function get_local_rank (line 105) | def get_local_rank() -> int:
  function get_local_size (line 120) | def get_local_size() -> int:
  function is_main_process (line 132) | def is_main_process() -> bool:
  function _get_global_gloo_group (line 137) | def _get_global_gloo_group():
  function _serialize_to_tensor (line 148) | def _serialize_to_tensor(data, group):
  function _pad_to_largest_tensor (line 165) | def _pad_to_largest_tensor(tensor, group):
  function all_gather (line 195) | def all_gather(data, group=None):
  function gather (line 233) | def gather(data, dst=0, group=None):
  function shared_random_seed (line 277) | def shared_random_seed():
  function time_synchronized (line 290) | def time_synchronized():

FILE: asone/detectors/yolox/yolox/utils/ema.py
  function is_parallel (line 13) | def is_parallel(model):
  class ModelEMA (line 22) | class ModelEMA:
    method __init__ (line 33) | def __init__(self, model, decay=0.9999, updates=0):
    method update (line 48) | def update(self, model):

FILE: asone/detectors/yolox/yolox/utils/logger.py
  function get_caller_name (line 17) | def get_caller_name(depth=0):
  class StreamToLoguru (line 34) | class StreamToLoguru:
    method __init__ (line 39) | def __init__(s
Condensed preview — 501 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (3,568K chars).
[
  {
    "path": ".dockerignore",
    "chars": 128,
    "preview": ".env/\nresults/\n**__pycache__**\n*.onnx\n*.pt\n*.mlmodel\n**byte_track_results**\n**deep_sort_results**\n**nor_fair_results**\nt"
  },
  {
    "path": ".gitignore",
    "chars": 205,
    "preview": ".env/\n**__pycache__**\n*.onnx\n*.pt\n*.pth\n*.mlmodel\n**byte_track_results**\n**deep_sort_results**\n**nor_fair_results**\nbuil"
  },
  {
    "path": "Dockerfile",
    "chars": 755,
    "preview": "FROM pytorch/pytorch:latest\n\n# Set Time Zone to prevent issues for installing some apt packages\nENV TZ=Europe/Minsk\nRUN "
  },
  {
    "path": "LICENCE",
    "chars": 35148,
    "preview": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
  },
  {
    "path": "README.md",
    "chars": 12373,
    "preview": "# AS-One v2 : A Modular Library for YOLO Object Detection, Segmentation, Tracking & Pose\n\n\n\n<div align=\"center\">\n  <p>\n "
  },
  {
    "path": "asone/__init__.py",
    "chars": 3080,
    "preview": "from .asone import ASOne\nimport asone.detectors\nimport asone.trackers\nimport asone.recognizers\nimport asone.segmentors\nf"
  },
  {
    "path": "asone/asone.py",
    "chars": 17811,
    "preview": "import copy\nimport warnings\nimport cv2\nfrom loguru import logger\nimport os\nimport time\nimport asone.utils as utils\nfrom "
  },
  {
    "path": "asone/demo_detector.py",
    "chars": 2639,
    "preview": "import sys\nimport argparse\nimport asone\nfrom asone import ASOne\nimport torch\n\n\ndef main(args):\n    filter_classes = args"
  },
  {
    "path": "asone/demo_ocr.py",
    "chars": 2149,
    "preview": "import argparse\nimport asone\nfrom asone import ASOne\n\ndef main(args):\n\n    detect = ASOne(\n        tracker=asone.DEEPSOR"
  },
  {
    "path": "asone/demo_pose_estimator.py",
    "chars": 2451,
    "preview": "import asone\nfrom asone import PoseEstimator\nfrom .utils import draw_kpts\nimport cv2\nimport argparse\nimport time\nimport "
  },
  {
    "path": "asone/demo_segmentor.py",
    "chars": 2875,
    "preview": "import sys\nimport argparse\nimport asone\nfrom asone import ASOne\nimport torch\n\n\ndef main(args):\n    filter_classes = args"
  },
  {
    "path": "asone/demo_tracker.py",
    "chars": 1722,
    "preview": "import asone\nfrom asone import ASOne\nfrom .utils import draw_boxes\nimport cv2\nimport argparse\nimport time\nimport os\n\n\nde"
  },
  {
    "path": "asone/detectors/__init__.py",
    "chars": 652,
    "preview": "from asone.detectors.yolov5 import YOLOv5Detector\nfrom asone.detectors.yolov6 import YOLOv6Detector\nfrom asone.detectors"
  },
  {
    "path": "asone/detectors/detector.py",
    "chars": 5272,
    "preview": "import cv2\n\nfrom asone.detectors.utils.weights_path import get_weight_path\nfrom asone.detectors.utils.cfg_path import ge"
  },
  {
    "path": "asone/detectors/easyocr_detector/__init__.py",
    "chars": 66,
    "preview": "from .text_detector import TextDetector\n__all__ = ['TextDetector']"
  },
  {
    "path": "asone/detectors/easyocr_detector/text_detector.py",
    "chars": 2094,
    "preview": "import easyocr\nimport numpy as np\n\n\nclass TextDetector:\n    def __init__(self, detect_network, languages: list = ['en'],"
  },
  {
    "path": "asone/detectors/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/utils/cfg_path.py",
    "chars": 533,
    "preview": "import os\n\ncfg_dir = os.path.dirname(os.path.dirname(__file__))\n\nconfiguration = {'0': os.path.join(cfg_dir, 'yolor','cf"
  },
  {
    "path": "asone/detectors/utils/coreml_utils.py",
    "chars": 1545,
    "preview": "import numpy as np\n\n\ndef yolo_to_xyxy(bboxes, img_size):\n    w, h = img_size\n    \n    bboxes = bboxes[:, 0:]\n    bboxes["
  },
  {
    "path": "asone/detectors/utils/exp_name.py",
    "chars": 1478,
    "preview": "import os\n\nexp_dir = os.path.dirname(os.path.dirname(__file__))\n\nexp_file_name = {'58': (os.path.join(exp_dir, 'yolox','"
  },
  {
    "path": "asone/detectors/utils/weights_path.py",
    "chars": 9796,
    "preview": "import os\n\nweights = { '0': os.path.join('yolov5','weights','yolov5x6.pt'),\n            '1': os.path.join('yolov5','weig"
  },
  {
    "path": "asone/detectors/yolonas/__init__.py",
    "chars": 66,
    "preview": "from .yolonas import YOLOnasDetector\n__all__ = ['YOLOnasDetector']"
  },
  {
    "path": "asone/detectors/yolonas/yolonas.py",
    "chars": 4038,
    "preview": "import os\nfrom asone.utils import get_names\nimport numpy as np\nimport warnings\nimport torch\nimport onnxruntime\nfrom ason"
  },
  {
    "path": "asone/detectors/yolor/__init__.py",
    "chars": 69,
    "preview": "from .yolor_detector import YOLOrDetector\n__all__ = ['YOLOrDetector']"
  },
  {
    "path": "asone/detectors/yolor/cfg/yolor_csp.cfg",
    "chars": 14241,
    "preview": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=512\nheight=512\nchannels=3\nmomentum=0.9"
  },
  {
    "path": "asone/detectors/yolor/cfg/yolor_csp_x.cfg",
    "chars": 16338,
    "preview": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=8\nwidth=512\nheight=512\nchannels=3\nmomentum=0.9"
  },
  {
    "path": "asone/detectors/yolor/cfg/yolor_p6.cfg",
    "chars": 18330,
    "preview": "[net]\nbatch=64\nsubdivisions=8\nwidth=1280\nheight=1280\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexp"
  },
  {
    "path": "asone/detectors/yolor/models/__init__.py",
    "chars": 1,
    "preview": "\n"
  },
  {
    "path": "asone/detectors/yolor/models/common.py",
    "chars": 38971,
    "preview": "# This file contains modules common to various models\n\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as n"
  },
  {
    "path": "asone/detectors/yolor/models/export.py",
    "chars": 2733,
    "preview": "import argparse\n\nimport torch\n\nfrom asone.detectors.yolor.utils.google_utils import attempt_download\n\nif __name__ == '__"
  },
  {
    "path": "asone/detectors/yolor/models/models.py",
    "chars": 36694,
    "preview": "from asone.detectors.yolor.utils.google_utils import *\nfrom asone.detectors.yolor.utils.layers import *\nfrom asone.detec"
  },
  {
    "path": "asone/detectors/yolor/utils/__init__.py",
    "chars": 1,
    "preview": "\n"
  },
  {
    "path": "asone/detectors/yolor/utils/activations.py",
    "chars": 2200,
    "preview": "# Activation functions\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# Swish https://arxiv.org/p"
  },
  {
    "path": "asone/detectors/yolor/utils/autoanchor.py",
    "chars": 6729,
    "preview": "# Auto-anchor utils\n\nimport numpy as np\nimport torch\nimport yaml\nfrom scipy.cluster.vq import kmeans\nfrom tqdm import tq"
  },
  {
    "path": "asone/detectors/yolor/utils/datasets.py",
    "chars": 54966,
    "preview": "# Dataset utils and dataloaders\n\nimport glob\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom itertool"
  },
  {
    "path": "asone/detectors/yolor/utils/export.py",
    "chars": 3310,
    "preview": "import argparse\n\nimport torch\nfrom asone.detectors.yolor.models.models import *\nfrom asone.detectors.yolor.utils.google_"
  },
  {
    "path": "asone/detectors/yolor/utils/general.py",
    "chars": 18709,
    "preview": "# General utils\n\nimport glob\nimport logging\nimport math\nimport os\nimport platform\nimport random\nimport re\nimport subproc"
  },
  {
    "path": "asone/detectors/yolor/utils/google_utils.py",
    "chars": 4964,
    "preview": "# Google utils: https://cloud.google.com/storage/docs/reference/libraries\n\nimport os\nimport platform\nimport subprocess\ni"
  },
  {
    "path": "asone/detectors/yolor/utils/layers.py",
    "chars": 18495,
    "preview": "from asone.detectors.yolor.utils.general import *\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\ntry"
  },
  {
    "path": "asone/detectors/yolor/utils/loss.py",
    "chars": 7443,
    "preview": "# Loss functions\n\nimport torch\nimport torch.nn as nn\n\nfrom asone.detectors.yolor.utils.general import bbox_iou\nfrom ason"
  },
  {
    "path": "asone/detectors/yolor/utils/metrics.py",
    "chars": 5137,
    "preview": "# Model validation metrics\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef fitness(x):\n    # Model fitness as "
  },
  {
    "path": "asone/detectors/yolor/utils/parse_config.py",
    "chars": 2995,
    "preview": "import os\n\nimport numpy as np\n\n\ndef parse_model_cfg(path):\n    # Parse the yolo *.cfg file and return module definitions"
  },
  {
    "path": "asone/detectors/yolor/utils/plots.py",
    "chars": 15468,
    "preview": "# Plotting utils\n\nimport glob\nimport math\nimport os\nimport random\nfrom copy import copy\nfrom pathlib import Path\n\nimport"
  },
  {
    "path": "asone/detectors/yolor/utils/torch_utils.py",
    "chars": 9396,
    "preview": "# PyTorch utils\n\nimport logging\nimport math\nimport os\nimport time\nfrom contextlib import contextmanager\nfrom copy import"
  },
  {
    "path": "asone/detectors/yolor/utils/yolor_utils.py",
    "chars": 9267,
    "preview": "import torch\nimport torchvision\nimport time\nimport numpy as np\nimport cv2\n\n\nclass_names = ['person', 'bicycle', 'car', '"
  },
  {
    "path": "asone/detectors/yolor/yolor_detector.py",
    "chars": 5417,
    "preview": "\nimport os\nfrom asone.utils import get_names\nimport numpy as np\nimport warnings\nimport torch\nimport onnxruntime\n\nfrom as"
  },
  {
    "path": "asone/detectors/yolov5/__init__.py",
    "chars": 72,
    "preview": "from .yolov5_detector import YOLOv5Detector\n__all__ = ['YOLOv5Detector']"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov5/yolov5/export.py",
    "chars": 42538,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"\nExport a YOLOv5 PyTorch model to other formats. TensorFlow exports autho"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/__init__.py",
    "chars": 70,
    "preview": "# import os\n# import sys\n# sys.path.append(os.path.dirname(__file__))\n"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/common.py",
    "chars": 36650,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport json\nimport math\nimport platform\nimport warnin"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/experimental.py",
    "chars": 2337,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nExperimental modules\n\"\"\"\nimport math\n\nimport numpy as np\nimport torch\nimp"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/general.py",
    "chars": 42685,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nGeneral utils\n\"\"\"\n\nimport contextlib\nimport glob\nimport inspect\nimport lo"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/tf.py",
    "chars": 32132,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nAuthored by https://gith"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/models/yolo.py",
    "chars": 15920,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nYOLO-specific modules\n\nUsage:\n    $ python path/to/models/yolo.py --cfg y"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/activations.py",
    "chars": 4605,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"Activation functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/augmentations.py",
    "chars": 18696,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"Image augmentation functions.\"\"\"\n\nimport math\nimport random\n\nimport cv2\ni"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/dataloaders.py",
    "chars": 60048,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"Dataloaders and dataset utils.\"\"\"\n\nimport contextlib\nimport glob\nimport h"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/downloads.py.py",
    "chars": 5304,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"Download utils.\"\"\"\n\nimport logging\nimport subprocess\nimport urllib\nfrom p"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/general.py",
    "chars": 50903,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"General utils.\"\"\"\n\nimport contextlib\nimport glob\nimport inspect\nimport lo"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/metrics.py",
    "chars": 15493,
    "preview": "# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license\n\"\"\"Model validation metrics.\"\"\"\n\nimport math\nimport warnings\nfrom pathlib im"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/torch_utils.py",
    "chars": 15967,
    "preview": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch utils\n\"\"\"\n\nimport math\nimport os\nimport platform\nimport subproces"
  },
  {
    "path": "asone/detectors/yolov5/yolov5/utils/yolov5_utils.py",
    "chars": 8854,
    "preview": "import contextlib\nimport time\nimport numpy as np\nimport torch\nimport torchvision\nimport cv2\nimport sys\nfrom pathlib impo"
  },
  {
    "path": "asone/detectors/yolov5/yolov5_detector.py",
    "chars": 6498,
    "preview": "import os\nfrom asone.utils import get_names\nimport numpy as np\nimport warnings\nimport torch\nfrom PIL import Image\nimport"
  },
  {
    "path": "asone/detectors/yolov6/__init__.py",
    "chars": 72,
    "preview": "from .yolov6_detector import YOLOv6Detector\n__all__ = ['YOLOv6Detector']"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/__init__.py",
    "chars": 85,
    "preview": "from .atss_assigner import ATSSAssigner\nfrom .tal_assigner import TaskAlignedAssigner"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/anchor_generator.py",
    "chars": 2372,
    "preview": "import torch\n\n\ndef generate_anchors(feats, fpn_strides, grid_cell_size=5.0, grid_cell_offset=0.5,  device='cpu', is_eval"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/assigner_utils.py",
    "chars": 3682,
    "preview": "import torch\nimport torch.nn.functional as F\n\ndef dist_calculator(gt_bboxes, anchor_bboxes):\n    \"\"\"compute center dista"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/atss_assigner.py",
    "chars": 7145,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom asone.detectors.yolov6.yolov6.assigners.iou2d_ca"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/iou2d_calculator.py",
    "chars": 9211,
    "preview": "#This code is based on\n#https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/iou_calculators/iou2d_calculator.py\n\n"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/assigners/tal_assigner.py",
    "chars": 6166,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom asone.detectors.yolov6.yolov6.assigners.assigner"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/layers/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov6/yolov6/layers/common.py",
    "chars": 18295,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport warnings\nfrom pathlib import Path\n\nimport numpy as np\nimport torch"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/layers/dbb_transforms.py",
    "chars": 1913,
    "preview": "import torch\nimport numpy as np\nimport torch.nn.functional as F\n\n\ndef transI_fusebn(kernel, bn):\n    gamma = bn.weight\n "
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/efficientrep.py",
    "chars": 5865,
    "preview": "from torch import nn\nfrom asone.detectors.yolov6.yolov6.layers.common import BottleRep, RepVGGBlock, RepBlock, BepC3, Si"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/effidehead.py",
    "chars": 8347,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom asone.detectors.yolov6.yolov6.layers"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/end2end.py",
    "chars": 10838,
    "preview": "import torch\nimport torch.nn as nn\nimport random\n\n\nclass ORT_NMS(torch.autograd.Function):\n    '''ONNX-Runtime NMS opera"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/loss.py",
    "chars": 8875,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.fun"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/loss_distill.py",
    "chars": 13589,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.fun"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/reppan.py",
    "chars": 7240,
    "preview": "import torch\nfrom torch import nn\nfrom asone.detectors.yolov6.yolov6.layers.common import RepBlock, RepVGGBlock, BottleR"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/models/yolo.py",
    "chars": 4066,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional "
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/checkpoint.py",
    "chars": 2306,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport os\nimport shutil\nimport torch\nimport os.path as osp\nfrom asone.dete"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/events.py",
    "chars": 1880,
    "preview": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport yaml\nimport logging\nimport shutil\n\n\ndef set_logging(name="
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/figure_iou.py",
    "chars": 5739,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport math\nimport torch\n\n\nclass IOUloss:\n    \"\"\" Calculate IoU loss.\n    "
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/general.py",
    "chars": 2674,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport os\nimport glob\nimport torch\nfrom pathlib import Path\n\n\ndef incremen"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/torch_utils.py",
    "chars": 3419,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport time\nfrom contextlib import contextmanager\nfrom copy import deepco"
  },
  {
    "path": "asone/detectors/yolov6/yolov6/utils/yolov6_utils.py",
    "chars": 9628,
    "preview": "import time\nimport sys\nimport os\nimport numpy as np\nimport cv2\nimport torch.nn as nn\nimport torch\nimport torchvision\n\nfr"
  },
  {
    "path": "asone/detectors/yolov6/yolov6_detector.py",
    "chars": 5723,
    "preview": "import os\nimport sys\nfrom asone.utils import get_names\nimport numpy as np\nimport warnings\nimport torch\nimport onnxruntim"
  },
  {
    "path": "asone/detectors/yolov7/__init__.py",
    "chars": 72,
    "preview": "from .yolov7_detector import YOLOv7Detector\n__all__ = ['YOLOv7Detector']"
  },
  {
    "path": "asone/detectors/yolov7/yolov7/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov7/yolov7/models/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov7/yolov7/models/common.py",
    "chars": 84188,
    "preview": "import math\nfrom copy import copy\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimpor"
  },
  {
    "path": "asone/detectors/yolov7/yolov7/models/experimental.py",
    "chars": 1639,
    "preview": "import torch\nimport torch.nn as nn\n\nfrom asone.detectors.yolov7.yolov7.models.common import Conv\n\nclass Ensemble(nn.Modu"
  },
  {
    "path": "asone/detectors/yolov7/yolov7/models/yolo.py",
    "chars": 41743,
    "preview": "from asone.detectors.yolov7.yolov7.utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, "
  },
  {
    "path": "asone/detectors/yolov7/yolov7/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov7/yolov7/utils/torch_utils.py",
    "chars": 15464,
    "preview": "# YOLOR PyTorch utils\n\nimport datetime\nimport logging\nimport math\nimport os\nimport platform\nimport subprocess\nimport tim"
  },
  {
    "path": "asone/detectors/yolov7/yolov7/utils/yolov7_utils.py",
    "chars": 8212,
    "preview": "import cv2\nimport numpy as np\nimport torch\nimport torchvision\nimport time\n\ndef prepare_input(image, input_shape):\n    in"
  },
  {
    "path": "asone/detectors/yolov7/yolov7_detector.py",
    "chars": 6917,
    "preview": "import os\nimport sys\nimport onnxruntime\nimport torch\nimport coremltools as ct\nfrom asone.utils import get_names\nimport n"
  },
  {
    "path": "asone/detectors/yolov8/__init__.py",
    "chars": 72,
    "preview": "from .yolov8_detector import YOLOv8Detector\n__all__ = ['YOLOv8Detector']"
  },
  {
    "path": "asone/detectors/yolov8/utils/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov8/utils/yolov8_utils.py",
    "chars": 2033,
    "preview": "import cv2\nimport numpy as np\nfrom ultralytics.utils import ops\nimport torch\nfrom ultralytics.data.augment import Letter"
  },
  {
    "path": "asone/detectors/yolov8/yolov8_detector.py",
    "chars": 6014,
    "preview": "import os\nfrom asone import utils\nfrom asone.utils import get_names\nimport onnxruntime\nimport torch\nfrom asone.detectors"
  },
  {
    "path": "asone/detectors/yolov9/__init__.py",
    "chars": 72,
    "preview": "from .yolov9_detector import YOLOv9Detector\n__all__ = ['YOLOv9Detector']"
  },
  {
    "path": "asone/detectors/yolov9/export.py",
    "chars": 32539,
    "preview": "import argparse\nimport contextlib\nimport json\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport ti"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolov9/yolov9/models/__init__.py",
    "chars": 7,
    "preview": "# init\n"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/models/common.py",
    "chars": 54084,
    "preview": "import ast\nimport contextlib\nimport json\nimport math\nimport platform\nimport warnings\nimport zipfile\nfrom collections imp"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/models/experimental.py",
    "chars": 11697,
    "preview": "import math\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom asone.detectors.yolov9.yolov9.uti"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/models/tf.py",
    "chars": 26813,
    "preview": "import argparse\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nFILE = Path(__file__).resolve()\nROOT = FI"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/models/yolo.py",
    "chars": 38730,
    "preview": "import argparse\nimport os\nimport platform\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nFILE = Path(__f"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/__init__.py",
    "chars": 2201,
    "preview": "import contextlib\nimport platform\nimport threading\n\n\ndef emojis(str=''):\n    # Return platform-dependent emoji-safe vers"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/activations.py",
    "chars": 3373,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SiLU(nn.Module):\n    # SiLU activation https:"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/augmentations.py",
    "chars": 17119,
    "preview": "import math\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\nimport torchvision.transforms as T\nimport torchvis"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/autoanchor.py",
    "chars": 7418,
    "preview": "import random\n\nimport numpy as np\nimport torch\nimport yaml\nfrom tqdm import tqdm\n\nfrom asone.detectors.yolov9.yolov9.uti"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/autobatch.py",
    "chars": 2967,
    "preview": "from copy import deepcopy\n\nimport numpy as np\nimport torch\n\nfrom asone.detectors.yolov9.yolov9.utils.general import LOGG"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/callbacks.py",
    "chars": 2591,
    "preview": "import threading\n\n\nclass Callbacks:\n    \"\"\"\"\n    Handles all registered callbacks for YOLOv5 Hooks\n    \"\"\"\n\n    def __in"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/coco_utils.py",
    "chars": 3256,
    "preview": "import cv2\n\nfrom pycocotools.coco import COCO\nfrom pycocotools import mask as maskUtils\n\n# coco id: https://tech.amikeli"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/dataloaders.py",
    "chars": 55664,
    "preview": "import contextlib\nimport glob\nimport hashlib\nimport json\nimport math\nimport os\nimport random\nimport shutil\nimport time\nf"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/downloads.py",
    "chars": 4641,
    "preview": "import logging\nimport os\nimport subprocess\nimport urllib\nfrom pathlib import Path\n\nimport requests\nimport torch\n\n\ndef is"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/general.py",
    "chars": 47020,
    "preview": "import contextlib\nimport glob\nimport inspect\nimport logging\nimport logging.config\nimport math\nimport os\nimport platform\n"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/lion.py",
    "chars": 2518,
    "preview": "\"\"\"PyTorch implementation of the Lion optimizer.\"\"\"\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass Lio"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/loss.py",
    "chars": 16136,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom asone.detectors.yolov9.yolov9.utils.metrics imp"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/loss_tal.py",
    "chars": 9838,
    "preview": "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom asone.detectors.yolov9.yolov9.utils."
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/loss_tal_dual.py",
    "chars": 18187,
    "preview": "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom asone.detectors.yolov9.yolov9.utils."
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/loss_tal_triple.py",
    "chars": 13742,
    "preview": "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom asone.detectors.yolov9.yolov9.utils."
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/metrics.py",
    "chars": 15939,
    "preview": "import math\nimport warnings\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\nf"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/plots.py",
    "chars": 25356,
    "preview": "import contextlib\nimport math\nimport os\nfrom copy import copy\nfrom pathlib import Path\nfrom urllib.error import URLError"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/__init__.py",
    "chars": 6,
    "preview": "# init"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/augmentations.py",
    "chars": 3672,
    "preview": "import math\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom ..augmentations import box_candidates\nfrom ..general impo"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/dataloaders.py",
    "chars": 13851,
    "preview": "import os\nimport random\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, distributed"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/general.py",
    "chars": 4934,
    "preview": "import cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef crop_mask(masks, boxes):\n    \"\"\"\n    \"C"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/loss.py",
    "chars": 8620,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..general import xywh2xyxy\nfrom ..loss import F"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/loss_tal.py",
    "chars": 12024,
    "preview": "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision.ops import sigmoid_focal"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/loss_tal_dual.py",
    "chars": 34900,
    "preview": "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision.ops import sigmoid_focal"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/metrics.py",
    "chars": 5377,
    "preview": "import numpy as np\n\nfrom ..metrics import ap_per_class\n\n\ndef fitness(x):\n    # Model fitness as a weighted combination o"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/plots.py",
    "chars": 6390,
    "preview": "import contextlib\nimport math\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nim"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/tal/__init__.py",
    "chars": 6,
    "preview": "# init"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/tal/anchor_generator.py",
    "chars": 1557,
    "preview": "import torch\n\nfrom utils.general import check_version\n\nTORCH_1_10 = check_version(torch.__version__, '1.10.0')\n\n\ndef mak"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/segment/tal/assigner.py",
    "chars": 8316,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.metrics import bbox_iou\n\n\ndef select_cand"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/tal/__init__.py",
    "chars": 6,
    "preview": "# init"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/tal/anchor_generator.py",
    "chars": 1587,
    "preview": "import torch\n\nfrom asone.detectors.yolov9.yolov9.utils.general import check_version\n\nTORCH_1_10 = check_version(torch.__"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/tal/assigner.py",
    "chars": 8231,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils.metrics import bbox_iou\n\n\ndef select_cand"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/torch_utils.py",
    "chars": 23457,
    "preview": "import math\nimport os\nimport platform\nimport subprocess\nimport time\nimport warnings\nfrom contextlib import contextmanage"
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/triton.py",
    "chars": 3528,
    "preview": "import typing\nfrom urllib.parse import urlparse\n\nimport torch\n\n\nclass TritonRemoteModel:\n    \"\"\" A wrapper over a model "
  },
  {
    "path": "asone/detectors/yolov9/yolov9/utils/yolov9_utils.py",
    "chars": 8777,
    "preview": "import cv2\nimport numpy as np\nimport torch\nimport torchvision\nimport time\n\nfrom asone.detectors.yolov9.yolov9.utils.metr"
  },
  {
    "path": "asone/detectors/yolov9/yolov9_detector.py",
    "chars": 6981,
    "preview": "import os\nimport sys\nimport onnxruntime\nimport torch\nimport coremltools as ct\nfrom asone.utils import get_names\nimport n"
  },
  {
    "path": "asone/detectors/yolox/__init__.py",
    "chars": 69,
    "preview": "from .yolox_detector import YOLOxDetector\n__all__ = ['YOLOxDetector']"
  },
  {
    "path": "asone/detectors/yolox/exps/__init__.py",
    "chars": 95,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n"
  },
  {
    "path": "asone/detectors/yolox/exps/yolov3.py",
    "chars": 1042,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nimport torch."
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_l.py",
    "chars": 377,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom asone.de"
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_m.py",
    "chars": 379,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom asone.de"
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_nano.py",
    "chars": 1561,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nimport torch."
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_s.py",
    "chars": 379,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom asone.de"
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_tiny.py",
    "chars": 562,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom asone.de"
  },
  {
    "path": "asone/detectors/yolox/exps/yolox_x.py",
    "chars": 379,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom asone.de"
  },
  {
    "path": "asone/detectors/yolox/yolox/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "asone/detectors/yolox/yolox/core/__init__.py",
    "chars": 152,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .launch import laun"
  },
  {
    "path": "asone/detectors/yolox/yolox/core/launch.py",
    "chars": 4387,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Code are based on\n# https://github.com/facebookresearch/detectron2/blob/"
  },
  {
    "path": "asone/detectors/yolox/yolox/core/trainer.py",
    "chars": 13707,
    "preview": "#!/usr/bin/env python3\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport datetime\nimport os\nimport time\nfrom logu"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/__init__.py",
    "chars": 354,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .data_augment impor"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/data_augment.py",
    "chars": 7360,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\"\"\"\nData augmentation fun"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/data_prefetcher.py",
    "chars": 1649,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\n\n\nclass Dat"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/dataloading.py",
    "chars": 3671,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\nimport random\n"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/__init__.py",
    "chars": 325,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .coco import COCODa"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/coco.py",
    "chars": 6363,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\nimport copy\nimport os\n\nim"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/coco_classes.py",
    "chars": 1296,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nCOCO_CLASSES = (\n    \"pe"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/datasets_wrapper.py",
    "chars": 10878,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport bisect\nimport cop"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/mosaicdetection.py",
    "chars": 9573,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport random\n\nimport cv"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/voc.py",
    "chars": 11946,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Code are based on\n# https://github.com/fmassa/vision/blob/voc_dataset/to"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/datasets/voc_classes.py",
    "chars": 442,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\n# VOC_CLASSES = ( '__bac"
  },
  {
    "path": "asone/detectors/yolox/yolox/data/samplers.py",
    "chars": 2854,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport itertools\nfrom ty"
  },
  {
    "path": "asone/detectors/yolox/yolox/evaluators/__init__.py",
    "chars": 178,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .coco_evaluator imp"
  },
  {
    "path": "asone/detectors/yolox/yolox/evaluators/coco_evaluator.py",
    "chars": 11467,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport contextlib\nimport"
  },
  {
    "path": "asone/detectors/yolox/yolox/evaluators/voc_eval.py",
    "chars": 5631,
    "preview": "#!/usr/bin/env python3\n# Code are based on\n# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_e"
  },
  {
    "path": "asone/detectors/yolox/yolox/evaluators/voc_evaluator.py",
    "chars": 6564,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport sys\nimport tempfi"
  },
  {
    "path": "asone/detectors/yolox/yolox/exp/__init__.py",
    "chars": 181,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nfrom .base_exp import Ba"
  },
  {
    "path": "asone/detectors/yolox/yolox/exp/base_exp.py",
    "chars": 2017,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport ast\nimport pprint"
  },
  {
    "path": "asone/detectors/yolox/yolox/exp/build.py",
    "chars": 1334,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport importlib\nimport "
  },
  {
    "path": "asone/detectors/yolox/yolox/exp/default/__init__.py",
    "chars": 995,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\n# This file is used for "
  },
  {
    "path": "asone/detectors/yolox/yolox/exp/yolox_base.py",
    "chars": 12149,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport os\nimport random\n"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/__init__.py",
    "chars": 308,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nfrom .build import *\nfro"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/build.py",
    "chars": 4288,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport torch\nfrom torch import nn\nfrom torch.hub import load_state_dict_f"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/darknet.py",
    "chars": 6053,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nfrom torch import nn\n\n"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/losses.py",
    "chars": 1677,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport torch\nimport to"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/network_blocks.py",
    "chars": 6092,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport torch\nimport to"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/yolo_fpn.py",
    "chars": 2544,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport torch\nimport to"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/yolo_head.py",
    "chars": 23429,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport math\nfrom loguru "
  },
  {
    "path": "asone/detectors/yolox/yolox/models/yolo_pafpn.py",
    "chars": 3598,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport torch\nimport to"
  },
  {
    "path": "asone/detectors/yolox/yolox/models/yolox.py",
    "chars": 1432,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport torch.nn as nn\n"
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/__init__.py",
    "chars": 475,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nfrom .allreduce_norm imp"
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/allreduce_norm.py",
    "chars": 2868,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport pickle\nfrom colle"
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/boxes.py",
    "chars": 4521,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport numpy as np\n\nimpo"
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/checkpoint.py",
    "chars": 1312,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\nimport os\nimport shutil\nf"
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/compat.py",
    "chars": 310,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport torch\n\n_TORCH_VER = [int(x) for x in torch.__version__.split(\".\")["
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/demo_utils.py",
    "chars": 3829,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii Inc. All rights reserved.\n\nimport os\n\nimport numpy "
  },
  {
    "path": "asone/detectors/yolox/yolox/utils/dist.py",
    "chars": 8062,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# This file mainly comes from\n# https://github.com/facebookresearch/detect"
  }
]

// ... and 301 more files (download for full content)

About this extraction

This page contains the full source code of the augmentedstartups/AS-One GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 501 files (3.3 MB), approximately 882.7k tokens, and a symbol index with 4580 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!