Repository: MIV-XJTU/ARTrack Branch: main Commit: 5931f4af8dd3 Files: 756 Total size: 5.3 MB Directory structure: gitextract_o26t4wgy/ ├── ARTrack_env_cuda113.yaml ├── LICENSE ├── README.md ├── artrackv2_mindspore/ │ ├── .gitignore │ ├── README.md │ ├── experiments/ │ │ └── ostrack/ │ │ ├── 2stage_256_got.yaml │ │ ├── best_384.yaml │ │ ├── finetune.yaml │ │ ├── finetune_384.yaml │ │ ├── finetune_384_got.yaml │ │ ├── vitb_256_mae_32x4_ep300.yaml │ │ ├── vitb_256_mae_ce_32x4_ep300.yaml │ │ ├── vitb_256_mae_ce_32x4_got10k_ep100.yaml │ │ ├── vitb_384_mae_32x4_ep300.yaml │ │ ├── vitb_384_mae_ce_32x4_ep300.yaml │ │ └── vitb_384_mae_ce_32x4_got10k_ep100.yaml │ ├── external/ │ │ ├── AR/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── ltr/ │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── actors/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_actor.py │ │ │ │ │ ├── bbreg.py │ │ │ │ │ └── tracking.py │ │ │ │ ├── admin/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── environment.py │ │ │ │ │ ├── loading.py │ │ │ │ │ ├── model_constructor.py │ │ │ │ │ ├── multigpu.py │ │ │ │ │ ├── settings.py │ │ │ │ │ ├── stats.py │ │ │ │ │ └── tensorboard.py │ │ │ │ ├── data/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bounding_box_utils.py │ │ │ │ │ ├── image_loader.py │ │ │ │ │ ├── loader.py │ │ │ │ │ ├── processing.py │ │ │ │ │ ├── processing_utils.py │ │ │ │ │ ├── processing_utils_SE.py │ │ │ │ │ ├── sampler.py │ │ │ │ │ └── transforms.py │ │ │ │ ├── dataset/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_image_dataset.py │ │ │ │ │ ├── base_video_dataset.py │ │ │ │ │ ├── coco.py │ │ │ │ │ ├── coco_seq.py │ │ │ │ │ ├── davis.py │ │ │ │ │ ├── ecssd.py │ │ │ │ │ ├── got10k.py │ │ │ │ │ ├── hku_is.py │ │ │ │ │ ├── imagenetvid.py │ │ │ │ │ ├── lasot.py │ │ │ │ │ ├── lvis.py │ │ │ │ │ ├── msra10k.py │ │ │ │ │ ├── sbd.py │ │ │ │ │ ├── synthetic_video.py │ │ │ │ │ ├── synthetic_video_blend.py │ │ │ │ │ ├── tracking_net.py │ │ │ │ │ ├── vos_base.py │ │ │ │ │ └── youtubevos.py │ │ │ │ ├── external/ │ 
│ │ │ │ └── PreciseRoIPooling/ │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── LICENSE │ │ │ │ │ ├── README.md │ │ │ │ │ ├── pytorch/ │ │ │ │ │ │ ├── prroi_pool/ │ │ │ │ │ │ │ ├── .gitignore │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ ├── functional.py │ │ │ │ │ │ │ ├── prroi_pool.py │ │ │ │ │ │ │ └── src/ │ │ │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ │ │ └── tests/ │ │ │ │ │ │ └── test_prroi_pooling2d.py │ │ │ │ │ └── src/ │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ ├── models/ │ │ │ │ │ ├── AR_seg_mask/ │ │ │ │ │ │ ├── AR_seg_mask.py │ │ │ │ │ │ └── __init__.py │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── backbone/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ ├── resnet.py │ │ │ │ │ │ ├── resnet18_vggm.py │ │ │ │ │ │ └── resnet_seg.py │ │ │ │ │ ├── bbreg/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── atom.py │ │ │ │ │ │ └── atom_iou_net.py │ │ │ │ │ ├── head/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── seg_network.py │ │ │ │ │ │ └── utils.py │ │ │ │ │ ├── layers/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── activation.py │ │ │ │ │ │ ├── blocks.py │ │ │ │ │ │ ├── distance.py │ │ │ │ │ │ ├── filter.py │ │ │ │ │ │ ├── normalization.py │ │ │ │ │ │ └── transform.py │ │ │ │ │ ├── loss/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── kl_regression.py │ │ │ │ │ │ └── target_classification.py │ │ │ │ │ ├── meta/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── steepestdescent.py │ │ │ │ │ ├── neck/ │ │ │ │ │ │ ├── CorrNL.py │ │ │ │ │ │ └── neck_utils.py │ │ │ │ │ ├── target_classifier/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── features.py │ │ │ │ │ │ ├── initializer.py │ │ │ │ │ │ ├── linear_filter.py │ │ │ │ │ │ ├── optimizer.py │ │ │ │ │ │ └── residual_modules.py │ │ │ │ │ └── tracking/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── dimpnet.py │ │ │ │ ├── run_training.py │ │ │ │ ├── train_settings/ │ │ │ │ │ ├── 
__init__.py │ │ │ │ │ ├── bbreg/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── atom.py │ │ │ │ │ │ ├── atom_gmm_sampl.py │ │ │ │ │ │ ├── atom_paper.py │ │ │ │ │ │ └── atom_prob_ml.py │ │ │ │ │ └── dimp/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dimp18.py │ │ │ │ │ ├── dimp50.py │ │ │ │ │ ├── prdimp18.py │ │ │ │ │ ├── prdimp50.py │ │ │ │ │ └── super_dimp.py │ │ │ │ └── trainers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base_trainer.py │ │ │ │ └── ltr_trainer.py │ │ │ └── pytracking/ │ │ │ ├── ARcm_seg.py │ │ │ ├── VOT/ │ │ │ │ ├── tracker_DiMP.m │ │ │ │ ├── trackers.ini │ │ │ │ └── vot.py │ │ │ ├── VOT2020_super_only_mask_384_HP/ │ │ │ │ ├── dimp_alpha_065.py │ │ │ │ ├── dimp_alpha_seg_class.py │ │ │ │ ├── mixformer_alpha_seg_class.py │ │ │ │ ├── mixformer_large_alpha_seg_class.py │ │ │ │ └── vot.py │ │ │ ├── __init__.py │ │ │ ├── analysis/ │ │ │ │ ├── __init__.py │ │ │ │ ├── evaluate_vos.py │ │ │ │ ├── extract_results.py │ │ │ │ ├── playback_results.py │ │ │ │ ├── plot_results.py │ │ │ │ └── vos_utils.py │ │ │ ├── evaluation/ │ │ │ │ ├── __init__.py │ │ │ │ ├── data.py │ │ │ │ ├── datasets.py │ │ │ │ ├── environment.py │ │ │ │ ├── got10kdataset.py │ │ │ │ ├── lasotdataset.py │ │ │ │ ├── mobifacedataset.py │ │ │ │ ├── multi_object_wrapper.py │ │ │ │ ├── nfsdataset.py │ │ │ │ ├── otbdataset.py │ │ │ │ ├── running.py │ │ │ │ ├── tpldataset.py │ │ │ │ ├── tracker.py │ │ │ │ ├── trackingnetdataset.py │ │ │ │ ├── uavdataset.py │ │ │ │ ├── vot.py │ │ │ │ ├── vot2020.py │ │ │ │ └── votdataset.py │ │ │ ├── experiments/ │ │ │ │ ├── __init__.py │ │ │ │ └── myexperiments.py │ │ │ ├── features/ │ │ │ │ ├── __init__.py │ │ │ │ ├── augmentation.py │ │ │ │ ├── color.py │ │ │ │ ├── deep.py │ │ │ │ ├── extractor.py │ │ │ │ ├── featurebase.py │ │ │ │ ├── net_wrappers.py │ │ │ │ ├── preprocessing.py │ │ │ │ └── util.py │ │ │ ├── libs/ │ │ │ │ ├── __init__.py │ │ │ │ ├── complex.py │ │ │ │ ├── dcf.py │ │ │ │ ├── fourier.py │ │ │ │ ├── operation.py │ │ │ │ ├── optimization.py │ │ │ │ ├── 
tensordict.py │ │ │ │ └── tensorlist.py │ │ │ ├── parameter/ │ │ │ │ ├── __init__.py │ │ │ │ ├── atom/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atom_gmm_sampl.py │ │ │ │ │ ├── atom_prob_ml.py │ │ │ │ │ ├── default.py │ │ │ │ │ ├── default_vot.py │ │ │ │ │ └── multiscale_no_iounet.py │ │ │ │ ├── dimp/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dimp18.py │ │ │ │ │ ├── dimp18_vot.py │ │ │ │ │ ├── dimp50.py │ │ │ │ │ ├── dimp50_vot.py │ │ │ │ │ ├── dimp50_vot19.py │ │ │ │ │ ├── prdimp18.py │ │ │ │ │ ├── prdimp50.py │ │ │ │ │ └── super_dimp.py │ │ │ │ └── eco/ │ │ │ │ ├── __init__.py │ │ │ │ └── default.py │ │ │ ├── tracker/ │ │ │ │ ├── __init__.py │ │ │ │ ├── atom/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atom.py │ │ │ │ │ └── optim.py │ │ │ │ ├── base/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── basetracker.py │ │ │ │ ├── dimp/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── dimp.py │ │ │ │ └── eco/ │ │ │ │ ├── __init__.py │ │ │ │ ├── eco.py │ │ │ │ └── optim.py │ │ │ ├── util_scripts/ │ │ │ │ ├── __init__.py │ │ │ │ ├── download_results.py │ │ │ │ ├── pack_got10k_results.py │ │ │ │ └── pack_trackingnet_results.py │ │ │ ├── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── convert_vot_anno_to_rect.py │ │ │ │ ├── load_text.py │ │ │ │ ├── loading.py │ │ │ │ ├── params.py │ │ │ │ ├── plotting.py │ │ │ │ └── visdom.py │ │ │ └── vot20_utils.py │ │ ├── PreciseRoIPooling/ │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── pytorch/ │ │ │ │ ├── prroi_pool/ │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── functional.py │ │ │ │ │ ├── prroi_pool.py │ │ │ │ │ └── src/ │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ └── tests/ │ │ │ │ └── test_prroi_pooling2d.py │ │ │ └── src/ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ └── vot20/ │ │ └── cttrack/ │ │ ├── config.yaml │ │ └── trackers.ini │ ├── lib/ │ │ ├── __init__.py │ │ ├── 
config/ │ │ │ ├── __init__.py │ │ │ └── ostrack/ │ │ │ └── config.py │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── component/ │ │ │ │ ├── __init__.py │ │ │ │ ├── attention.py │ │ │ │ ├── block.py │ │ │ │ ├── drop.py │ │ │ │ ├── mlp.py │ │ │ │ ├── norm.py │ │ │ │ ├── patch_embed.py │ │ │ │ ├── pos_embed.py │ │ │ │ └── weight_init.py │ │ │ ├── layers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── attn.py │ │ │ │ ├── attn_blocks.py │ │ │ │ ├── head.py │ │ │ │ ├── mask_decoder.py │ │ │ │ ├── patch_embed.py │ │ │ │ ├── rpe.py │ │ │ │ └── self_practice.py │ │ │ ├── ostrack/ │ │ │ │ ├── Vit_model_test.py │ │ │ │ ├── __init__.py │ │ │ │ ├── base_backbone.py │ │ │ │ ├── load_parameter_test.py │ │ │ │ ├── ostrack.py │ │ │ │ ├── ostrack_test.py │ │ │ │ ├── utils.py │ │ │ │ ├── vit.py │ │ │ │ └── vit_ce.py │ │ │ └── timm.py │ │ ├── train/ │ │ │ ├── __init__.py │ │ │ ├── _init_paths.py │ │ │ ├── actors/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base_actor.py │ │ │ │ └── ostrack.py │ │ │ ├── admin/ │ │ │ │ ├── __init__.py │ │ │ │ ├── environment.py │ │ │ │ ├── local.py │ │ │ │ ├── settings.py │ │ │ │ ├── stats.py │ │ │ │ └── tensorboard.py │ │ │ ├── base_functions.py │ │ │ ├── data/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bounding_box_utils.py │ │ │ │ ├── image_loader.py │ │ │ │ ├── loader.py │ │ │ │ ├── processing.py │ │ │ │ ├── processing_utils.py │ │ │ │ ├── sampler.py │ │ │ │ ├── sequence_sampler.py │ │ │ │ ├── transforms.py │ │ │ │ └── wandb_logger.py │ │ │ ├── data_specs/ │ │ │ │ ├── README.md │ │ │ │ ├── got10k_train_full_split.txt │ │ │ │ ├── got10k_train_split.txt │ │ │ │ ├── got10k_val_split.txt │ │ │ │ ├── got10k_vot_exclude.txt │ │ │ │ ├── got10k_vot_train_split.txt │ │ │ │ ├── got10k_vot_val_split.txt │ │ │ │ ├── lasot_train_split.txt │ │ │ │ └── trackingnet_classmap.txt │ │ │ ├── dataset/ │ │ │ │ ├── COCO_tool.py │ │ │ │ ├── __init__.py │ │ │ │ ├── base_image_dataset.py │ │ │ │ ├── base_video_dataset.py │ │ │ │ ├── coco.py │ │ │ │ ├── coco_seq.py │ │ │ │ ├── coco_seq_lmdb.py │ │ │ │ 
├── got10k.py │ │ │ │ ├── got10k_lmdb.py │ │ │ │ ├── imagenetvid.py │ │ │ │ ├── imagenetvid_lmdb.py │ │ │ │ ├── lasot.py │ │ │ │ ├── lasot_lmdb.py │ │ │ │ ├── tracking_net.py │ │ │ │ └── tracking_net_lmdb.py │ │ │ ├── run_training.py │ │ │ ├── train_script.py │ │ │ ├── train_script_distill.py │ │ │ └── trainers/ │ │ │ ├── __init__.py │ │ │ ├── base_trainer.py │ │ │ └── ltr_trainer.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── box_ops.py │ │ │ ├── ce_utils.py │ │ │ ├── focal_loss.py │ │ │ ├── heapmap_utils.py │ │ │ ├── image.py │ │ │ ├── lmdb_utils.py │ │ │ ├── merge.py │ │ │ ├── misc.py │ │ │ ├── tensor.py │ │ │ └── variable_hook.py │ │ └── vis/ │ │ ├── __init__.py │ │ ├── plotting.py │ │ └── utils.py │ └── tracking/ │ ├── _init_paths.py │ ├── analysis_results.ipynb │ ├── analysis_results.py │ ├── analysis_results_ITP.py │ ├── convert_transt.py │ ├── create_default_local_file.py │ ├── download_pytracking_results.py │ ├── pre_read_datasets.py │ ├── profile_model.py │ ├── test.py │ ├── test_exp.py │ ├── train.py │ ├── video_demo.py │ └── vis_results.py ├── experiments/ │ ├── artrack/ │ │ ├── artrack_256_full.yaml │ │ ├── artrack_256_got.yaml │ │ ├── artrack_384_full.yaml │ │ └── artrack_large_384_full.yaml │ ├── artrack_seq/ │ │ ├── artrack_seq_256_full.yaml │ │ ├── artrack_seq_256_got.yaml │ │ ├── artrack_seq_384_full.yaml │ │ └── artrack_seq_large_384_full.yaml │ ├── artrackv2/ │ │ ├── artrackv2_256_full.yaml │ │ ├── artrackv2_256_got.yaml │ │ └── artrackv2_large_384_got.yaml │ └── artrackv2_seq/ │ ├── artrackv2_seq_256_full.yaml │ ├── artrackv2_seq_256_got.yaml │ └── artrackv2_seq_large_384_got.yaml ├── external/ │ ├── AR/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── ltr/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── actors/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base_actor.py │ │ │ │ ├── bbreg.py │ │ │ │ └── tracking.py │ │ │ ├── admin/ │ │ │ │ ├── __init__.py │ │ │ │ ├── environment.py │ │ │ │ ├── loading.py │ │ │ │ ├── model_constructor.py │ │ │ │ ├── 
multigpu.py │ │ │ │ ├── settings.py │ │ │ │ ├── stats.py │ │ │ │ └── tensorboard.py │ │ │ ├── data/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bounding_box_utils.py │ │ │ │ ├── image_loader.py │ │ │ │ ├── loader.py │ │ │ │ ├── processing.py │ │ │ │ ├── processing_utils.py │ │ │ │ ├── processing_utils_SE.py │ │ │ │ ├── sampler.py │ │ │ │ └── transforms.py │ │ │ ├── dataset/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base_image_dataset.py │ │ │ │ ├── base_video_dataset.py │ │ │ │ ├── coco.py │ │ │ │ ├── coco_seq.py │ │ │ │ ├── davis.py │ │ │ │ ├── ecssd.py │ │ │ │ ├── got10k.py │ │ │ │ ├── hku_is.py │ │ │ │ ├── imagenetvid.py │ │ │ │ ├── lasot.py │ │ │ │ ├── lvis.py │ │ │ │ ├── msra10k.py │ │ │ │ ├── sbd.py │ │ │ │ ├── synthetic_video.py │ │ │ │ ├── synthetic_video_blend.py │ │ │ │ ├── tracking_net.py │ │ │ │ ├── vos_base.py │ │ │ │ └── youtubevos.py │ │ │ ├── external/ │ │ │ │ └── PreciseRoIPooling/ │ │ │ │ ├── .gitignore │ │ │ │ ├── LICENSE │ │ │ │ ├── README.md │ │ │ │ ├── pytorch/ │ │ │ │ │ ├── prroi_pool/ │ │ │ │ │ │ ├── .gitignore │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── functional.py │ │ │ │ │ │ ├── prroi_pool.py │ │ │ │ │ │ └── src/ │ │ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ │ │ └── tests/ │ │ │ │ │ └── test_prroi_pooling2d.py │ │ │ │ └── src/ │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ ├── models/ │ │ │ │ ├── AR_seg_mask/ │ │ │ │ │ ├── AR_seg_mask.py │ │ │ │ │ └── __init__.py │ │ │ │ ├── __init__.py │ │ │ │ ├── backbone/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base.py │ │ │ │ │ ├── resnet.py │ │ │ │ │ ├── resnet18_vggm.py │ │ │ │ │ └── resnet_seg.py │ │ │ │ ├── bbreg/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atom.py │ │ │ │ │ └── atom_iou_net.py │ │ │ │ ├── head/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── seg_network.py │ │ │ │ │ └── utils.py │ │ │ │ ├── layers/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── activation.py │ 
│ │ │ │ ├── blocks.py │ │ │ │ │ ├── distance.py │ │ │ │ │ ├── filter.py │ │ │ │ │ ├── normalization.py │ │ │ │ │ └── transform.py │ │ │ │ ├── loss/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── kl_regression.py │ │ │ │ │ └── target_classification.py │ │ │ │ ├── meta/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── steepestdescent.py │ │ │ │ ├── neck/ │ │ │ │ │ ├── CorrNL.py │ │ │ │ │ └── neck_utils.py │ │ │ │ ├── target_classifier/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── features.py │ │ │ │ │ ├── initializer.py │ │ │ │ │ ├── linear_filter.py │ │ │ │ │ ├── optimizer.py │ │ │ │ │ └── residual_modules.py │ │ │ │ └── tracking/ │ │ │ │ ├── __init__.py │ │ │ │ └── dimpnet.py │ │ │ ├── run_training.py │ │ │ ├── train_settings/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bbreg/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atom.py │ │ │ │ │ ├── atom_gmm_sampl.py │ │ │ │ │ ├── atom_paper.py │ │ │ │ │ └── atom_prob_ml.py │ │ │ │ └── dimp/ │ │ │ │ ├── __init__.py │ │ │ │ ├── dimp18.py │ │ │ │ ├── dimp50.py │ │ │ │ ├── prdimp18.py │ │ │ │ ├── prdimp50.py │ │ │ │ └── super_dimp.py │ │ │ └── trainers/ │ │ │ ├── __init__.py │ │ │ ├── base_trainer.py │ │ │ └── ltr_trainer.py │ │ └── pytracking/ │ │ ├── ARcm_seg.py │ │ ├── VOT/ │ │ │ ├── tracker_DiMP.m │ │ │ ├── trackers.ini │ │ │ └── vot.py │ │ ├── VOT2020_super_only_mask_384_HP/ │ │ │ ├── dimp_alpha_065.py │ │ │ ├── dimp_alpha_seg_class.py │ │ │ ├── mixformer_alpha_seg_class.py │ │ │ ├── mixformer_large_alpha_seg_class.py │ │ │ └── vot.py │ │ ├── __init__.py │ │ ├── analysis/ │ │ │ ├── __init__.py │ │ │ ├── evaluate_vos.py │ │ │ ├── extract_results.py │ │ │ ├── playback_results.py │ │ │ ├── plot_results.py │ │ │ └── vos_utils.py │ │ ├── evaluation/ │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── datasets.py │ │ │ ├── environment.py │ │ │ ├── got10kdataset.py │ │ │ ├── lasotdataset.py │ │ │ ├── mobifacedataset.py │ │ │ ├── multi_object_wrapper.py │ │ │ ├── nfsdataset.py │ │ │ ├── otbdataset.py │ │ │ ├── running.py │ │ │ ├── tpldataset.py │ │ │ ├── 
tracker.py │ │ │ ├── trackingnetdataset.py │ │ │ ├── uavdataset.py │ │ │ ├── vot.py │ │ │ ├── vot2020.py │ │ │ └── votdataset.py │ │ ├── experiments/ │ │ │ ├── __init__.py │ │ │ └── myexperiments.py │ │ ├── features/ │ │ │ ├── __init__.py │ │ │ ├── augmentation.py │ │ │ ├── color.py │ │ │ ├── deep.py │ │ │ ├── extractor.py │ │ │ ├── featurebase.py │ │ │ ├── net_wrappers.py │ │ │ ├── preprocessing.py │ │ │ └── util.py │ │ ├── libs/ │ │ │ ├── __init__.py │ │ │ ├── complex.py │ │ │ ├── dcf.py │ │ │ ├── fourier.py │ │ │ ├── operation.py │ │ │ ├── optimization.py │ │ │ ├── tensordict.py │ │ │ └── tensorlist.py │ │ ├── parameter/ │ │ │ ├── __init__.py │ │ │ ├── atom/ │ │ │ │ ├── __init__.py │ │ │ │ ├── atom_gmm_sampl.py │ │ │ │ ├── atom_prob_ml.py │ │ │ │ ├── default.py │ │ │ │ ├── default_vot.py │ │ │ │ └── multiscale_no_iounet.py │ │ │ ├── dimp/ │ │ │ │ ├── __init__.py │ │ │ │ ├── dimp18.py │ │ │ │ ├── dimp18_vot.py │ │ │ │ ├── dimp50.py │ │ │ │ ├── dimp50_vot.py │ │ │ │ ├── dimp50_vot19.py │ │ │ │ ├── prdimp18.py │ │ │ │ ├── prdimp50.py │ │ │ │ └── super_dimp.py │ │ │ └── eco/ │ │ │ ├── __init__.py │ │ │ └── default.py │ │ ├── tracker/ │ │ │ ├── __init__.py │ │ │ ├── atom/ │ │ │ │ ├── __init__.py │ │ │ │ ├── atom.py │ │ │ │ └── optim.py │ │ │ ├── base/ │ │ │ │ ├── __init__.py │ │ │ │ └── basetracker.py │ │ │ ├── dimp/ │ │ │ │ ├── __init__.py │ │ │ │ └── dimp.py │ │ │ └── eco/ │ │ │ ├── __init__.py │ │ │ ├── eco.py │ │ │ └── optim.py │ │ ├── util_scripts/ │ │ │ ├── __init__.py │ │ │ ├── download_results.py │ │ │ ├── pack_got10k_results.py │ │ │ └── pack_trackingnet_results.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── convert_vot_anno_to_rect.py │ │ │ ├── load_text.py │ │ │ ├── loading.py │ │ │ ├── params.py │ │ │ ├── plotting.py │ │ │ └── visdom.py │ │ └── vot20_utils.py │ ├── PreciseRoIPooling/ │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── pytorch/ │ │ │ ├── prroi_pool/ │ │ │ │ ├── .gitignore │ │ │ │ ├── __init__.py │ │ │ │ ├── functional.py │ │ │ 
│ ├── prroi_pool.py │ │ │ │ └── src/ │ │ │ │ ├── prroi_pooling_gpu.c │ │ │ │ ├── prroi_pooling_gpu.h │ │ │ │ ├── prroi_pooling_gpu_impl.cu │ │ │ │ └── prroi_pooling_gpu_impl.cuh │ │ │ └── tests/ │ │ │ └── test_prroi_pooling2d.py │ │ └── src/ │ │ ├── prroi_pooling_gpu_impl.cu │ │ └── prroi_pooling_gpu_impl.cuh │ └── vot20/ │ └── cttrack/ │ ├── config.yaml │ └── trackers.ini ├── lib/ │ ├── __init__.py │ ├── config/ │ │ ├── __init__.py │ │ ├── artrack/ │ │ │ └── config.py │ │ ├── artrack_seq/ │ │ │ └── config.py │ │ ├── artrackv2/ │ │ │ └── config.py │ │ └── artrackv2_seq/ │ │ └── config.py │ ├── models/ │ │ ├── __init__.py │ │ ├── artrack/ │ │ │ ├── __init__.py │ │ │ ├── artrack.py │ │ │ ├── base_backbone.py │ │ │ ├── utils.py │ │ │ ├── vit.py │ │ │ └── vit_ce.py │ │ ├── artrack_seq/ │ │ │ ├── __init__.py │ │ │ ├── artrack_seq.py │ │ │ ├── base_backbone.py │ │ │ ├── utils.py │ │ │ ├── vit.py │ │ │ └── vit_ce.py │ │ ├── artrackv2/ │ │ │ ├── __init__.py │ │ │ ├── artrackv2.py │ │ │ ├── base_backbone.py │ │ │ ├── utils.py │ │ │ └── vit.py │ │ ├── artrackv2_seq/ │ │ │ ├── __init__.py │ │ │ ├── artrackv2_seq.py │ │ │ ├── base_backbone.py │ │ │ ├── utils.py │ │ │ └── vit.py │ │ ├── layers/ │ │ │ ├── __init__.py │ │ │ ├── attn.py │ │ │ ├── attn_blocks.py │ │ │ ├── frozen_bn.py │ │ │ ├── head.py │ │ │ ├── head_seq.py │ │ │ ├── mask_decoder.py │ │ │ ├── patch_embed.py │ │ │ └── rpe.py │ │ └── mask_decoder/ │ │ ├── __init__.py │ │ ├── attention.py │ │ ├── block.py │ │ ├── drop.py │ │ ├── mlp.py │ │ ├── norm.py │ │ ├── patch_embed.py │ │ ├── pos_embed.py │ │ └── weight_init.py │ ├── test/ │ │ ├── __init__.py │ │ ├── analysis/ │ │ │ ├── __init__.py │ │ │ ├── extract_results.py │ │ │ └── plot_results.py │ │ ├── evaluation/ │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── datasets.py │ │ │ ├── environment.py │ │ │ ├── got10kdataset.py │ │ │ ├── itbdataset.py │ │ │ ├── lasot_lmdbdataset.py │ │ │ ├── lasotdataset.py │ │ │ ├── lasotextensionsubsetdataset.py │ │ │ ├── local.py │ │ │ 
├── nfsdataset.py │ │ │ ├── otbdataset.py │ │ │ ├── running.py │ │ │ ├── tc128cedataset.py │ │ │ ├── tc128dataset.py │ │ │ ├── tnl2kdataset.py │ │ │ ├── tracker.py │ │ │ ├── trackingnetdataset.py │ │ │ ├── uavdataset.py │ │ │ └── votdataset.py │ │ ├── parameter/ │ │ │ ├── __init__.py │ │ │ ├── artrack.py │ │ │ ├── artrack_seq.py │ │ │ ├── artrackv2.py │ │ │ └── artrackv2_seq.py │ │ ├── tracker/ │ │ │ ├── __init__.py │ │ │ ├── artrack.py │ │ │ ├── artrack_seq.py │ │ │ ├── artrackv2.py │ │ │ ├── artrackv2_seq.py │ │ │ ├── basetracker.py │ │ │ ├── data_utils.py │ │ │ └── vis_utils.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── _init_paths.py │ │ ├── hann.py │ │ ├── load_text.py │ │ ├── params.py │ │ ├── transform_got10k.py │ │ └── transform_trackingnet.py │ ├── train/ │ │ ├── __init__.py │ │ ├── _init_paths.py │ │ ├── actors/ │ │ │ ├── __init__.py │ │ │ ├── artrack.py │ │ │ ├── artrack_seq.py │ │ │ ├── artrackv2.py │ │ │ ├── artrackv2_seq.py │ │ │ └── base_actor.py │ │ ├── admin/ │ │ │ ├── __init__.py │ │ │ ├── environment.py │ │ │ ├── local.py │ │ │ ├── multigpu.py │ │ │ ├── settings.py │ │ │ ├── stats.py │ │ │ └── tensorboard.py │ │ ├── base_functions.py │ │ ├── data/ │ │ │ ├── __init__.py │ │ │ ├── bounding_box_utils.py │ │ │ ├── image_loader.py │ │ │ ├── loader.py │ │ │ ├── processing.py │ │ │ ├── processing_utils.py │ │ │ ├── sampler.py │ │ │ ├── sequence_sampler.py │ │ │ ├── sequence_sampler_v2.py │ │ │ ├── transforms.py │ │ │ └── wandb_logger.py │ │ ├── data_specs/ │ │ │ ├── README.md │ │ │ ├── got10k_train_full_split.txt │ │ │ ├── got10k_train_split.txt │ │ │ ├── got10k_val_split.txt │ │ │ ├── got10k_vot_exclude.txt │ │ │ ├── got10k_vot_train_split.txt │ │ │ ├── got10k_vot_val_split.txt │ │ │ ├── lasot_train_split.txt │ │ │ └── trackingnet_classmap.txt │ │ ├── dataset/ │ │ │ ├── COCO_tool.py │ │ │ ├── __init__.py │ │ │ ├── base_image_dataset.py │ │ │ ├── base_video_dataset.py │ │ │ ├── coco.py │ │ │ ├── coco_seq.py │ │ │ ├── coco_seq_lmdb.py │ │ │ ├── got10k.py 
│ │ │ ├── got10k_lmdb.py │ │ │ ├── imagenetvid.py │ │ │ ├── imagenetvid_lmdb.py │ │ │ ├── lasot.py │ │ │ ├── lasot_lmdb.py │ │ │ ├── tracking_net.py │ │ │ └── tracking_net_lmdb.py │ │ ├── run_training.py │ │ ├── train_script.py │ │ ├── train_script_distill.py │ │ └── trainers/ │ │ ├── __init__.py │ │ ├── base_trainer.py │ │ ├── ltr_seq_trainer.py │ │ ├── ltr_seq_trainer_v2.py │ │ └── ltr_trainer.py │ ├── utils/ │ │ ├── __init__.py │ │ ├── box_ops.py │ │ ├── ce_utils.py │ │ ├── focal_loss.py │ │ ├── heapmap_utils.py │ │ ├── image.py │ │ ├── lmdb_utils.py │ │ ├── merge.py │ │ ├── misc.py │ │ ├── tensor.py │ │ └── variable_hook.py │ └── vis/ │ ├── __init__.py │ ├── plotting.py │ ├── utils.py │ └── visdom_cus.py └── tracking/ ├── _init_paths.py ├── analysis_results.py ├── analysis_results_ITP.py ├── convert_transt.py ├── create_default_local_file.py ├── download_pytracking_results.py ├── pre_read_datasets.py ├── test.py ├── test_exp.py └── train.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: ARTrack_env_cuda113.yaml ================================================ name: artrack channels: - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main - defaults dependencies: - ca-certificates=2021.10.8=ha878542_0 - certifi=2021.10.8=py39hf3d152e_2 - fvcore=0.1.5.post20220305=pyhd8ed1ab_0 - portalocker=2.4.0=py39hf3d152e_0 - python_abi=3.9=2_cp39 - tabulate=0.8.9=pyhd8ed1ab_0 - termcolor=1.1.0=py_2 - yacs=0.1.8=pyhd8ed1ab_0 - ffmpeg=4.3=hf484d3e_0 - pytorch=1.11.0=py3.9_cuda11.3_cudnn8.2.0_0 - pytorch-mutex=1.0=cuda - torchvision=0.12.0=py39_cu113 - _anaconda_depends=2021.11=py39_0 - _libgcc_mutex=0.1=main - _openmp_mutex=4.5=1_gnu - alabaster=0.7.12=pyhd3eb1b0_0 - anaconda-client=1.9.0=py39h06a4308_0 - anaconda=custom=py39_1 - 
anaconda-project=0.10.2=pyhd3eb1b0_0 - anyio=3.5.0=py39h06a4308_0 - appdirs=1.4.4=pyhd3eb1b0_0 - argh=0.26.2=py39h06a4308_0 - argon2-cffi=21.3.0=pyhd3eb1b0_0 - argon2-cffi-bindings=21.2.0=py39h7f8727e_0 - arrow=0.13.1=py39h06a4308_0 - asn1crypto=1.4.0=py_0 - astroid=2.6.6=py39h06a4308_0 - astropy=5.0.3=py39hce1f21e_0 - asttokens=2.0.5=pyhd3eb1b0_0 - async_generator=1.10=pyhd3eb1b0_0 - atomicwrites=1.4.0=py_0 - attrs=21.4.0=pyhd3eb1b0_0 - autopep8=1.6.0=pyhd3eb1b0_0 - babel=2.9.1=pyhd3eb1b0_0 - backcall=0.2.0=pyhd3eb1b0_0 - backports=1.1=pyhd3eb1b0_0 - backports.shutil_get_terminal_size=1.0.0=pyhd3eb1b0_3 - beautifulsoup4=4.10.0=pyh06a4308_0 - binaryornot=0.4.4=pyhd3eb1b0_1 - bitarray=2.4.1=py39h7f8727e_0 - bkcharts=0.2=py39h06a4308_0 - black=19.10b0=py_0 - blas=1.0=mkl - bleach=4.1.0=pyhd3eb1b0_0 - blosc=1.21.0=h8c45485_0 - bokeh=2.4.2=py39h06a4308_0 - boto=2.49.0=py39h06a4308_0 - bottleneck=1.3.4=py39hce1f21e_0 - brotli=1.0.9=he6710b0_2 - brotlipy=0.7.0=py39h27cfd23_1003 - brunsli=0.1=h2531618_0 - bzip2=1.0.8=h7b6447c_0 - c-ares=1.18.1=h7f8727e_0 - cached-property=1.5.2=py_0 - cairo=1.16.0=hf32fb01_1 - cffi=1.15.0=py39hd667e15_1 - cfitsio=3.470=hf0d0db6_6 - chardet=4.0.0=py39h06a4308_1003 - charls=2.2.0=h2531618_0 - charset-normalizer=2.0.4=pyhd3eb1b0_0 - click=8.0.4=py39h06a4308_0 - cloudpickle=2.0.0=pyhd3eb1b0_0 - clyent=1.2.2=py39h06a4308_1 - colorama=0.4.4=pyhd3eb1b0_0 - conda=4.12.0=py39h06a4308_0 - conda-content-trust=0.1.1=pyhd3eb1b0_0 - conda-pack=0.6.0=pyhd3eb1b0_0 - conda-package-handling=1.8.0=py39h7f8727e_0 - conda-token=0.3.0=pyhd3eb1b0_0 - contextlib2=0.6.0.post1=pyhd3eb1b0_0 - cookiecutter=1.7.2=pyhd3eb1b0_0 - cryptography=36.0.0=py39h9ce1e76_0 - cudatoolkit=11.3.1=h2bc3f7f_2 - curl=7.80.0=h7f8727e_0 - cycler=0.11.0=pyhd3eb1b0_0 - cython=0.29.28=py39h295c915_0 - cytoolz=0.11.0=py39h27cfd23_0 - daal4py=2021.5.0=py39h78b71dc_0 - dal=2021.5.1=h06a4308_803 - dask=2022.2.1=pyhd3eb1b0_0 - dask-core=2022.2.1=pyhd3eb1b0_0 - dataclasses=0.8=pyh6d0b6a4_7 - 
dbus=1.13.18=hb2f20db_0 - debugpy=1.5.1=py39h295c915_0 - decorator=5.1.1=pyhd3eb1b0_0 - defusedxml=0.7.1=pyhd3eb1b0_0 - diff-match-patch=20200713=pyhd3eb1b0_0 - distributed=2022.2.1=pyhd3eb1b0_0 - docutils=0.17.1=py39h06a4308_1 - entrypoints=0.3=py39h06a4308_0 - et_xmlfile=1.1.0=py39h06a4308_0 - executing=0.8.3=pyhd3eb1b0_0 - expat=2.4.4=h295c915_0 - fastcache=1.1.0=py39he8ac12f_0 - filelock=3.6.0=pyhd3eb1b0_0 - flake8=3.9.2=pyhd3eb1b0_0 - flask=1.1.2=pyhd3eb1b0_0 - fontconfig=2.13.1=h6c09931_0 - fonttools=4.25.0=pyhd3eb1b0_0 - freetype=2.11.0=h70c0345_0 - fribidi=1.0.10=h7b6447c_0 - fsspec=2022.2.0=pyhd3eb1b0_0 - get_terminal_size=1.0.0=haa9412d_0 - gevent=21.8.0=py39h7f8727e_1 - giflib=5.2.1=h7b6447c_0 - glib=2.69.1=h4ff587b_1 - glob2=0.7=pyhd3eb1b0_0 - gmp=6.2.1=h2531618_2 - gmpy2=2.1.2=py39heeb90bb_0 - gnutls=3.6.15=he1e5248_0 - graphite2=1.3.14=h23475e2_0 - greenlet=1.1.1=py39h295c915_0 - gst-plugins-base=1.14.0=h8213a91_2 - gstreamer=1.14.0=h28cd5cc_2 - h5py=3.6.0=py39ha0f2276_0 - harfbuzz=2.8.1=h6f93f22_0 - hdf5=1.10.6=hb1b8bf9_0 - heapdict=1.0.1=pyhd3eb1b0_0 - html5lib=1.1=pyhd3eb1b0_0 - icu=58.2=he6710b0_3 - idna=3.3=pyhd3eb1b0_0 - imagecodecs=2021.8.26=py39h4cda21f_0 - imageio=2.9.0=pyhd3eb1b0_0 - imagesize=1.3.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py39h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - inflection=0.5.1=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - intel-openmp=2021.4.0=h06a4308_3561 - intervaltree=3.1.0=pyhd3eb1b0_0 - ipykernel=6.9.1=py39h06a4308_0 - ipython=8.1.1=py39h06a4308_0 - ipython_genutils=0.2.0=pyhd3eb1b0_1 - ipywidgets=7.6.5=pyhd3eb1b0_1 - isort=5.9.3=pyhd3eb1b0_0 - itsdangerous=2.0.1=pyhd3eb1b0_0 - jbig=2.1=hdba287a_0 - jdcal=1.4.1=pyhd3eb1b0_0 - jedi=0.18.1=py39h06a4308_1 - jeepney=0.7.1=pyhd3eb1b0_0 - jinja2=2.11.3=pyhd3eb1b0_0 - jinja2-time=0.2.0=pyhd3eb1b0_2 - joblib=1.1.0=pyhd3eb1b0_0 - jpeg=9d=h7f8727e_0 - json5=0.9.6=pyhd3eb1b0_0 - jsonschema=3.2.0=pyhd3eb1b0_2 - jupyter=1.0.0=py39h06a4308_7 - 
jupyter_client=6.1.12=pyhd3eb1b0_0 - jupyter_console=6.4.0=pyhd3eb1b0_0 - jupyter_core=4.9.2=py39h06a4308_0 - jupyter_server=1.13.5=pyhd3eb1b0_0 - jupyterlab=3.3.2=pyhd3eb1b0_0 - jupyterlab_pygments=0.1.2=py_0 - jupyterlab_server=2.10.3=pyhd3eb1b0_1 - jupyterlab_widgets=1.0.0=pyhd3eb1b0_1 - jxrlib=1.1=h7b6447c_2 - keyring=23.4.0=py39h06a4308_0 - kiwisolver=1.3.2=py39h295c915_0 - krb5=1.19.2=hac12032_0 - lame=3.100=h7b6447c_0 - lazy-object-proxy=1.6.0=py39h27cfd23_0 - lcms2=2.12=h3be6417_0 - ld_impl_linux-64=2.35.1=h7274673_9 - lerc=3.0=h295c915_0 - libaec=1.0.4=he6710b0_1 - libarchive=3.4.2=h62408e4_0 - libcurl=7.80.0=h0b77cf5_0 - libdeflate=1.8=h7f8727e_5 - libedit=3.1.20210910=h7f8727e_0 - libev=4.33=h7f8727e_1 - libffi=3.3=he6710b0_2 - libgcc-ng=9.3.0=h5101ec6_17 - libgfortran-ng=7.5.0=ha8ba4b0_17 - libgfortran4=7.5.0=ha8ba4b0_17 - libgomp=9.3.0=h5101ec6_17 - libiconv=1.15=h63c8f33_5 - libidn2=2.3.2=h7f8727e_0 - liblief=0.11.5=h295c915_1 - libllvm11=11.1.0=h3826bc1_1 - libnghttp2=1.46.0=hce63b2e_0 - libpng=1.6.37=hbc83047_0 - libsodium=1.0.18=h7b6447c_0 - libspatialindex=1.9.3=h2531618_0 - libssh2=1.9.0=h1ba5d50_1 - libstdcxx-ng=9.3.0=hd4cf53a_17 - libtasn1=4.16.0=h27cfd23_0 - libtiff=4.2.0=h85742a9_0 - libtool=2.4.6=h295c915_1008 - libunistring=0.9.10=h27cfd23_0 - libuuid=1.0.3=h7f8727e_2 - libuv=1.40.0=h7b6447c_0 - libwebp=1.2.2=h55f646e_0 - libwebp-base=1.2.2=h7f8727e_0 - libxcb=1.14=h7b6447c_0 - libxml2=2.9.12=h03d6c58_0 - libxslt=1.1.34=hc22bd24_0 - libzopfli=1.0.3=he6710b0_0 - llvmlite=0.38.0=py39h4ff587b_0 - locket=0.2.1=py39h06a4308_2 - lxml=4.8.0=py39h1f438cf_0 - lz4-c=1.9.3=h295c915_1 - lzo=2.10=h7b6447c_2 - markupsafe=1.1.1=py39h27cfd23_0 - matplotlib=3.5.1=py39h06a4308_1 - matplotlib-base=3.5.1=py39ha18d171_1 - matplotlib-inline=0.1.2=pyhd3eb1b0_2 - mccabe=0.6.1=py39h06a4308_1 - mistune=0.8.4=py39h27cfd23_1000 - mkl=2021.4.0=h06a4308_640 - mkl-service=2.4.0=py39h7f8727e_0 - mkl_fft=1.3.1=py39hd3c417c_0 - mkl_random=1.2.2=py39h51133e4_0 - 
mock=4.0.3=pyhd3eb1b0_0 - more-itertools=8.12.0=pyhd3eb1b0_0 - mpc=1.1.0=h10f8cd9_1 - mpfr=4.0.2=hb69a4c5_1 - mpi=1.0=mpich - mpich=3.3.2=hc856adb_0 - mpmath=1.2.1=py39h06a4308_0 - msgpack-python=1.0.2=py39hff7bd54_1 - multipledispatch=0.6.0=py39h06a4308_0 - munkres=1.1.4=py_0 - mypy_extensions=0.4.3=py39h06a4308_1 - nbclassic=0.3.5=pyhd3eb1b0_0 - nbclient=0.5.11=pyhd3eb1b0_0 - nbconvert=6.3.0=py39h06a4308_0 - nbformat=5.1.3=pyhd3eb1b0_0 - ncurses=6.3=h7f8727e_2 - nest-asyncio=1.5.1=pyhd3eb1b0_0 - nettle=3.7.3=hbbd107a_1 - networkx=2.7.1=pyhd3eb1b0_0 - nltk=3.7=pyhd3eb1b0_0 - nose=1.3.7=pyhd3eb1b0_1008 - notebook=6.4.8=py39h06a4308_0 - numba=0.55.1=py39h51133e4_0 - numexpr=2.8.1=py39h6abb31d_0 - numpy=1.21.2=py39h20f2e39_0 - numpy-base=1.21.2=py39h79a1101_0 - numpydoc=1.2=pyhd3eb1b0_0 - olefile=0.46=pyhd3eb1b0_0 - openh264=2.1.1=h4ff587b_0 - openjpeg=2.4.0=h3ad879b_0 - openpyxl=3.0.9=pyhd3eb1b0_0 - openssl=1.1.1n=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pandas=1.4.1=py39h295c915_1 - pandocfilters=1.5.0=pyhd3eb1b0_0 - pango=1.45.3=hd140c19_0 - parso=0.8.3=pyhd3eb1b0_0 - partd=1.2.0=pyhd3eb1b0_1 - patchelf=0.13=h295c915_0 - path=16.2.0=pyhd3eb1b0_0 - path.py=12.5.0=hd3eb1b0_0 - pathlib2=2.3.6=py39h06a4308_2 - pathspec=0.7.0=py_0 - patsy=0.5.2=py39h06a4308_1 - pcre=8.45=h295c915_0 - pep8=1.7.1=py39h06a4308_0 - pexpect=4.8.0=pyhd3eb1b0_3 - pickleshare=0.7.5=pyhd3eb1b0_1003 - pillow=9.0.1=py39h22f2fdc_0 - pip=21.2.4=py39h06a4308_0 - pixman=0.40.0=h7f8727e_1 - pkginfo=1.8.2=pyhd3eb1b0_0 - pluggy=1.0.0=py39h06a4308_1 - ply=3.11=py39h06a4308_0 - poyo=0.5.0=pyhd3eb1b0_0 - prometheus_client=0.13.1=pyhd3eb1b0_0 - prompt-toolkit=3.0.20=pyhd3eb1b0_0 - prompt_toolkit=3.0.20=hd3eb1b0_0 - psutil=5.8.0=py39h27cfd23_1 - ptyprocess=0.7.0=pyhd3eb1b0_2 - pure_eval=0.2.2=pyhd3eb1b0_0 - py=1.11.0=pyhd3eb1b0_0 - py-lief=0.11.5=py39h295c915_1 - pycodestyle=2.7.0=pyhd3eb1b0_0 - pycosat=0.6.3=py39h27cfd23_0 - pycparser=2.21=pyhd3eb1b0_0 - pycurl=7.44.1=py39h8f2d780_1 - 
pydocstyle=6.1.1=pyhd3eb1b0_0 - pyerfa=2.0.0=py39h27cfd23_0 - pyflakes=2.3.1=pyhd3eb1b0_0 - pygments=2.11.2=pyhd3eb1b0_0 - pylint=2.9.6=py39h06a4308_1 - pyls-spyder=0.4.0=pyhd3eb1b0_0 - pyodbc=4.0.32=py39h295c915_1 - pyopenssl=22.0.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pyqt=5.9.2=py39h2531618_6 - pyrsistent=0.18.0=py39heee7806_0 - pysocks=1.7.1=py39h06a4308_0 - pytables=3.6.1=py39h77479fe_1 - pytest=6.2.5=py39h06a4308_2 - python=3.9.7=h12debd9_1 - python-dateutil=2.8.2=pyhd3eb1b0_0 - python-libarchive-c=2.9=pyhd3eb1b0_1 - python-lsp-black=1.0.0=pyhd3eb1b0_0 - python-lsp-jsonrpc=1.0.0=pyhd3eb1b0_0 - python-lsp-server=1.2.4=pyhd3eb1b0_0 - python-slugify=5.0.2=pyhd3eb1b0_0 - pytz=2021.3=pyhd3eb1b0_0 - pywavelets=1.3.0=py39h7f8727e_0 - pyxdg=0.27=pyhd3eb1b0_0 - pyyaml=6.0=py39h7f8727e_1 - pyzmq=22.3.0=py39h295c915_2 - qdarkstyle=3.0.2=pyhd3eb1b0_0 - qstylizer=0.1.10=pyhd3eb1b0_0 - qt=5.9.7=h5867ecd_1 - qtawesome=1.0.3=pyhd3eb1b0_0 - qtconsole=5.2.2=pyhd3eb1b0_0 - qtpy=1.11.2=pyhd3eb1b0_0 - readline=8.1.2=h7f8727e_1 - regex=2022.3.15=py39h7f8727e_0 - requests=2.27.1=pyhd3eb1b0_0 - ripgrep=12.1.1=0 - rope=0.22.0=pyhd3eb1b0_0 - rtree=0.9.7=py39h06a4308_1 - ruamel_yaml=0.15.100=py39h27cfd23_0 - scikit-image=0.19.2=py39h51133e4_0 - scikit-learn=1.0.2=py39h51133e4_1 - scikit-learn-intelex=2021.5.0=py39h06a4308_0 - scipy=1.7.3=py39hc147768_0 - seaborn=0.11.2=pyhd3eb1b0_0 - secretstorage=3.3.1=py39h06a4308_0 - send2trash=1.8.0=pyhd3eb1b0_1 - setuptools=58.0.4=py39h06a4308_0 - simplegeneric=0.8.1=py39h06a4308_2 - singledispatch=3.7.0=pyhd3eb1b0_1001 - sip=4.19.13=py39h295c915_0 - six=1.16.0=pyhd3eb1b0_1 - snappy=1.1.8=he6710b0_0 - sniffio=1.2.0=py39h06a4308_1 - snowballstemmer=2.2.0=pyhd3eb1b0_0 - sortedcollections=2.1.0=pyhd3eb1b0_0 - sortedcontainers=2.4.0=pyhd3eb1b0_0 - soupsieve=2.3.1=pyhd3eb1b0_0 - sphinx=4.4.0=pyhd3eb1b0_0 - sphinxcontrib=1.0=py39h06a4308_1 - sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0 - sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0 - 
sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0 - sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0 - sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0 - sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0 - sphinxcontrib-websupport=1.2.4=py_0 - spyder=5.1.5=py39h06a4308_1 - spyder-kernels=2.1.3=py39h06a4308_0 - sqlalchemy=1.4.32=py39h7f8727e_0 - sqlite=3.38.2=hc218d9a_0 - stack_data=0.2.0=pyhd3eb1b0_0 - statsmodels=0.13.2=py39h7f8727e_0 - sympy=1.10.1=py39h06a4308_0 - tbb=2021.5.0=hd09550d_0 - tbb4py=2021.5.0=py39hd09550d_0 - tblib=1.7.0=pyhd3eb1b0_0 - terminado=0.13.1=py39h06a4308_0 - testpath=0.5.0=pyhd3eb1b0_0 - text-unidecode=1.3=pyhd3eb1b0_0 - textdistance=4.2.1=pyhd3eb1b0_0 - threadpoolctl=2.2.0=pyh0d69192_0 - three-merge=0.1.1=pyhd3eb1b0_0 - tifffile=2021.7.2=pyhd3eb1b0_2 - tinycss=0.4=pyhd3eb1b0_1002 - tk=8.6.11=h1ccaba5_0 - toml=0.10.2=pyhd3eb1b0_0 - toolz=0.11.2=pyhd3eb1b0_0 - tornado=6.1=py39h27cfd23_0 - tqdm=4.63.0=pyhd3eb1b0_0 - traitlets=5.1.1=pyhd3eb1b0_0 - typed-ast=1.4.3=py39h7f8727e_1 - typing-extensions=4.1.1=hd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - tzdata=2022a=hda174b7_0 - ujson=5.1.0=py39h295c915_0 - unicodecsv=0.14.1=py39h06a4308_0 - unidecode=1.2.0=pyhd3eb1b0_0 - unixodbc=2.3.9=h7b6447c_0 - urllib3=1.26.8=pyhd3eb1b0_0 - watchdog=2.1.6=py39h06a4308_0 - wcwidth=0.2.5=pyhd3eb1b0_0 - webencodings=0.5.1=py39h06a4308_1 - websocket-client=0.58.0=py39h06a4308_4 - werkzeug=2.0.3=pyhd3eb1b0_0 - wheel=0.37.1=pyhd3eb1b0_0 - whichcraft=0.6.1=pyhd3eb1b0_0 - widgetsnbextension=3.5.2=py39h06a4308_0 - wrapt=1.12.1=py39he8ac12f_1 - wurlitzer=3.0.2=py39h06a4308_0 - xlrd=2.0.1=pyhd3eb1b0_0 - xlsxwriter=3.0.2=pyhd3eb1b0_0 - xlwt=1.3.0=py39h06a4308_0 - xz=5.2.5=h7b6447c_0 - yaml=0.2.5=h7b6447c_0 - yapf=0.31.0=pyhd3eb1b0_0 - zeromq=4.3.4=h2531618_0 - zfp=0.5.5=h295c915_6 - zict=2.0.0=pyhd3eb1b0_0 - zipp=3.7.0=pyhd3eb1b0_0 - zlib=1.2.11=h7f8727e_4 - zope=1.0=py39h06a4308_1 - zope.event=4.5.0=py39h06a4308_0 - zope.interface=5.4.0=py39h7f8727e_0 - zstd=1.4.9=haebb681_0 - pip: - 
astor==0.8.1 - configparser==5.2.0 - data==0.4 - docker-pycreds==0.4.0 - easydict==1.9 - einops==0.4.1 - formulaic==0.5.2 - funcsigs==1.0.2 - future==0.18.2 - gitdb==4.0.9 - gitpython==3.1.27 - interface-meta==1.3.0 - iopath==0.1.9 - jpeg4py==0.1.4 - jsonpatch==1.32 - jsonpointer==2.3 - latex==0.7.0 - libarchive-c==2.9 - linearmodels==4.29 - lmdb==1.3.0 - loguru==0.6.0 - mat73==0.59 - memory-profiler==0.60.0 - msgpack==1.0.2 - ninja==1.11.1 - opencv-python==4.5.5.64 - pathtools==0.1.2 - promise==2.3 - property-cached==1.6.4 - protobuf==3.20.0 - pycocotools==2.0.4 - pyhdfe==0.1.2 - ruamel-yaml-conda==0.15.100 - sentry-sdk==1.5.8 - setproctitle==1.2.2 - setuptools-scm==7.1.0 - shapely==1.8.1.post1 - shortuuid==1.0.8 - shutilwhich==1.1.0 - smmap==5.0.0 - tables==3.6.1 - tempdir==0.7.1 - tensorboardx==2.5.1 - thop==0.1.0.post2207010342 - tikzplotlib==0.10.1 - timm==0.5.4 - tomli==2.0.1 - torch==1.11.0 - torchfile==0.1.0 - visdom==0.1.8.9 - wandb==0.12.11 - webcolors==1.12 - yaspin==2.1.0 prefix: /public/baiyifan/conda_envs/artrack ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ # ARTrack [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-got-10k)](https://paperswithcode.com/sota/visual-object-tracking-on-got-10k?p=artrackv2-prompting-autoregressive-tracker) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-lasot)](https://paperswithcode.com/sota/visual-object-tracking-on-lasot?p=artrackv2-prompting-autoregressive-tracker) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-trackingnet)](https://paperswithcode.com/sota/visual-object-tracking-on-trackingnet?p=artrackv2-prompting-autoregressive-tracker) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-needforspeed)](https://paperswithcode.com/sota/visual-object-tracking-on-needforspeed?p=artrackv2-prompting-autoregressive-tracker) 
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-tnl2k)](https://paperswithcode.com/sota/visual-object-tracking-on-tnl2k?p=artrackv2-prompting-autoregressive-tracker) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-lasot-ext)](https://paperswithcode.com/sota/visual-object-tracking-on-lasot-ext?p=artrackv2-prompting-autoregressive-tracker) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-uav123)](https://paperswithcode.com/sota/visual-object-tracking-on-uav123?p=artrackv2-prompting-autoregressive-tracker) The official PyTorch implementation of our **CVPR 2023 Highlight** and **CVPR 2024 Poster** paper: **Autoregressive Visual Tracking** **ARTrackV2: Prompting Autoregressive Tracker Where to Look and How to Describe** GitHub maintainer: [Yifan Bai](https://github.com/AlexDotHam) [[CVPR2023](https://openaccess.thecvf.com/content/CVPR2023/papers/Wei_Autoregressive_Visual_Tracking_CVPR_2023_paper.pdf)] [[CVPR2024](https://artrackv2.github.io/)] ### :bookmark: Update List: - [ ] Fastitnp - [ ] VastTrack and More Datasets ### :bookmark: Checkpoints in Baidu Pan [[Baidu Pan](https://pan.baidu.com/s/1bpdLdwO39kHD5iH06aGIzg?pwd=g06j)] passwd: g06j Google Drive: [ARTrackV2-B-256](https://drive.google.com/file/d/1tGaY5jQxZOTzJDWXgOgoHtBwc5l4NLQ2/view?usp=drive_link) | [ARTrackV2-B-256-GOT](https://drive.google.com/file/d/1RqsjHqTBsIN5ivD-C5tXDyhZlGZjJN88/view?usp=drive_link) | [ARTrackV2-L-384-GOT](https://drive.google.com/file/d/1KJ-TXFSn6K_OhchoRE29ePZbSm9sBHVS/view?usp=drive_link) | ### :bookmark:Our ARTrackV2 is accepted by CVPR2024!!! 
[[Deprecated Code](https://drive.google.com/file/d/15PHRN9utPfm1L4djr7U6MLHWIkx90EVD/view?usp=sharing)] [[Raw Result](https://drive.google.com/drive/folders/1Kd6IA60OQStfYCgsi42I20VRDTB7VcWl?usp=sharing)] We have released the training code for ARTrackV2 and merged it into this repository. You can adjust the config to use the corresponding ARTrackV2-marked modules. The training process remains the same as V1. | Variant | [ARTrackV2-B-256](https://drive.google.com/file/d/1tGaY5jQxZOTzJDWXgOgoHtBwc5l4NLQ2/view?usp=drive_link) | [ARTrackV2-B-256-GOT](https://drive.google.com/file/d/1RqsjHqTBsIN5ivD-C5tXDyhZlGZjJN88/view?usp=drive_link) | [ARTrackV2-L-384-GOT](https://drive.google.com/file/d/1KJ-TXFSn6K_OhchoRE29ePZbSm9sBHVS/view?usp=drive_link) | |:-------------------------------:|:-----------------------:|:-----------------------:|:-----------------------:| | Model Config | ViT-B, 256^2 resolution | ViT-B, 256^2 resolution | ViT-L, 384^2 resolution | | GOT-10k (AO / SR 0.5 / SR 0.75) | - / - / - | 76.1 / 85.5 / 72.9 | 79.6 / 88.0 / 78.7 | | LaSOT (AUC / Norm P / P) | 71.5 / 80.3 / 77.5 | - / - / - | - / - / - | | TrackingNet (AUC / Norm P / P) | 84.3 / 89.1 / 83.6 | - / - / - | - / - / - | | LaSOT_ext (AUC / Norm P / P) | 51.1 / 58.4 / 61.5 | - / - / - | - / - / - | ## Highlight ![](figure/overview.jpg) ### :bookmark:Brief Introduction We present **ARTrack**, an autoregressive framework for visual object tracking. ARTrack tackles tracking as a coordinate sequence interpretation task that estimates object trajectories progressively, where the current estimate is induced by previous states and in turn affects subsequences. This time-autoregressive approach models the sequential evolution of trajectories to keep tracing the object **across frames**, making it superior to existing template matching based trackers that only consider the **per-frame** localization accuracy. ARTrack is simple and direct, eliminating customized localization heads and post-processings. 
Despite its simplicity, ARTrack achieves state-of-the-art performance on prevailing benchmark datasets. ### :bookmark:Strong Performance | Variant | ARTrack-256 | ARTrack-384 | ARTrack-L-384 | |:-------------------------------:|:-----------------------:|:-----------------------:|:-----------------------:| | Model Config | ViT-B, 256^2 resolution | ViT-B, 384^2 resolution | ViT-L, 384^2 resolution | | GOT-10k (AO / SR 0.5 / SR 0.75) | 73.5 / 82.2 / 70.9 | 75.5 / 84.3 / 74.3 | 78.5 / 87.4 / 77.8 | | LaSOT (AUC / Norm P / P) | 70.4 / 79.5 / 76.6 | 72.6 / 81.7 / 79.1 | 73.1 / 82.2 / 80.3 | | TrackingNet (AUC / Norm P / P) | 84.2 / 88.7 / 83.5 | 85.1 / 89.1 / 84.8 | 85.6 / 89.6 / 84.8 | | LaSOT_ext (AUC / Norm P / P) | 46.4 / 56.5 / 52.3 | 51.9 / 62.0 / 58.5 | 52.8 / 62.9 / 59.7 | | TNL-2K (AUC) | 57.5 | 59.8 | 60.3 | | NfS30 (AUC) | 64.3 | 66.8 | 67.9 | | UAV123 (AUC) | 67.7 | 70.5 | 71.2 | ### :bookmark:Inference Speed Our baseline model (backbone: ViT-B, resolution: 256x256) can run at **26 fps** (frames per second) on a single NVIDIA GeForce RTX 3090, our alter decoder version can run at **45 fps** on a single NVIDIA GeForce RTX 3090. ## Bug of array of inhomogeneous shape Thanks to [MrtXue](https://github.com/MrtXue), if you meet the "ValueError: setting an array element with a sequence." when you train in the second stage, you can try to reduce your numpy version to 1.23. 
## Update for checkpoint(ARTrack_large_384_full): You can download the model weights from [Google Drive](https://drive.google.com/drive/folders/1KsH_MIZIdgjZpUZBmR4P88yeYDqM8yNW?usp=sharing) | Variant | ARTrack-L-384 | |:-------------------------------:|:-----------------------:| | Model Config | ViT-L, 384^2 resolution | | GOT-10k (AO / SR 0.5 / SR 0.75) | 80.0 / 88.5 / 80.0 | | LaSOT (AUC / Norm P / P) | 73.5 / 82.4 / 80.6 | | TrackingNet (AUC / Norm P / P) | 85.5 / 90.1 / 85.9 | | LaSOT_ext (AUC / Norm P / P) | 51.8 / 62.3 / 58.8 | ## Update for checkpoint and raw_result(ARTrack_base_256_full): You can download the model weights and raw_result from [Google Drive](https://drive.google.com/drive/folders/1KsH_MIZIdgjZpUZBmR4P88yeYDqM8yNW?usp=sharing) | Variant | ARTrack-256 | ARTrack-256-got | |:-------------------------------:|:-----------------------:|:-----------------------:| | Model Config | ViT-B, 256^2 resolution | ViT-B, 256^2 resolution | | GOT-10k (AO / SR 0.5 / SR 0.75) | 76.7 / 85.7 / 74.8 | 74.1 / 83.1 / 70.0 | | LaSOT (AUC / Norm P / P) | 70.8 / 79.6 / 76.3 | - / - / - | | TrackingNet (AUC / Norm P / P) | 84.3 / 88.7 / 83.4 | - / - / - | | LaSOT_ext (AUC / Norm P / P) | 48.4 / 57.7 / 53.7 | - / - / - | ## Install the environment Use the Anaconda (CUDA 11.3) ``` conda env create -f ARTrack_env_cuda113.yaml ``` ## Set project paths Run the following command to set paths for this project ``` python tracking/create_default_local_file.py --workspace_dir . --data_dir ./data --save_dir ./output ``` After running this command, you can also modify paths by editing these two files ``` lib/train/admin/local.py # paths about training lib/test/evaluation/local.py # paths about testing ``` ## Data Preparation Put the tracking datasets in ./data. It should look like this: ``` ${PROJECT_ROOT} -- data -- lasot |-- airplane |-- basketball |-- bear ... -- got10k |-- test |-- train |-- val -- coco |-- annotations |-- images -- trackingnet |-- TRAIN_0 |-- TRAIN_1 ... 
|-- TRAIN_11 |-- TEST ``` ## Training Download pre-trained [MAE ViT-Base weights](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth) and put it under `$PROJECT_ROOT$/pretrained_models` (different pretrained models can also be used, see [MAE](https://github.com/facebookresearch/mae) for more details). ### One-stage pair-level training Since sequence-level training requires video input, and the COCO dataset contains only images, traditional training methods were first used to train the model so that it could be fairly compared to other trackers. ``` python tracking/train.py --script artrack --config artrack_256_full --save_dir ./output --mode multiple --nproc_per_node 4 --use_wandb 0 ``` Replace `--config` with the desired model config under `experiments/artrack`. We use [wandb](https://github.com/wandb/client) to record detailed training logs; in case you don't want to use wandb, set `--use_wandb 0`. ### Two-stage sequence-level training To enable sequence-level training, replace the PRETRAIN_PTH entry in the `experiments/artrack_seq/*.yaml` configuration file with the path to your pretrained checkpoint, such as './output/artrack_256_full/checkpoints/train/artrack/artrack_256_full/ARTrack_ep0240.pth.tar'. 
``` python tracking/train.py --script artrack_seq --config artrack_seq_256_full --save_dir ./output --mode multiple --nproc_per_node 4 --use_wandb 0 ``` ## Evaluation Change the corresponding values of `lib/test/evaluation/local.py` to the actual benchmark saving paths. Some testing examples: - LaSOT or other off-line evaluated benchmarks (modify `--dataset` correspondingly) ``` python tracking/test.py artrack_seq artrack_seq_256_full --dataset lasot --threads 16 --num_gpus 4 python tracking/analysis_results.py # need to modify tracker configs and names ``` - GOT10K-test ``` python tracking/test.py artrack_seq artrack_seq_256_full --dataset got10k_test --threads 16 --num_gpus 4 python lib/test/utils/transform_got10k.py --tracker_name ostrack --cfg_name vitb_384_mae_ce_32x4_got10k_ep100 ``` - TrackingNet ``` python tracking/test.py artrack_seq artrack_seq_256_full --dataset trackingnet --threads 16 --num_gpus 4 python lib/test/utils/transform_trackingnet.py --tracker_name ostrack --cfg_name vitb_384_mae_ce_32x4_ep300 ``` ## Acknowledgement :heart::heart::heart:Our idea is implemented based on the following projects. We really appreciate their excellent open-source works! - [SIoU](https://github.com/AlexDotHam/SIoU-loss) [[related paper](https://arxiv.org/abs/2205.12740)] - [OSTrack](https://github.com/botaoye/OSTrack) [[related paper](https://arxiv.org/abs/2203.11991)] - [PyTracking](https://github.com/visionml/pytracking) [[related paper](https://arxiv.org/abs/2208.06888)] :heart::heart::heart:This project is not for commercial use. For commercial use, please contact the author. :heart::heart::heart:This project is not for commercial use. For commercial use, please contact the author. :heart::heart::heart:This project is not for commercial use. For commercial use, please contact the author. ## Citation If any parts of our paper and code help your research, please consider citing us and giving a star to our repository. 
``` @InProceedings{Wei_2023_CVPR, author = {Wei, Xing and Bai, Yifan and Zheng, Yongchao and Shi, Dahu and Gong, Yihong}, title = {Autoregressive Visual Tracking}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2023}, pages = {9697-9706} } @InProceedings{Bai_2024_CVPR, author = {Bai, Yifan and Zhao, Zeyang and Gong, Yihong and Wei, Xing}, title = {ARTrackV2: Prompting Autoregressive Tracker Where to Look and How to Describe}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2024} } ``` ## Contact If you have any questions or concerns, feel free to open an issue or contact me directly through the contact information on my GitHub homepage, **provided below the paper's title**. ================================================ FILE: artrackv2_mindspore/.gitignore ================================================ test/ lib/models/__pycache__/ lib/config/__pycache__/ lib/test/tracker/__pycache__/ lib/models/__pycache__/ lib/config/ostrack/__pycache__/ lib/config/ostrack/__pycache__/ ================================================ FILE: artrackv2_mindspore/README.md ================================================ # ARTrackV2 ## Evaluation Make sure you have installed the GPU version of MindSpore according to [link](https://www.mindspore.cn/install/en). 
Change the corresponding values of `lib/test/evaluation/local.py` to the actual benchmark saving paths Some testing examples: - GOT10K-test ```python cd tracking python test.py ostrack 2stage_256_got --dataset got10k_test --thread 0 --num_gpus 1 ``` ================================================ FILE: artrackv2_mindspore/experiments/ostrack/2stage_256_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 14 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 NUMBER: 2 TRAIN: DATASETS_NAME: #- LASOT - GOT10K_train_full #- TRACKINGNET DATASETS_RATIO: #- 1 - 1 #- 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "../checkpoint1.ckpt" EXTRA_MERGER: False RETURN_INTER: False DECODER: TYPE: "mask" MASK_RATIO: 0.75 EMBEDDIM: 512 DEPTH: 8 NUMHEADS: 16 MLPRATIO: 4 BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 90 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.05 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 300 SEARCH_FACTOR: 3.95 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/best_384.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 
SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10240 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 16 EPOCH: 500 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 400 NUM_WORKER: 16 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 500 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/finetune.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 8 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 960 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 400 NUM_WORKER: 8 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 30 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/finetune_384.yaml ================================================ DATA: 
MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 12 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 960 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_large.pth" EXTRA_MERGER: False RETURN_INTER: False DECODER: TYPE: "mask" MASK_RATIO: 0.75 EMBEDDIM: 512 DEPTH: 8 NUMHEADS: 16 MLPRATIO: 4 BACKBONE: TYPE: vit_large_patch16_224 STRIDE: 16 EMBEDDIM: 1024 HEAD: TYPE: PIX NUM_CHANNELS: 1024 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 4 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 400 NUM_WORKER: 8 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 500 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/finetune_384_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 17 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 960 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 4 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 400 
NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 500 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_256_mae_32x4_ep300.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 60000 # TRAIN: # DATASETS_NAME: # - LASOT # - GOT10K_vottrain # - COCO17 # - TRACKINGNET # DATASETS_RATIO: # - 1 # - 1 # - 1 # - 1 # SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: CENTER NUM_CHANNELS: 256 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 32 EPOCH: 300 GIOU_WEIGHT: 2.0 L1_WEIGHT: 5.0 GRAD_CLIP_NORM: 0.1 LR: 0.0004 LR_DROP_EPOCH: 240 NUM_WORKER: 10 OPTIMIZER: ADAMW PRINT_INTERVAL: 50 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 300 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_256_mae_ce_32x4_ep300.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 # TRAIN: # DATASETS_NAME: # - GOT10K_train_full # DATASETS_RATIO: 
# - 1 # SAMPLE_PER_EPOCH: 60000 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 CE_START_EPOCH: 600 # candidate elimination start epoch CE_WARM_EPOCH: 560 # candidate elimination warm up epoch BATCH_SIZE: 48 EPOCH: 500 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 400 NUM_WORKER: 8 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 500 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_256_mae_ce_32x4_got10k_ep100.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 24 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000001 LR_DROP_EPOCH: 400 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.0001 
AMP: False TEST: EPOCH: 30 SEARCH_FACTOR: 4.2 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_384_mae_32x4_ep300.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 # TRAIN: # DATASETS_NAME: # - GOT10K_train_full # DATASETS_RATIO: # - 1 # SAMPLE_PER_EPOCH: 60000 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: CENTER NUM_CHANNELS: 256 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 32 EPOCH: 300 GIOU_WEIGHT: 2.0 L1_WEIGHT: 5.0 GRAD_CLIP_NORM: 0.1 LR: 0.0004 LR_DROP_EPOCH: 240 NUM_WORKER: 10 OPTIMIZER: ADAMW PRINT_INTERVAL: 50 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 300 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_384_mae_ce_32x4_ep300.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 # TRAIN: # DATASETS_NAME: # - GOT10K_train_full # DATASETS_RATIO: # - 1 # SAMPLE_PER_EPOCH: 60000 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 
VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224_ce STRIDE: 16 CE_LOC: [3, 6, 9] CE_KEEP_RATIO: [0.7, 0.7, 0.7] CE_TEMPLATE_RANGE: 'CTR_POINT' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX HEAD: TYPE: CENTER NUM_CHANNELS: 256 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 32 EPOCH: 300 GIOU_WEIGHT: 2.0 L1_WEIGHT: 5.0 GRAD_CLIP_NORM: 0.1 LR: 0.0004 LR_DROP_EPOCH: 240 NUM_WORKER: 10 OPTIMIZER: ADAMW PRINT_INTERVAL: 50 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 300 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/experiments/ostrack/vitb_384_mae_ce_32x4_got10k_ep100.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224_ce STRIDE: 16 CE_LOC: [3, 6, 9] CE_KEEP_RATIO: [0.7, 0.7, 0.7] CE_TEMPLATE_RANGE: 'CTR_POINT' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX HEAD: TYPE: CENTER NUM_CHANNELS: 256 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 CE_START_EPOCH: 20 # candidate elimination start epoch CE_WARM_EPOCH: 50 # candidate elimination warm up epoch BATCH_SIZE: 32 EPOCH: 100 GIOU_WEIGHT: 2.0 L1_WEIGHT: 5.0 GRAD_CLIP_NORM: 0.1 LR: 0.0004 LR_DROP_EPOCH: 80 NUM_WORKER: 10 OPTIMIZER: ADAMW PRINT_INTERVAL: 50 SCHEDULER: 
TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 100 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: artrackv2_mindspore/external/AR/README.md ================================================ # Alpha-Refine ## Introduction Alpha-Refine is the winner of the VOT Real-Time Challenge 2020, which has great ability to predict high-quality masks. In this work, we combine the STARK tracker with Alpha-Refine to test on the VOT2020 benchamark. ## Installation After the environment has been installed according to the README.md of STARK, you only need to install a few more packages as shown below. * Install ninja-build for Precise ROI pooling ```bash sudo apt-get install ninja-build ``` In case of issues, we refer to https://github.com/vacancy/PreciseRoIPooling. * Install the Precise ROI pooling ``` cd ltr/external git clone https://github.com/vacancy/PreciseRoIPooling.git cd ../.. ``` * Add the project path to environment variables ``` export PYTHONPATH=:$PYTHONPATH ``` * Setup the environment Create the default environment setting files. ```bash # Environment settings for pytracking. Saved at pytracking/evaluation/local.py python -c "from pytracking.evaluation.environment import create_default_local_file; create_default_local_file()" # Environment settings for ltr. Saved at ltr/admin/local.py python -c "from ltr.admin.environment import create_default_local_file; create_default_local_file()" ``` You can modify these files to set the paths to datasets, results paths etc. * Download the pre-trained Alpha-Refine network Download the network for [Alpha-Refine](https://drive.google.com/open?id=1qOQRfaRMbQ2nmgX1NFjoQHfXOAn609QM) and put it under the ltr/checkpoints/ltr/ARcm_seg/ARcm_coco_seg_only_mask_384 dir. 
================================================ FILE: artrackv2_mindspore/external/AR/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/README.md ================================================ # LTR A general PyTorch based framework for learning tracking representations. ## Table of Contents * [Quick Start](#quick-start) * [Overview](#overview) * [Trackers](#trackers) * [PrDiMP](#PrDiMP) * [DiMP](#DiMP) * [ATOM](#ATOM) * [Training your own networks](#training-your-own-networks) ## Quick Start The installation script will automatically generate a local configuration file "admin/local.py". In case the file was not generated, run ```admin.environment.create_default_local_file()``` to generate it. Next, set the paths to the training workspace, i.e. the directory where the checkpoints will be saved. Also set the paths to the datasets you want to use. If all the dependencies have been correctly installed, you can train a network using the run_training.py script in the correct conda environment. ```bash conda activate pytracking python run_training.py train_module train_name ``` Here, ```train_module``` is the sub-module inside ```train_settings``` and ```train_name``` is the name of the train setting file to be used. For example, you can train using the included default ATOM settings by running: ```bash python run_training bbreg atom_default ``` ## Overview The framework consists of the following sub-modules. - [actors](actors): Contains the actor classes for different trainings. The actor class is responsible for passing the input data through the network can calculating losses. - [admin](admin): Includes functions for loading networks, tensorboard etc. and also contains environment settings. 
- [dataset](dataset): Contains integration of a number of training datasets, namely [TrackingNet](https://tracking-net.org/), [GOT-10k](http://got-10k.aitestunion.com/), [LaSOT](https://cis.temple.edu/lasot/), [ImageNet-VID](http://image-net.org/), [DAVIS](https://davischallenge.org), [YouTube-VOS](https://youtube-vos.org), [MS-COCO](http://cocodataset.org/#home), [SBD](http://home.bharathh.info/pubs/codes/SBD), [LVIS](https://www.lvisdataset.org), [ECSSD](http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html), [MSRA10k](https://mmcheng.net/msra10k), and [HKU-IS](https://sites.google.com/site/ligb86/hkuis). Additionally, it includes modules to generate synthetic videos from image datasets. - [data_specs](data_specs): Information about train/val splits of different datasets. - [data](data): Contains functions for processing data, e.g. loading images, data augmentations, sampling frames from videos. - [external](external): External libraries needed for training. Added as submodules. - [models](models): Contains different layers and network definitions. - [trainers](trainers): The main class which runs the training. - [train_settings](train_settings): Contains settings files, specifying the training of a network. ## Trackers The framework currently contains the training code for the following trackers. ### PrDiMP The following setting files can be used train the DiMP networks, or to know the exact training details. - [dimp.prdimp18](train_settings/dimp/prdimp18.py): The default settings used for training the PrDiMP model with ResNet-18 backbone. - [dimp.prdimp50](train_settings/dimp/prdimp50.py): The default settings used for training the PrDiMP model with ResNet-50 backbone. - [dimp.super_dimp](train_settings/dimp/super_dimp.py): Combines the bounding-box regressor of PrDiMP with the standard DiMP classifier and better training and inference settings. 
### DiMP The following setting files can be used train the DiMP networks, or to know the exact training details. - [dimp.dimp18](train_settings/dimp/dimp18.py): The default settings used for training the DiMP model with ResNet-18 backbone. - [dimp.dimp50](train_settings/dimp/dimp50.py): The default settings used for training the DiMP model with ResNet-50 backbone. ### ATOM The following setting file can be used train the ATOM network, or to know the exact training details. - [bbreg.atom](train_settings/bbreg/atom_paper.py): The settings used in the paper for training the network in ATOM. - [bbreg.atom](train_settings/bbreg/atom.py): Newer settings used for training the network in ATOM, also utilizing the GOT10k dataset. - [bbreg.atom](train_settings/bbreg/atom_prob_ml.py): Settings for ATOM with the probabilistic bounding box regression proposed in [this paper](https://arxiv.org/abs/1909.12297). - [bbreg.atom](train_settings/bbreg/atom_paper.py): The baseline ATOM* setting evaluated in [this paper](https://arxiv.org/abs/1909.12297). ## Training your own networks To train a custom network using the toolkit, the following components need to be specified in the train settings. For reference, see [atom.py](train_settings/bbreg/atom.py). - Datasets: The datasets to be used for training. A number of standard tracking datasets are already available in ```dataset``` module. - Processing: This function should perform the necessary post-processing of the data, e.g. cropping of target region, data augmentations etc. - Sampler: Determines how the frames are sampled from a video sequence to form the batches. - Network: The network module to be trained. - Objective: The training objective. - Actor: The trainer passes the training batch to the actor who is responsible for passing the data through the network correctly, and calculating the training loss. - Optimizer: Optimizer to be used, e.g. Adam. - Trainer: The main class which runs the epochs and saves checkpoints. 
================================================ FILE: artrackv2_mindspore/external/AR/ltr/__init__.py ================================================ from .admin.loading import load_network from .admin.model_constructor import model_constructor from .admin.multigpu import MultiGPU ================================================ FILE: artrackv2_mindspore/external/AR/ltr/actors/__init__.py ================================================ from .base_actor import BaseActor from .bbreg import AtomActor from .tracking import DiMPActor ================================================ FILE: artrackv2_mindspore/external/AR/ltr/actors/base_actor.py ================================================ from pytracking import TensorDict class BaseActor: """ Base class for actor. The actor class handles the passing of the data through the network and calculation the loss""" def __init__(self, net, objective): """ args: net - The network to train objective - The loss function """ self.net = net self.objective = objective def __call__(self, data: TensorDict): """ Called in each training iteration. Should pass in input data through the network, calculate the loss, and return the training stats for the input data args: data - A TensorDict containing all the necessary data blocks. returns: loss - loss for the input data stats - a dict containing detailed losses """ raise NotImplementedError def to(self, device): """ Move the network to device args: device - device to use. 'cpu' or 'cuda' """ self.net.to(device) def train(self, mode=True): """ Set whether the network is in train mode. args: mode (True) - Bool specifying whether in training mode. """ self.net.train(mode) def eval(self): """ Set network to eval mode""" self.train(False) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/actors/bbreg.py ================================================ from . 
import BaseActor class AtomActor(BaseActor): """ Actor for training the IoU-Net in ATOM""" def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals' and 'proposal_iou'. returns: loss - the training loss states - dict containing detailed losses """ # Run network to obtain IoU prediction for each proposal in 'test_proposals' iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals']) iou_pred = iou_pred.view(-1, iou_pred.shape[2]) iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2]) # Compute loss loss = self.objective(iou_pred, iou_gt) # Return training stats stats = {'Loss/total': loss.item(), 'Loss/iou': loss.item()} return loss, stats class AtomBBKLActor(BaseActor): """ Actor for training the IoU-Net in ATOM with BBKL""" def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_density', and 'gt_density'. returns: loss - the training loss states - dict containing detailed losses """ # Run network to obtain IoU prediction for each proposal in 'test_proposals' bb_scores = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals']) bb_scores = bb_scores.view(-1, bb_scores.shape[2]) proposal_density = data['proposal_density'].view(-1, data['proposal_density'].shape[2]) gt_density = data['gt_density'].view(-1, data['gt_density'].shape[2]) # Compute loss loss = self.objective(bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1) # Return training stats stats = {'Loss/total': loss.item(), 'Loss/bb_ce': loss.item()} return loss, stats ================================================ FILE: artrackv2_mindspore/external/AR/ltr/actors/tracking.py ================================================ from . 
import BaseActor import torch class DiMPActor(BaseActor): """Actor for training the DiMP network.""" def __init__(self, net, objective, loss_weight=None): super().__init__(net, objective) if loss_weight is None: loss_weight = {'iou': 1.0, 'test_clf': 1.0} self.loss_weight = loss_weight def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_iou' and 'test_label'. returns: loss - the training loss stats - dict containing detailed losses """ # Run network target_scores, iou_pred = self.net(train_imgs=data['train_images'], test_imgs=data['test_images'], train_bb=data['train_anno'], test_proposals=data['test_proposals']) # Classification losses for the different optimization iterations clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores] # Loss of the final filter clf_loss_test = clf_losses_test[-1] loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test # Compute loss for ATOM IoUNet loss_iou = self.loss_weight['iou'] * self.objective['iou'](iou_pred, data['proposal_iou']) # Loss for the initial filter iteration loss_test_init_clf = 0 if 'test_init_clf' in self.loss_weight.keys(): loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0] # Loss for the intermediate filter iterations loss_test_iter_clf = 0 if 'test_iter_clf' in self.loss_weight.keys(): test_iter_weights = self.loss_weight['test_iter_clf'] if isinstance(test_iter_weights, list): loss_test_iter_clf = sum([a*b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])]) else: loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1]) # Total loss loss = loss_iou + loss_target_classifier + loss_test_init_clf + loss_test_iter_clf # Log stats stats = {'Loss/total': loss.item(), 'Loss/iou': loss_iou.item(), 'Loss/target_clf': loss_target_classifier.item()} if 'test_init_clf' in 
self.loss_weight.keys(): stats['Loss/test_init_clf'] = loss_test_init_clf.item() if 'test_iter_clf' in self.loss_weight.keys(): stats['Loss/test_iter_clf'] = loss_test_iter_clf.item() stats['ClfTrain/test_loss'] = clf_loss_test.item() if len(clf_losses_test) > 0: stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item() if len(clf_losses_test) > 2: stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2) return loss, stats class KLDiMPActor(BaseActor): """Actor for training the DiMP network.""" def __init__(self, net, objective, loss_weight=None): super().__init__(net, objective) if loss_weight is None: loss_weight = {'bb_ce': 1.0} self.loss_weight = loss_weight def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_iou' and 'test_label'. returns: loss - the training loss stats - dict containing detailed losses """ # Run network target_scores, bb_scores = self.net(train_imgs=data['train_images'], test_imgs=data['test_images'], train_bb=data['train_anno'], test_proposals=data['test_proposals']) # Reshape bb reg variables is_valid = data['test_anno'][:, :, 0] < 99999.0 bb_scores = bb_scores[is_valid, :] proposal_density = data['proposal_density'][is_valid, :] gt_density = data['gt_density'][is_valid, :] # Compute loss bb_ce = self.objective['bb_ce'](bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1) loss_bb_ce = self.loss_weight['bb_ce'] * bb_ce # If standard DiMP classifier is used loss_target_classifier = 0 loss_test_init_clf = 0 loss_test_iter_clf = 0 if 'test_clf' in self.loss_weight.keys(): # Classification losses for the different optimization iterations clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores] # Loss of the final filter clf_loss_test = clf_losses_test[-1] loss_target_classifier = self.loss_weight['test_clf'] * 
clf_loss_test # Loss for the initial filter iteration if 'test_init_clf' in self.loss_weight.keys(): loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0] # Loss for the intermediate filter iterations if 'test_iter_clf' in self.loss_weight.keys(): test_iter_weights = self.loss_weight['test_iter_clf'] if isinstance(test_iter_weights, list): loss_test_iter_clf = sum([a * b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])]) else: loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1]) # If PrDiMP classifier is used loss_clf_ce = 0 loss_clf_ce_init = 0 loss_clf_ce_iter = 0 if 'clf_ce' in self.loss_weight.keys(): # Classification losses for the different optimization iterations clf_ce_losses = [self.objective['clf_ce'](s, data['test_label_density'], grid_dim=(-2,-1)) for s in target_scores] # Loss of the final filter clf_ce = clf_ce_losses[-1] loss_clf_ce = self.loss_weight['clf_ce'] * clf_ce # Loss for the initial filter iteration if 'clf_ce_init' in self.loss_weight.keys(): loss_clf_ce_init = self.loss_weight['clf_ce_init'] * clf_ce_losses[0] # Loss for the intermediate filter iterations if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2: test_iter_weights = self.loss_weight['clf_ce_iter'] if isinstance(test_iter_weights, list): loss_clf_ce_iter = sum([a * b for a, b in zip(test_iter_weights, clf_ce_losses[1:-1])]) else: loss_clf_ce_iter = (test_iter_weights / (len(clf_ce_losses) - 2)) * sum(clf_ce_losses[1:-1]) # Total loss loss = loss_bb_ce + loss_clf_ce + loss_clf_ce_init + loss_clf_ce_iter + \ loss_target_classifier + loss_test_init_clf + loss_test_iter_clf if torch.isinf(loss) or torch.isnan(loss): raise Exception('ERROR: Loss was nan or inf!!!') # Log stats stats = {'Loss/total': loss.item(), 'Loss/bb_ce': bb_ce.item(), 'Loss/loss_bb_ce': loss_bb_ce.item()} if 'test_clf' in self.loss_weight.keys(): stats['Loss/target_clf'] = loss_target_classifier.item() if 
'test_init_clf' in self.loss_weight.keys(): stats['Loss/test_init_clf'] = loss_test_init_clf.item() if 'test_iter_clf' in self.loss_weight.keys(): stats['Loss/test_iter_clf'] = loss_test_iter_clf.item() if 'clf_ce' in self.loss_weight.keys(): stats['Loss/clf_ce'] = loss_clf_ce.item() if 'clf_ce_init' in self.loss_weight.keys(): stats['Loss/clf_ce_init'] = loss_clf_ce_init.item() if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2: stats['Loss/clf_ce_iter'] = loss_clf_ce_iter.item() if 'test_clf' in self.loss_weight.keys(): stats['ClfTrain/test_loss'] = clf_loss_test.item() if len(clf_losses_test) > 0: stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item() if len(clf_losses_test) > 2: stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2) if 'clf_ce' in self.loss_weight.keys(): stats['ClfTrain/clf_ce'] = clf_ce.item() if len(clf_ce_losses) > 0: stats['ClfTrain/clf_ce_init'] = clf_ce_losses[0].item() if len(clf_ce_losses) > 2: stats['ClfTrain/clf_ce_iter'] = sum(clf_ce_losses[1:-1]).item() / (len(clf_ce_losses) - 2) return loss, stats ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/environment.py ================================================ import importlib import os from collections import OrderedDict def create_default_local_file(): path = os.path.join(os.path.dirname(__file__), 'local.py') empty_str = '\'\'' default_settings = OrderedDict({ 'workspace_dir': empty_str, 'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'', 'lasot_dir': empty_str, 'got10k_dir': empty_str, 'trackingnet_dir': empty_str, 'coco_dir': empty_str, 'lvis_dir': empty_str, 'sbd_dir': empty_str, 'imagenet_dir': empty_str, 'imagenetdet_dir': empty_str, 'ecssd_dir': empty_str, 'hkuis_dir': empty_str, 'msra10k_dir': 
empty_str, 'davis_dir': empty_str, 'youtubevos_dir': empty_str}) comment = {'workspace_dir': 'Base directory for saving network checkpoints.', 'tensorboard_dir': 'Directory for tensorboard files.'} with open(path, 'w') as f: f.write('class EnvironmentSettings:\n') f.write(' def __init__(self):\n') for attr, attr_val in default_settings.items(): comment_str = None if attr in comment: comment_str = comment[attr] if comment_str is None: f.write(' self.{} = {}\n'.format(attr, attr_val)) else: f.write(' self.{} = {} # {}\n'.format(attr, attr_val, comment_str)) def env_settings(): env_module_name = 'ltr.admin.local' try: env_module = importlib.import_module(env_module_name) return env_module.EnvironmentSettings() except: env_file = os.path.join(os.path.dirname(__file__), 'local.py') create_default_local_file() raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file)) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/loading.py ================================================ import torch import os import sys from pathlib import Path import importlib import inspect from ltr.admin import settings as ws_settings def load_trained_network(workspace_dir, network_path, checkpoint=None): """OUTDATED. Use load_pretrained instead!""" checkpoint_dir = os.path.join(workspace_dir, 'checkpoints') directory = '{}/{}'.format(checkpoint_dir, network_path) net, _ = load_network(directory, checkpoint) return net def load_pretrained(module, name, checkpoint=None, **kwargs): """Load a network trained using the LTR framework. This is useful when you want to initialize your new network with a previously trained model. args: module - Name of the train script module. I.e. the name of the folder in ltr/train_scripts. name - The name of the train_script. checkpoint - You can supply the checkpoint number or the full path to the checkpoint file (see load_network). 
**kwargs - These are passed to load_network (see that function). """ settings = ws_settings.Settings() network_dir = os.path.join(settings.env.workspace_dir, 'checkpoints', 'ltr', module, name) return load_network(network_dir=network_dir, checkpoint=checkpoint, **kwargs) def load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs): """Loads a network checkpoint file. Can be called in two different ways: load_checkpoint(network_dir): Loads the checkpoint file given by the path. If checkpoint_dir is a directory, it tries to find the latest checkpoint in that directory. load_checkpoint(network_dir, checkpoint=epoch_num): Loads the network at the given epoch number (int). The extra keyword arguments are supplied to the network constructor to replace saved ones. """ if network_dir is not None: net_path = Path(network_dir) else: net_path = None if net_path.is_file(): checkpoint = str(net_path) if checkpoint is None: # Load most recent checkpoint checkpoint_list = sorted(net_path.glob('*.pth.tar')) if checkpoint_list: checkpoint_path = checkpoint_list[-1] else: raise Exception('No matching checkpoint file found') elif isinstance(checkpoint, int): # Checkpoint is the epoch number checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint))) if not checkpoint_list or len(checkpoint_list) == 0: raise Exception('No matching checkpoint file found') if len(checkpoint_list) > 1: raise Exception('Multiple matching checkpoint files found') else: checkpoint_path = checkpoint_list[0] elif isinstance(checkpoint, str): # Checkpoint is the path checkpoint_path = os.path.expanduser(checkpoint) else: raise TypeError # Load network checkpoint_dict = torch_load_legacy(checkpoint_path) # Construct network model if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None: net_constr = checkpoint_dict['constructor'] if constructor_fun_name is not None: net_constr.fun_name = constructor_fun_name if 
constructor_module is not None: net_constr.fun_module = constructor_module # Legacy networks before refactoring if net_constr.fun_module.startswith('dlframework.'): net_constr.fun_module = net_constr.fun_module[len('dlframework.'):] net_fun = getattr(importlib.import_module(net_constr.fun_module), net_constr.fun_name) net_fun_args = list(inspect.signature(net_fun).parameters.keys()) for arg, val in kwargs.items(): if arg in net_fun_args: net_constr.kwds[arg] = val else: print('WARNING: Keyword argument "{}" not found when loading network. It was ignored.'.format(arg)) net = net_constr.get() else: raise RuntimeError('No constructor for the given network.') net.load_state_dict(checkpoint_dict['net']) net.constructor = checkpoint_dict['constructor'] if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None: net.info = checkpoint_dict['net_info'] return net, checkpoint_dict def load_weights(net, path, strict=True): checkpoint_dict = torch.load(path) weight_dict = checkpoint_dict['net'] net.load_state_dict(weight_dict, strict=strict) return net def torch_load_legacy(path): """Load network with legacy environment.""" # Setup legacy env (for older networks) _setup_legacy_env() # Load network checkpoint_dict = torch.load(path, map_location='cpu') # Cleanup legacy _cleanup_legacy_env() return checkpoint_dict def _setup_legacy_env(): importlib.import_module('ltr') sys.modules['dlframework'] = sys.modules['ltr'] sys.modules['dlframework.common'] = sys.modules['ltr'] importlib.import_module('ltr.admin') sys.modules['dlframework.common.utils'] = sys.modules['ltr.admin'] for m in ('model_constructor', 'stats', 'settings', 'local'): importlib.import_module('ltr.admin.' + m) sys.modules['dlframework.common.utils.' + m] = sys.modules['ltr.admin.' 
+ m] def _cleanup_legacy_env(): del_modules = [] for m in sys.modules.keys(): if m.startswith('dlframework'): del_modules.append(m) for m in del_modules: del sys.modules[m] ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/model_constructor.py ================================================ from functools import wraps import importlib def model_constructor(f): """ Wraps the function 'f' which returns the network. An extra field 'constructor' is added to the network returned by 'f'. This field contains an instance of the 'NetConstructor' class, which contains the information needed to re-construct the network, such as the name of the function 'f', the function arguments etc. Thus, the network can be easily constructed from a saved checkpoint by calling NetConstructor.get() function. """ @wraps(f) def f_wrapper(*args, **kwds): net_constr = NetConstructor(f.__name__, f.__module__, args, kwds) output = f(*args, **kwds) if isinstance(output, (tuple, list)): # Assume first argument is the network output[0].constructor = net_constr else: output.constructor = net_constr return output return f_wrapper class NetConstructor: """ Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network function. The class object can then be stored along with the network weights to re-construct the network.""" def __init__(self, fun_name, fun_module, args, kwds): """ args: fun_name - The function which returns the network fun_module - the module which contains the network function args - arguments which are passed to the network function kwds - arguments which are passed to the network function """ self.fun_name = fun_name self.fun_module = fun_module self.args = args self.kwds = kwds def get(self): """ Rebuild the network by calling the network function with the correct arguments. 
""" net_module = importlib.import_module(self.fun_module) net_fun = getattr(net_module, self.fun_name) return net_fun(*self.args, **self.kwds) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/multigpu.py ================================================ import torch.nn as nn def is_multi_gpu(net): return isinstance(net, (MultiGPU, nn.DataParallel)) class MultiGPU(nn.DataParallel): """Wraps a network to allow simple multi-GPU training.""" def __getattr__(self, item): try: return super().__getattr__(item) except: pass return getattr(self.module, item) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/settings.py ================================================ from ltr.admin.environment import env_settings class Settings: """ Training settings, e.g. the paths to datasets and networks.""" def __init__(self): self.set_default() def set_default(self): self.env = env_settings() self.use_gpu = True ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/stats.py ================================================ class StatValue: def __init__(self): self.clear() def reset(self): self.val = 0 def clear(self): self.reset() self.history = [] def update(self, val): self.val = val self.history.append(self.val) class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.clear() self.has_new_data = False def reset(self): self.avg = 0 self.val = 0 self.sum = 0 self.count = 0 def clear(self): self.reset() self.history = [] def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def new_epoch(self): if self.count > 0: self.history.append(self.avg) self.reset() self.has_new_data = True else: self.has_new_data = False def topk_accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" single_input = not 
isinstance(topk, (tuple, list)) if single_input: topk = (topk,) maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0] res.append(correct_k * 100.0 / batch_size) if single_input: return res[0] return res ================================================ FILE: artrackv2_mindspore/external/AR/ltr/admin/tensorboard.py ================================================ import os from collections import OrderedDict try: from torch.utils.tensorboard import SummaryWriter except: print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.') from tensorboardX import SummaryWriter class TensorboardWriter: def __init__(self, directory, loader_names): self.directory = directory self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names}) def write_info(self, module_name, script_name, description): tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info')) tb_info_writer.add_text('Modulet_name', module_name) tb_info_writer.add_text('Script_name', script_name) tb_info_writer.add_text('Description', description) tb_info_writer.close() def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1): for loader_name, loader_stats in stats.items(): if loader_stats is None: continue for var_name, val in loader_stats.items(): if hasattr(val, 'history') and getattr(val, 'has_new_data', True): self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/data/__init__.py ================================================ from .loader import LTRLoader ================================================ FILE: artrackv2_mindspore/external/AR/ltr/data/bounding_box_utils.py ================================================ 
import torch def rect_to_rel(bb, sz_norm=None): """Convert standard rectangular parametrization of the bounding box [x, y, w, h] to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate. args: bb - N x 4 tensor of boxes. sz_norm - [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given. """ c = bb[...,:2] + 0.5 * bb[...,2:] if sz_norm is None: c_rel = c / bb[...,2:] else: c_rel = c / sz_norm sz_rel = torch.log(bb[...,2:]) return torch.cat((c_rel, sz_rel), dim=-1) def rel_to_rect(bb, sz_norm=None): """Inverts the effect of rect_to_rel. See above.""" sz = torch.exp(bb[...,2:]) if sz_norm is None: c = bb[...,:2] * sz else: c = bb[...,:2] * sz_norm tl = c - 0.5 * sz return torch.cat((tl, sz), dim=-1) def masks_to_bboxes(mask, fmt='c'): """ Convert a mask tensor to one or more bounding boxes. Note: This function is a bit new, make sure it does what it says. /Andreas :param mask: Tensor of masks, shape = (..., H, W) :param fmt: bbox layout. 
'c' => "center + size" or (x_center, y_center, width, height) 't' => "top left + size" or (x_left, y_top, width, height) 'v' => "vertices" or (x_left, y_top, x_right, y_bottom) :return: tensor containing a batch of bounding boxes, shape = (..., 4) """ batch_shape = mask.shape[:-2] mask = mask.reshape((-1, *mask.shape[-2:])) bboxes = [] for m in mask: mx = m.sum(dim=-2).nonzero() my = m.sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bboxes.append(bb) bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device) bboxes = bboxes.reshape(batch_shape + (4,)) if fmt == 'v': return bboxes x1 = bboxes[..., :2] s = bboxes[..., 2:] - x1 + 1 if fmt == 'c': return torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': return torch.cat((x1, s), dim=-1) raise ValueError("Undefined bounding box layout '%s'" % fmt) def masks_to_bboxes_multi(mask, ids, fmt='c'): assert mask.dim() == 2 bboxes = [] for id in ids: mx = (mask == id).sum(dim=-2).nonzero() my = (mask == id).float().sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bb = torch.tensor(bb, dtype=torch.float32, device=mask.device) x1 = bb[:2] s = bb[2:] - x1 + 1 if fmt == 'v': pass elif fmt == 'c': bb = torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': bb = torch.cat((x1, s), dim=-1) else: raise ValueError("Undefined bounding box layout '%s'" % fmt) bboxes.append(bb) return bboxes ================================================ FILE: artrackv2_mindspore/external/AR/ltr/data/image_loader.py ================================================ import jpeg4py import cv2 as cv from PIL import Image import numpy as np davis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8) davis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 
128, 0], [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]] def default_image_loader(path): """The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader, but reverts to the opencv_loader if the former is not available.""" if default_image_loader.use_jpeg4py is None: # Try using jpeg4py im = jpeg4py_loader(path) if im is None: default_image_loader.use_jpeg4py = False print('Using opencv_loader instead.') else: default_image_loader.use_jpeg4py = True return im if default_image_loader.use_jpeg4py: return jpeg4py_loader(path) return opencv_loader(path) default_image_loader.use_jpeg4py = None def jpeg4py_loader(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_loader(path): """ Read image using opencv's imread function and returns it in rgb format""" try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def jpeg4py_loader_w_failsafe(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except: try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_seg_loader(path): """ Read segmentation annotation using opencv's imread function""" try: return cv.imread(path) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def imread_indexed(filename): """ Load indexed image with given filename. 
Used to read segmentation annotations.""" im = Image.open(filename) annotation = np.atleast_3d(im)[...,0] return annotation def imwrite_indexed(filename, array, color_palette=None): """ Save indexed image as png. Used to save segmentation annotation.""" if color_palette is None: color_palette = davis_palette if np.atleast_3d(array).shape[2] != 1: raise Exception("Saving indexed PNGs requires 2D array.") im = Image.fromarray(array) im.putpalette(color_palette.ravel()) im.save(filename, format='PNG') ================================================ FILE: artrackv2_mindspore/external/AR/ltr/data/loader.py ================================================ import torch import torch.utils.data.dataloader import importlib import collections from torch._six import string_classes, int_classes from pytracking import TensorDict, TensorList def _check_use_shared_memory(): if hasattr(torch.utils.data.dataloader, '_use_shared_memory'): return getattr(torch.utils.data.dataloader, '_use_shared_memory') collate_lib = importlib.import_module('torch.utils.data._utils.collate') if hasattr(collate_lib, '_use_shared_memory'): return getattr(collate_lib, '_use_shared_memory') return torch.utils.data.get_worker_info() is not None def ltr_collate(batch): """Puts each data field into a tensor with outer dimension batch size""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] 
if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) def ltr_collate_stack1(batch): """Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 1, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return torch.stack([torch.from_numpy(b) for b in batch], 1) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate_stack1(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate_stack1(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) class 
LTRLoader(torch.utils.data.dataloader.DataLoader): """ Data loader. Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset. Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to select along which dimension the data should be stacked to form a batch. Arguments: dataset (Dataset): dataset from which to load the data. batch_size (int, optional): how many samples per batch to load (default: 1). shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: False). sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False. batch_sampler (Sampler, optional): like sampler, but returns a batch of indices at a time. Mutually exclusive with batch_size, shuffle, sampler, and drop_last. num_workers (int, optional): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0) collate_fn (callable, optional): merges a list of samples to form a mini-batch. stack_dim (int): Dimension along which to stack to form the batch. (default: 0) pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: False) timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: 0) worker_init_fn (callable, optional): If not None, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. (default: None) .. 
note:: By default, each worker will have its PyTorch seed set to ``base_seed + worker_id``, where ``base_seed`` is a long generated by main process using its RNG. However, seeds for other libraies may be duplicated upon initializing workers (w.g., NumPy), causing each worker to return identical random numbers. (See :ref:`dataloader-workers-random-seed` section in FAQ.) You may use ``torch.initial_seed()`` to access the PyTorch seed for each worker in :attr:`worker_init_fn`, and use it to set other seeds before data loading. .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an unpicklable object, e.g., a lambda function. """ __initialized = False def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): if collate_fn is None: if stack_dim == 0: collate_fn = ltr_collate elif stack_dim == 1: collate_fn = ltr_collate_stack1 else: raise ValueError('Stack dim no supported. Must be 0 or 1.') super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn) self.name = name self.training = training self.epoch_interval = epoch_interval self.stack_dim = stack_dim ================================================ FILE: artrackv2_mindspore/external/AR/ltr/data/processing.py ================================================ import torch import torchvision.transforms as transforms from pytracking import TensorDict import ltr.data.processing_utils as prutils def stack_tensors(x): if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor): return torch.stack(x) return x class BaseProcessing: """ Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it through the network. 
For example, it can be used to crop a search region around the object, apply various data augmentations, etc.""" def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None): """ args: transform - The set of transformations to be applied on the images. Used only if train_transform or test_transform is None. train_transform - The set of transformations to be applied on the train images. If None, the 'transform' argument is used instead. test_transform - The set of transformations to be applied on the test images. If None, the 'transform' argument is used instead. joint_transform - The set of transformations to be applied 'jointly' on the train and test images. For example, it can be used to convert both test and train images to grayscale. """ self.transform = {'train': transform if train_transform is None else train_transform, 'test': transform if test_transform is None else test_transform, 'joint': joint_transform} def __call__(self, data: TensorDict): raise NotImplementedError class ATOMProcessing(BaseProcessing): """ The processing class used for training ATOM. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box. """ def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params, mode='pair', *args, **kwargs): """ args: search_area_factor - The size of the search region relative to the target size. 
output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames """ super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.proposal_params = proposal_params self.mode = mode def _get_jittered_box(self, box, mode): """ Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box """ jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode]) max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5) return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0) def _generate_proposals(self, box): """ Generates proposals by adding noise to the input box args: box - input box returns: torch.Tensor - Array of shape (num_proposals, 4) containing proposals torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. 
The IoU is mapped to [-1, 1] """ # Generate proposals num_proposals = self.proposal_params['boxes_per_frame'] proposal_method = self.proposal_params.get('proposal_method', 'default') if proposal_method == 'default': proposals = torch.zeros((num_proposals, 4)) gt_iou = torch.zeros(num_proposals) for i in range(num_proposals): proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'], sigma_factor=self.proposal_params['sigma_factor']) elif proposal_method == 'gmm': proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'], num_samples=num_proposals) gt_iou = prutils.iou(box.view(1,4), proposals.view(-1,4)) # Map to [-1, 1] gt_iou = gt_iou * 2 - 1 return proposals, gt_iou def __call__(self, data: TensorDict): """ args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou' """ # Apply joint transforms if self.transform['joint'] is not None: data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno']) data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False) for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']] # Crop image region centered at jittered_anno box crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor, self.output_sz) # Apply transforms data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False) # Generate proposals frame2_proposals, gt_iou = 
zip(*[self._generate_proposals(a) for a in data['test_anno']]) data['test_proposals'] = list(frame2_proposals) data['proposal_iou'] = list(gt_iou) # Prepare output if self.mode == 'sequence': data = data.apply(stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data class KLBBregProcessing(BaseProcessing): """ Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565]. """ def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params, mode='pair', *args, **kwargs): """ args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. proposal_params - Arguments for the proposal generation process. See _generate_proposals for details. mode - Either 'pair' or 'sequence'. 
If mode='sequence', then output has an extra dimension for frames """ super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.proposal_params = proposal_params self.mode = mode def _get_jittered_box(self, box, mode): """ Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box """ jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode]) max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5) return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0) def _generate_proposals(self, box): """ """ # Generate proposals proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'], gt_sigma=self.proposal_params['gt_sigma'], num_samples=self.proposal_params[ 'boxes_per_frame'], add_mean_box=self.proposal_params.get( 'add_mean_box', False)) return proposals, proposal_density, gt_density def __call__(self, data: TensorDict): """ args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density' """ # Apply joint transforms if self.transform['joint'] is not None: data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno']) data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False) for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) 
== 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']] # Crop image region centered at jittered_anno box crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor, self.output_sz) # Apply transforms data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False) # Generate proposals proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']]) data['test_proposals'] = proposals data['proposal_density'] = proposal_density data['gt_density'] = gt_density # Prepare output if self.mode == 'sequence': data = data.apply(stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data class ATOMwKLProcessing(BaseProcessing): """Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing.""" def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params, mode='pair', *args, **kwargs): super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.proposal_params = proposal_params self.mode = mode def _get_jittered_box(self, box, mode): """ Jitter the input box args: box - input bounding box mode - string 'train' or 'test' indicating train or test data returns: torch.Tensor - jittered box """ jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode]) max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5) return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0) def _generate_proposals(self, 
box): """ """ # Generate proposals proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'], self.proposal_params['gt_sigma'], self.proposal_params['boxes_per_frame']) iou = prutils.iou_gen(proposals, box.view(1, 4)) return proposals, proposal_density, gt_density, iou def __call__(self, data: TensorDict): # Apply joint transforms if self.transform['joint'] is not None: data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno']) data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False) for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']] # Crop image region centered at jittered_anno box crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor, self.output_sz) # Apply transforms data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False) # Generate proposals proposals, proposal_density, gt_density, proposal_iou = zip( *[self._generate_proposals(a) for a in data['test_anno']]) data['test_proposals'] = proposals data['proposal_density'] = proposal_density data['gt_density'] = gt_density data['proposal_iou'] = proposal_iou # Prepare output if self.mode == 'sequence': data = data.apply(stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data class DiMPProcessing(BaseProcessing): """ The processing class used for training DiMP. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. 
    Next, a square region (called search region) centered at the jittered target center, and of area
    search_area_factor^2 times the area of the jittered box, is cropped from the image. The reason for
    jittering the target box is to avoid learning the bias that the target is always at the center of the
    search region. The search region is then resized to a fixed size given by the argument output_sz.

    A Gaussian label centered at the target is generated for each image. These label functions are used
    for computing the loss of the predicted classification model on the test images. A set of proposals
    are also generated for the test images by jittering the ground truth box. These proposals are used to
    train the bounding box estimating branch.
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,
                 crop_type='replicate', max_scale_change=None, mode='pair', proposal_params=None,
                 label_function_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized.
                        The search region is always square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target
                                   center before extracting the search region. See _get_jittered_box.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target
                                  size before extracting the search region. See _get_jittered_box.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop
                        goes out of image. If 'inside', the search region crop is shifted/shrunk to fit
                        completely inside the image. If 'inside_major', the search region crop is
                        shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop
                               (only applicable for 'inside' and 'inside_major').
            mode - Either 'pair' or 'sequence'. If mode='sequence', the output has an extra frame dimension.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals.
            label_function_params - Arguments for the label generation process. See _generate_label_function.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change

        self.proposal_params = proposal_params
        self.label_function_params = label_function_params

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Scale jitter is log-normal; center jitter is uniform within an offset
        # proportional to sqrt(target area).
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with
                           the input box. The IoU is mapped to [-1, 1].
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')

        if proposal_method == 'default':
            # Rejection-style perturbation: each proposal keeps at least min_iou overlap.
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)

            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            # Draw proposals from a GMM around the ground-truth box, then compute their IoU.
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))
        else:
            raise ValueError('Unknown proposal method.')

        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou

    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for
                           each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4),
                                                      self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))

        return gauss_label

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals',
                'proposal_iou', 'test_label' (optional), 'train_label' (optional)
        """
        # Joint transforms are applied identically to train and test frames
        # (new_roll=False reuses the random parameters rolled for the train frames).
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'],
                                                                             bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)

            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])

            data['test_proposals'] = list(frame2_proposals)
            data['proposal_iou'] = list(gt_iou)

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        # Generate label functions (after stacking, so labels get the frame dimension too)
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])

        return data
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target
                                   center before extracting the search region. See _get_jittered_box.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target
                                  size before extracting the search region. See _get_jittered_box.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop
                        goes out of image. If 'inside', the search region crop is shifted/shrunk to fit
                        completely inside the image. If 'inside_major', the search region crop is
                        shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop
                               (only applicable for 'inside' and 'inside_major').
            mode - Either 'pair' or 'sequence'. If mode='sequence', the output has an extra frame dimension.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals.
            label_function_params - Arguments for the label generation process. See _generate_label_function.
            label_density_params - Arguments for the label density generation process.
                                   See _generate_label_density for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change

        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.label_density_params = label_density_params

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth
        density. This is used for ML and KL based regression learning of the bounding box regressor.
        args:
            box - input bounding box
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params['boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))

        return proposals, proposal_density, gt_density

    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for
                           each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4),
                                                      self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))

        return gauss_label

    def _generate_label_density(self, target_bb):
        """ Generates the gaussian label density centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for
                           each sample
        """
        # Density is produced on a (possibly interpolated) finer grid than the classifier features.
        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4),
                                                      self.label_density_params['sigma_factor'],
                                                      self.label_density_params['kernel_sz'],
                                                      feat_sz, self.output_sz,
                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),
                                                      density=True,
                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))

        # Zero out values at or below the threshold before (optionally) renormalizing.
        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()

        if self.label_density_params.get('normalize', False):
            g_sum = gauss_label.sum(dim=(-2, -1))
            valid = g_sum > 0.01
            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)
            # Degenerate (all-thresholded) maps fall back to a uniform density.
            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])

        gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0)

        return gauss_label

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals',
                'proposal_density', 'gt_density', 'test_label' (optional), 'train_label' (optional),
                'test_label_density' (optional), 'train_label_density' (optional)
        """
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'],
                                                                               bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'],
                                                                             bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)

            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])

        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density

        for s in ['train', 'test']:
            is_distractor = data.get('is_distractor_{}_frame'.format(s), None)
            if is_distractor is not None:
                # Push distractor boxes far outside the image so the generated labels are empty.
                for is_dist, box in zip(is_distractor, data[s + '_anno']):
                    if is_dist:
                        box[0] = 99999999.9
                        box[1] = 99999999.9

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])
        if self.label_density_params is not None:
            data['train_label_density'] = self._generate_label_density(data['train_anno'])
            data['test_label_density'] = self._generate_label_density(data['test_anno'])

        return data
def transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,
                            crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the
    cropped image.
    args:
        box_in - the box for which the co-ordinates are to be transformed
        box_extract - the box about which the image crop has been extracted
        resize_factor - the ratio between the original image scale and the scale of the image crop
        crop_sz - size of the cropped image
    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    # Centers of both boxes, in original-image coordinates.
    center_extract = box_extract[0:2] + 0.5 * box_extract[2:4]
    center_in = box_in[0:2] + 0.5 * box_in[2:4]

    # The crop is centered on box_extract: map box_in's center to the crop center
    # plus the scaled offset between the two centers.
    center_out = (crop_sz - 1) / 2 + (center_in - center_extract) * resize_factor
    wh_out = box_in[2:4] * resize_factor

    # Back to [x, y, w, h] form.
    return torch.cat((center_out - 0.5 * wh_out, wh_out))
def sample_target_adaptive(im, target_bb, search_area_factor, output_sz, mode: str = 'replicate',
                           max_scale_change=None, mask=None):
    """ Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains
    regions outside the image, it is shifted so that it is inside the image. Further, if the crop area
    exceeds the image size, a smaller crop which fits the image is returned instead.
    args:
        im - Input numpy image to crop.
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square).
                    If None, no resizing is done.
        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out
               of image. If 'inside', the search region crop is shifted/shrunk to fit completely inside
               the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely
               inside one axis of the image.
        max_scale_change - Maximum allowed scale change when performing the crop
                           (only applicable for 'inside' and 'inside_major')
        mask - Optional mask to apply the same crop.
    returns:
        numpy image - Extracted crop.
        torch.Tensor - A bounding box denoting the cropped region in the image.
        numpy mask - Cropped mask returned only if mask is not None.
    """
    if max_scale_change is None:
        max_scale_change = float('inf')
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)
    output_sz = torch.Tensor(output_sz)

    im_h = im.shape[0]
    im_w = im.shape[1]

    bbx, bby, bbw, bbh = target_bb.tolist()

    # Crop image: crop size keeps the output aspect ratio while covering
    # search_area_factor^2 times the target area.
    crop_sz_x, crop_sz_y = (output_sz * (
        target_bb[2:].prod() / output_sz.prod()).sqrt() * search_area_factor).ceil().long().tolist()

    # Get new sample size if forced inside the image
    if mode == 'inside' or mode == 'inside_major':
        # Calculate rescaling factor if outside the image
        rescale_factor = [crop_sz_x / im_w, crop_sz_y / im_h]
        if mode == 'inside':
            rescale_factor = max(rescale_factor)
        elif mode == 'inside_major':
            rescale_factor = min(rescale_factor)
        # Never upscale (factor >= 1) and never shrink beyond max_scale_change.
        rescale_factor = min(max(1, rescale_factor), max_scale_change)

        crop_sz_x = math.floor(crop_sz_x / rescale_factor)
        crop_sz_y = math.floor(crop_sz_y / rescale_factor)

    if crop_sz_x < 1 or crop_sz_y < 1:
        raise Exception('Too small bounding box.')

    x1 = round(bbx + 0.5 * bbw - crop_sz_x * 0.5)
    x2 = x1 + crop_sz_x

    y1 = round(bby + 0.5 * bbh - crop_sz_y * 0.5)
    y2 = y1 + crop_sz_y

    # Move box inside image
    shift_x = max(0, -x1) + min(0, im_w - x2)
    x1 += shift_x
    x2 += shift_x

    shift_y = max(0, -y1) + min(0, im_h - y2)
    y1 += shift_y
    y2 += shift_y

    # If the crop is larger than the image, distribute the overflow evenly on both sides.
    out_x = (max(0, -x1) + max(0, x2 - im_w)) // 2
    out_y = (max(0, -y1) + max(0, y2 - im_h)) // 2
    shift_x = (-x1 - out_x) * (out_x > 0)
    shift_y = (-y1 - out_y) * (out_y > 0)

    x1 += shift_x
    x2 += shift_x
    y1 += shift_y
    y2 += shift_y

    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Crop target
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]

    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]

    # Pad (image via replication, mask via zero padding)
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)

    if mask is not None:
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)

    # Resize image
    im_out = cv.resize(im_crop_padded, tuple(output_sz.long().tolist()))

    if mask is not None:
        # NOTE: F.interpolate takes (h, w) while cv.resize takes (w, h), hence the flip.
        mask_out = \
            F.interpolate(mask_crop_padded[None, None], tuple(output_sz.flip(0).long().tolist()),
                          mode='nearest')[0, 0]

    crop_box = torch.Tensor([x1, y1, x2 - x1, y2 - y1])

    if mask is None:
        return im_out, crop_box
    else:
        return im_out, crop_box, mask_out
def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the
    cropped image.
    args:
        box - the box for which the co-ordinates are to be transformed
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image
    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    # Scale from crop-region units to output-crop pixels, per axis.
    scale = crop_sz / crop_box[2:]

    out = box.clone()
    out[:2] = (out[:2] - crop_box[:2]) * scale  # translate, then rescale position
    out[2:] = out[2:] * scale                   # rescale size
    return out


def target_image_crop(frames, box_extract, box_gt, search_area_factor, output_sz, mode: str = 'replicate',
                      max_scale_change=None, masks=None):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area
    search_area_factor^2 times box_extract area. If the crop area contains regions outside the image,
    it is shifted / shrunk so that it completely fits inside the image. The extracted crops are then
    resized to output_sz. Further, the co-ordinates of the box box_gt are transformed to the image crop
    co-ordinates.
    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed
                 from image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized
        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out
               of image. If 'inside', the search region crop is shifted/shrunk to fit completely inside
               the image. If 'inside_major', the search region crop is shifted/shrunk to fit completely
               inside one axis of the image.
        max_scale_change - Maximum allowed scale change when performing the crop
                           (only applicable for 'inside' and 'inside_major')
        masks - Optional masks to apply the same crop.
    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)

    masks_crop = None
    if masks is None:
        cropped = [sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change)
                   for f, a in zip(frames, box_extract)]
        frames_crop, crop_boxes = zip(*cropped)
    else:
        cropped = [sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change, mask=m)
                   for f, a, m in zip(frames, box_extract, masks)]
        frames_crop, crop_boxes, masks_crop = zip(*cropped)

    crop_sz = torch.Tensor(output_sz)

    # find the bb location in the crop
    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)
                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]

    if masks is None:
        return frames_crop, box_crop
    return frames_crop, box_crop, masks_crop


def iou(reference, proposals):
    """Compute the IoU between a reference box with multiple proposal boxes.
    args:
        reference - Tensor of shape (1, 4).
        proposals - Tensor of shape (num_proposals, 4)
    returns:
        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each
                       proposal box.
    """
    # Corners of the intersection rectangle (boxes are [x, y, w, h]).
    top_left = torch.max(reference[:, :2], proposals[:, :2])
    bottom_right = torch.min(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals[:, 2:])
    inter_sz = (bottom_right - top_left).clamp(0)

    # Areas
    inter_area = inter_sz.prod(dim=1)
    union_area = reference[:, 2:].prod(dim=1) + proposals[:, 2:].prod(dim=1) - inter_area

    return inter_area / union_area
def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates
    args:
        box - input box
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element,
                       or a list of sigma_factors, in which case one of them will be uniformly sampled.
                       Further, each of the sigma_factor element can be either a float, or a tensor of
                       shape (4,) specifying the sigma_factor per co-ordinate
    returns:
        torch.Tensor - the perturbed box
    """
    # If a list is given, sample one sigma_factor as the current one.
    c_sigma_factor = random.choice(sigma_factor) if isinstance(sigma_factor, list) else sigma_factor

    if not isinstance(c_sigma_factor, torch.Tensor):
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    # Noise scale proportional to sqrt(box area), per coordinate.
    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor

    # Multiple tries to ensure that the perturbed box has IoU > min_iou with the input box.
    for _attempt in range(100):
        cx = box[0] + 0.5 * box[2]
        cy = box[1] + 0.5 * box[3]
        cx_noisy = random.gauss(cx, perturb_factor[0])
        cy_noisy = random.gauss(cy, perturb_factor[1])

        w_noisy = random.gauss(box[2], perturb_factor[2])
        h_noisy = random.gauss(box[3], perturb_factor[3])

        # Degenerate sizes are replaced by a random fraction of the original size.
        if w_noisy <= 1:
            w_noisy = box[2] * rand_uniform(0.15, 0.5)
        if h_noisy <= 1:
            h_noisy = box[3] * rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([cx_noisy - 0.5 * w_noisy, cy_noisy - 0.5 * h_noisy, w_noisy, h_noisy]).round()

        # Rounding can re-introduce degenerate sizes; fix them again.
        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5)
        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # If there is sufficient overlap, return.
        if box_iou > min_iou:
            return box_per, box_iou

        # Else reduce the perturb factor and retry.
        perturb_factor *= 0.9

    return box_per, box_iou


def gauss_1d(sz, sigma, center, end_pad=0, density=False):
    """1-D Gaussian sampled on an integer grid of length sz (+ end_pad), one row per center."""
    coords = torch.arange(-(sz - 1) / 2, (sz + 1) / 2 + end_pad).reshape(1, -1)
    gauss = torch.exp(-1.0 / (2 * sigma ** 2) * (coords - center.reshape(-1, 1)) ** 2)
    if density:
        # Normalize so the curve integrates to one.
        gauss /= math.sqrt(2 * math.pi) * sigma
    return gauss


def gauss_2d(sz, sigma, center, end_pad=(0, 0), density=False):
    """Separable 2-D Gaussian: outer product of two 1-D Gaussians, one map per center row."""
    if isinstance(sigma, (float, int)):
        sigma = (sigma, sigma)
    gauss_x = gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0], density).reshape(center.shape[0], 1, -1)
    gauss_y = gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1], density).reshape(center.shape[0], -1, 1)
    return gauss_x * gauss_y


def gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz,
                            end_pad_if_even=True, density=False, uni_bias=0):
    """Construct Gaussian label function centered at the target boxes."""
    if isinstance(kernel_sz, (float, int)):
        kernel_sz = (kernel_sz, kernel_sz)
    if isinstance(feat_sz, (float, int)):
        feat_sz = (feat_sz, feat_sz)
    if isinstance(image_sz, (float, int)):
        image_sz = (image_sz, image_sz)

    image_sz = torch.Tensor(image_sz)
    feat_sz = torch.Tensor(feat_sz)

    # Target center, normalized relative to the image center, mapped to feature coordinates.
    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]
    target_center_norm = (target_center - image_sz / 2) / image_sz

    # Half-pixel offset compensates for even-sized kernels.
    center = feat_sz * target_center_norm + 0.5 * \
             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])

    sigma = sigma_factor * feat_sz.prod().sqrt().item()

    end_pad = (int(kernel_sz[0] % 2 == 0), int(kernel_sz[1] % 2 == 0)) if end_pad_if_even else (0, 0)

    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad, density=density)

    if density:
        # Mix with a uniform density so the label remains a valid density.
        sz = (feat_sz + torch.Tensor(end_pad)).prod()
        return (1.0 - uni_bias) * gauss_label + uni_bias / sz
    return gauss_label + uni_bias
def gmm_density_centered(x, std):
    """Evaluate the probability density of a GMM centered at zero.
    args:
        x - Samples. Assumes dim=-1 is the component dimension and dim=-2 is feature dimension.
            Rest are sample dimension.
        std - Tensor of standard deviations
    """
    # Broadcast samples against the per-component stds.
    if x.dim() == std.dim() - 1:
        x = x.unsqueeze(-1)
    elif not (x.dim() == std.dim() and x.shape[-1] == 1):
        raise ValueError('Last dimension must be the gmm stds.')
    # Product over feature dims (independent coordinates), mean over mixture components.
    return gauss_density_centered(x, std).prod(-2).mean(-1)


def sample_gmm_centered(std, num_samples=1):
    """Sample from a GMM distribution centered at zero.
    args:
        std - Tensor of standard deviations
        num_samples - number of samples
    returns:
        torch.Tensor - samples of shape (num_samples, num_dims)
        torch.Tensor - probability density of each sample under the GMM
    """
    num_components = std.shape[-1]
    num_dims = std.numel() // num_components

    std = std.view(1, num_dims, num_components)

    # Sample component ids
    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)
    std_samp = std[0, :, k].t()

    # Sample
    x_centered = std_samp * torch.randn(num_samples, num_dims)
    prob_dens = gmm_density_centered(x_centered, std)

    return x_centered, prob_dens


def sample_gmm(mean, std, num_samples=1):
    """Sample from a GMM distribution.
    args:
        mean - a single mean vector
        std - Tensor of standard deviations
        num_samples - number of samples
    returns:
        torch.Tensor - samples of shape (num_samples, num_dims)
        torch.Tensor - probability density of each sample under the (centered) GMM
    """
    num_dims = mean.numel()
    num_components = std.shape[-1]

    mean = mean.view(1, num_dims)
    std = std.view(1, -1, num_components)

    # Sample component ids
    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)
    std_samp = std[0, :, k].t()

    # Sample
    x_centered = std_samp * torch.randn(num_samples, num_dims)
    x = x_centered + mean
    prob_dens = gmm_density_centered(x_centered, std)

    return x, prob_dens


def sample_box_gmm(mean_box, proposal_sigma, gt_sigma=None, num_samples=1, add_mean_box=False):
    """Sample boxes from a Gaussian mixture model.
    args:
        mean_box - Center (or mean) bounding box
        proposal_sigma - List of standard deviations for each Gaussian
        gt_sigma - Standard deviation of the ground truth distribution
        num_samples - Number of sampled boxes
        add_mean_box - Also add mean box as first element
    returns:
        proposals, proposal density and ground truth density for all samples
    """
    # Each sigma entry is (center_std, size_std); expand to the 4 box coordinates.
    center_std = torch.Tensor([s[0] for s in proposal_sigma])
    sz_std = torch.Tensor([s[1] for s in proposal_sigma])
    std = torch.stack([center_std, center_std, sz_std, sz_std])

    mean_box = mean_box.view(1, 4)
    sz_norm = mean_box[:, 2:].clone()

    # Sample boxes (in relative, size-normalized coordinates)
    proposals_rel_centered, proposal_density = sample_gmm_centered(std, num_samples)

    # Add mean and map back to absolute [x, y, w, h]
    mean_box_rel = rect_to_rel(mean_box, sz_norm)
    proposals_rel = proposals_rel_centered + mean_box_rel
    proposals = rel_to_rect(proposals_rel, sz_norm)

    if gt_sigma is None or gt_sigma[0] == 0 and gt_sigma[1] == 0:
        gt_density = torch.zeros_like(proposal_density)
    else:
        std_gt = torch.Tensor([gt_sigma[0], gt_sigma[0], gt_sigma[1], gt_sigma[1]]).view(1, 4)
        gt_density = gauss_density_centered(proposals_rel_centered, std_gt).prod(-1)

    if add_mean_box:
        # Mean box is prepended with sentinel densities (-1 proposal density, 1 gt density).
        proposals = torch.cat((mean_box, proposals))
        proposal_density = torch.cat((torch.Tensor([-1]), proposal_density))
        gt_density = torch.cat((torch.Tensor([1]), gt_density))

    return proposals, proposal_density, gt_density
[x, y, w, h] search_area_factor - Ratio of crop size to target size output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done. returns: cv image - extracted crop float - the factor by which the crop has been resized to make the crop size equal output_size """ x, y, w, h = target_bb.tolist() # Crop image ws = math.ceil(search_area_factor * w) hs = math.ceil(search_area_factor * h) if ws < 1 or hs < 1: raise Exception('Too small bounding box.') x1 = round(x + 0.5*w - ws*0.5) x2 = x1 + ws y1 = round(y + 0.5 * h - hs * 0.5) y2 = y1 + hs x1_pad = max(0, -x1) x2_pad = max(x2-im.shape[1]+1, 0) y1_pad = max(0, -y1) y2_pad = max(y2-im.shape[0]+1, 0) # Crop target im_crop = im[y1+y1_pad:y2-y2_pad, x1+x1_pad:x2-x2_pad, :] # Pad im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, mode) if output_sz is not None: w_rsz_f = output_sz / ws h_rsz_f = output_sz / hs im_crop_padded_rsz = cv.resize(im_crop_padded, (output_sz, output_sz)) if len(im_crop_padded_rsz.shape)==2: im_crop_padded_rsz = im_crop_padded_rsz[...,np.newaxis] return im_crop_padded_rsz, h_rsz_f, w_rsz_f else: return im_crop_padded, 1.0, 1.0 '''把mask映射到原图上''' def map_mask_back(im, target_bb, search_area_factor, mask, mode=cv.BORDER_REPLICATE): """ Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width) args: im - cv image target_bb - target box [x, y, w, h] search_area_factor - Ratio of crop size to target size output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done. 
returns: cv image - extracted crop float - the factor by which the crop has been resized to make the crop size equal output_size """ H,W = (im.shape[0],im.shape[1]) base = np.zeros((H,W)) x, y, w, h = target_bb.tolist() # Crop image ws = math.ceil(search_area_factor * w) hs = math.ceil(search_area_factor * h) if ws < 1 or hs < 1: raise Exception('Too small bounding box.') x1 = round(x + 0.5*w - ws*0.5) x2 = x1 + ws y1 = round(y + 0.5 * h - hs * 0.5) y2 = y1 + hs x1_pad = max(0, -x1) x2_pad = max(x2-im.shape[1]+1, 0) y1_pad = max(0, -y1) y2_pad = max(y2-im.shape[0]+1, 0) '''pad base''' base_padded = cv.copyMakeBorder(base, y1_pad, y2_pad, x1_pad, x2_pad, mode) '''Resize mask''' mask_rsz = cv.resize(mask,(ws,hs)) '''fill region with mask''' base_padded[y1+y1_pad:y2+y1_pad, x1+x1_pad:x2+x1_pad] = mask_rsz.copy() '''crop base_padded to get final mask''' final_mask = base_padded[y1_pad:y1_pad+H,x1_pad:x1_pad+W] assert (final_mask.shape == (H,W)) return final_mask '''Added on 2019.12.23''' def transform_image_to_crop_SE(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor_h: float, resize_factor_w: float, crop_sz: torch.Tensor) -> torch.Tensor: """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image args: box_in - the box for which the co-ordinates are to be transformed box_extract - the box about which the image crop has been extracted. 
resize_factor - the ratio between the original image scale and the scale of the image crop crop_sz - size of the cropped image returns: torch.Tensor - transformed co-ordinates of box_in """ box_extract_center = box_extract[0:2] + 0.5*box_extract[2:4] box_in_center = box_in[0:2] + 0.5*box_in[2:4] box_out_xc = (crop_sz[0] -1)/2 + (box_in_center[0] - box_extract_center[0])*resize_factor_w box_out_yc = (crop_sz[0] -1)/2 + (box_in_center[1] - box_extract_center[1])*resize_factor_h box_out_w = box_in[2] * resize_factor_w box_out_h = box_in[3] * resize_factor_h '''2019.12.28 为了避免出现(x1,y1)小于0,或者(x2,y2)大于256的情况,这里我对它们加上了一些限制''' max_sz = crop_sz[0].item() box_out_x1 = torch.clamp(box_out_xc - 0.5 * box_out_w,0,max_sz) box_out_y1 = torch.clamp(box_out_yc - 0.5 * box_out_h,0,max_sz) box_out_x2 = torch.clamp(box_out_xc + 0.5 * box_out_w,0,max_sz) box_out_y2 = torch.clamp(box_out_yc + 0.5 * box_out_h,0,max_sz) box_out_w_new = box_out_x2 - box_out_x1 box_out_h_new = box_out_y2 - box_out_y1 box_out = torch.stack((box_out_x1, box_out_y1, box_out_w_new, box_out_h_new)) return box_out def centered_crop(frames, anno, area_factor, output_sz): crops_resize_factors = [sample_target(f, a, area_factor, output_sz) for f, a in zip(frames, anno)] frames_crop, resize_factors = zip(*crops_resize_factors) crop_sz = torch.Tensor([output_sz, output_sz]) # find the bb location in the crop anno_crop = [transform_image_to_crop(a, a, rf, crop_sz) for a, rf in zip(anno, resize_factors)] return frames_crop, anno_crop '''Added by Bin Yan 2019.12.23, changed on 2020.1.4(add a new args: "get_bbox_coord")''' def jittered_center_crop_SE(frames, box_extract, box_gt, search_area_factor, output_sz, get_bbox_coord=True, mode=cv.BORDER_REPLICATE): """ Crop a patch centered at box_extract. The height and width of cropped region is search_area_factor times that of box_extract. The extracted crops are then resized to output_sz. 
    Further, the co-ordinates of the box box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    '''call function "sample_target_SE" and function "transform_image_to_crop_SE"'''
    '''get cropped patch(fixed size)'''
    crops_resize_factors = [sample_target_SE(f, a, search_area_factor, output_sz, mode=mode)
                            for f, a in zip(frames, box_extract)]
    # Per-frame: (crop, h resize factor, w resize factor).
    frames_crop, resize_factors_h, resize_factors_w = zip(*crops_resize_factors)
    if get_bbox_coord:
        crop_sz = torch.Tensor([output_sz, output_sz])
        # find the bb location in the crop
        '''get GT's cooridinate on the cropped patch'''
        box_crop = [transform_image_to_crop_SE(a_gt, a_ex, h_rsf, w_rsf, crop_sz)
                    for a_gt, a_ex, h_rsf, w_rsf in zip(box_gt, box_extract, resize_factors_h, resize_factors_w)]
        return frames_crop, box_crop
    else:
        return frames_crop


def sample_target_nopad(im, target_bb, search_area_factor, output_sz):
    """ Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains regions
    outside the image, it is shifted so that the it is inside the image. Further, if the crop area exceeds the image
    size, a smaller crop which fits the image is returned instead.

    args:
        im - cv image
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.

    returns:
        cv image - extracted crop
        torch.Tensor - a bounding box denoting the cropped region in the image.
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)
    output_sz = torch.Tensor(output_sz)

    im_h = im.shape[0]
    im_w = im.shape[1]
    bbx, bby, bbw, bbh = target_bb.tolist()

    # Crop size with the same aspect ratio as output_sz, scaled so its area is
    # search_area_factor^2 times the target box area.
    crop_sz_x, crop_sz_y = (output_sz * (target_bb[2:].prod() / output_sz.prod()).sqrt() * search_area_factor).ceil()

    # Calculate rescaling factor if outside the image
    rescale_factor = max(1, crop_sz_x / im_w, crop_sz_y / im_h)
    crop_sz_x = math.floor(crop_sz_x / rescale_factor)
    crop_sz_y = math.floor(crop_sz_y / rescale_factor)

    if crop_sz_x < 1 or crop_sz_y < 1:
        raise Exception('Too small bounding box.')

    x1 = round(bbx + 0.5 * bbw - crop_sz_x * 0.5)
    x2 = x1 + crop_sz_x

    y1 = round(bby + 0.5 * bbh - crop_sz_y * 0.5)
    y2 = y1 + crop_sz_y

    # Move box inside image
    shift_x = max(0, -x1) + min(0, im_w - x2)
    x1 += shift_x
    x2 += shift_x

    shift_y = max(0, -y1) + min(0, im_h - y2)
    y1 += shift_y
    y2 += shift_y

    # Crop and resize image
    im_crop = im[y1:y2, x1:x2, :]
    im_out = cv.resize(im_crop, tuple(output_sz.long().tolist()))
    crop_box = torch.Tensor([x1, y1, x2 - x1, y2 - y1])
    return im_out, crop_box


def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box - the box for which the co-ordinates are to be transformed
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    box_out = box.clone()
    # Translate into crop coordinates, then scale to the resized crop.
    box_out[:2] -= crop_box[:2]

    scale_factor = crop_sz / crop_box[2:]

    box_out[:2] *= scale_factor
    box_out[2:] *= scale_factor
    return box_out


def jittered_center_crop_nopad(frames, box_extract, box_gt, search_area_factor, output_sz):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2
    times box_extract area.
    If the crop area contains regions outside the image, it is shifted / shrunk so that it completely fits inside the
    image. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box
    box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)

    frame_crops_boxes = [sample_target_nopad(f, a, search_area_factor, output_sz)
                         for f, a in zip(frames, box_extract)]

    frames_crop, crop_boxes = zip(*frame_crops_boxes)

    crop_sz = torch.Tensor(output_sz)

    # find the bb location in the crop
    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)
                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]

    return frames_crop, box_crop


def iou(reference, proposals):
    """Compute the IoU between a reference box with multiple proposal boxes.

    args:
        reference - Tensor of shape (1, 4).
        proposals - Tensor of shape (num_proposals, 4)

    returns:
        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.
    """
    # Intersection box
    tl = torch.max(reference[:, :2], proposals[:, :2])
    br = torch.min(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals[:, 2:])
    sz = (br - tl).clamp(0)

    # Area
    intersection = sz.prod(dim=1)
    union = reference[:, 2:].prod(dim=1) + proposals[:, 2:].prod(dim=1) - intersection

    return intersection / union


def rand_uniform(a, b, shape=1):
    """ sample numbers uniformly between a and b.
    args:
        a - lower bound
        b - upper bound
        shape - shape of the output tensor

    returns:
        torch.Tensor - tensor of shape=shape
    """
    return (b - a) * torch.rand(shape) + a


def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates

    args:
        box - input box
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of
                       sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                       sigma_factor element can be either a float, or a tensor of shape (4,) specifying the
                       sigma_factor per co-ordinate

    returns:
        torch.Tensor - the perturbed box
    """
    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = random.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    if not isinstance(c_sigma_factor, torch.Tensor):
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    # Noise scale is relative to the box size (sqrt of the area).
    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5 * box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = random.gauss(c_x, perturb_factor[0])
        c_y_per = random.gauss(c_y, perturb_factor[1])

        w_per = random.gauss(box[2], perturb_factor[2])
        h_per = random.gauss(box[3], perturb_factor[3])

        # Degenerate sizes are resampled as a fraction of the original size.
        if w_per <= 1:
            w_per = box[2] * rand_uniform(0.15, 0.5)

        if h_per <= 1:
            h_per = box[3] * rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()

        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5)

        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor
        perturb_factor *= 0.9

    return box_per, box_iou


def gauss_1d(sz, sigma, center,
             end_pad=0):
    # NOTE: duplicates gauss_1d from processing_utils.py minus the `density` option.
    k = torch.arange(-(sz-1)/2, (sz+1)/2 + end_pad).reshape(1, -1)
    return torch.exp(-1.0/(2*sigma**2) * (k - center.reshape(-1, 1))**2)


def gauss_2d(sz, sigma, center, end_pad=(0, 0)):
    # Separable 2-D Gaussian: product of two 1-D Gaussians.
    if isinstance(sigma, (float, int)):
        sigma = (sigma, sigma)
    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0]).reshape(center.shape[0], 1, -1) * \
           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1]).reshape(center.shape[0], -1, 1)


def gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True):
    """Construct Gaussian label function."""
    if isinstance(kernel_sz, (float, int)):
        kernel_sz = (kernel_sz, kernel_sz)
    if isinstance(feat_sz, (float, int)):
        feat_sz = (feat_sz, feat_sz)
    if isinstance(image_sz, (float, int)):
        image_sz = (image_sz, image_sz)

    image_sz = torch.Tensor(image_sz)
    feat_sz = torch.Tensor(feat_sz)

    # Box center normalized so the image center maps to 0.
    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]
    target_center_norm = (target_center - image_sz / 2) / image_sz

    # Half-pixel shift for even kernel sizes.
    center = feat_sz * target_center_norm + 0.5 * \
             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])

    sigma = sigma_factor * feat_sz.prod().sqrt().item()

    if end_pad_if_even:
        end_pad = (int(kernel_sz[0]%2 == 0), int(kernel_sz[1]%2 == 0))
    else:
        end_pad = (0, 0)

    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad)
    return gauss_label


================================================
FILE: artrackv2_mindspore/external/AR/ltr/data/sampler.py
================================================
import random
import torch.utils.data
from pytracking import TensorDict


def no_processing(data):
    # Identity processing function (default for TrackingSampler).
    return data


class TrackingSampler(torch.utils.data.Dataset):
    """ Class responsible for sampling frames from training sequences to form batches.
    Each training sample is a tuple consisting of i) a set of train frames, used to learn the DiMP classification
    model and obtain the modulation vector for IoU-Net, and ii) a set of test frames on which target classification
    loss for the predicted DiMP model, and the IoU prediction loss for the IoU-Net is calculated.

    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected
    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and
    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id] and
    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are
    sampled. If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are
    found.

    The sampled frames are then passed through the input 'processing' function for the necessary processing-
    """

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames, num_train_frames=1,
                 processing=no_processing, frame_sample_mode='causal'):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List containing the probabilities by which each dataset will be sampled
            samples_per_epoch - Number of training samples per epoch
            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.
            num_test_frames - Number of test frames to sample.
            num_train_frames - Number of train frames to sample.
            processing - An instance of Processing class which performs the necessary processing of the data.
            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a
                                causally, otherwise randomly within the interval.
        """
        self.datasets = datasets

        # If p not provided, sample uniformly from all videos
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.num_test_frames = num_test_frames
        self.num_train_frames = num_train_frames
        self.processing = processing
        self.frame_sample_mode = frame_sample_mode

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):
        """ Samples num_ids frames between min_id and max_id for which target is visible

        args:
            visible - 1d Tensor indicating whether target is visible for each frame
            num_ids - number of frames to be samples
            min_id - Minimum allowed frame number
            max_id - Maximum allowed frame number

        returns:
            list - List of sampled frame numbers. None if not sufficient visible frames could be found.
        """
        if num_ids == 0:
            return []
        if min_id is None or min_id < 0:
            min_id = 0
        if max_id is None or max_id > len(visible):
            max_id = len(visible)

        valid_ids = [i for i in range(min_id, max_id) if visible[i]]

        # No visible ids
        if len(valid_ids) == 0:
            return None

        # Sampling with replacement, so duplicates are possible.
        return random.choices(valid_ids, k=num_ids)

    def __getitem__(self, index):
        """
        args:
            index (int): Index (Ignored since we sample randomly)

        returns:
            TensorDict - dict containing all the data blocks
        """

        # Select a dataset
        dataset = random.choices(self.datasets, self.p_datasets)[0]
        is_video_dataset = dataset.is_video_sequence()

        # Sample a sequence with enough visible frames
        enough_visible_frames = False
        while not enough_visible_frames:
            # Sample a sequence
            seq_id = random.randint(0, dataset.get_num_sequences() - 1)

            # Sample frames
            seq_info_dict = dataset.get_sequence_info(seq_id)
            visible = seq_info_dict['visible']

            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (
                self.num_test_frames + self.num_train_frames) and len(visible) >= 20

            # Image datasets always pass this check (the image is repeated below).
            enough_visible_frames = enough_visible_frames or not is_video_dataset

        if is_video_dataset:
            train_frame_ids = None
            test_frame_ids = None
            gap_increase = 0

            if self.frame_sample_mode == 'interval':
                # Sample frame numbers within interval defined by the first frame
                while test_frame_ids is None:
                    base_frame_id = self._sample_visible_ids(visible, num_ids=1)
                    extra_train_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,
                                                                     min_id=base_frame_id[
                                                                                0] - self.max_gap - gap_increase,
                                                                     max_id=base_frame_id[
                                                                                0] + self.max_gap + gap_increase)
                    if extra_train_frame_ids is None:
                        gap_increase += 5
                        continue
                    train_frame_ids = base_frame_id + extra_train_frame_ids
                    test_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_test_frames,
                                                              min_id=train_frame_ids[0] - self.max_gap - gap_increase,
                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase)
                    gap_increase += 5  # Increase gap until a frame is found

            elif self.frame_sample_mode == 'causal':
                # Sample test and train frames in a causal manner, i.e. test_frame_ids > train_frame_ids
                while test_frame_ids is None:
                    base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_train_frames - 1,
                                                             max_id=len(visible) - self.num_test_frames)
                    prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,
                                                              min_id=base_frame_id[0] - self.max_gap - gap_increase,
                                                              max_id=base_frame_id[0])
                    if prev_frame_ids is None:
                        gap_increase += 5
                        continue
                    train_frame_ids = base_frame_id + prev_frame_ids
                    test_frame_ids = self._sample_visible_ids(visible, min_id=train_frame_ids[0] + 1,
                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase,
                                                              num_ids=self.num_test_frames)
                    # Increase gap until a frame is found
                    gap_increase += 5
        else:
            # In case of image dataset, just repeat the image to generate synthetic video
            train_frame_ids = [1] * self.num_train_frames
            test_frame_ids = [1] * self.num_test_frames

        train_frames, train_anno, meta_obj_train = dataset.get_frames(seq_id, train_frame_ids, seq_info_dict)
        test_frames, test_anno, meta_obj_test = dataset.get_frames(seq_id, test_frame_ids, seq_info_dict)

        data = TensorDict({'train_images': train_frames,
                           'train_anno': train_anno['bbox'],
                           'test_images': test_frames,
                           'test_anno': test_anno['bbox'],
                           'dataset': dataset.get_name(),
                           'test_class': meta_obj_test.get('object_class_name')})

        return self.processing(data)


class DiMPSampler(TrackingSampler):
    """ See TrackingSampler."""

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames, num_train_frames=1,
                 processing=no_processing, frame_sample_mode='causal'):
        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch,
                         max_gap=max_gap, num_test_frames=num_test_frames, num_train_frames=num_train_frames,
                         processing=processing, frame_sample_mode=frame_sample_mode)


class ATOMSampler(TrackingSampler):
    """ See TrackingSampler."""

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames=1, num_train_frames=1,
                 processing=no_processing, frame_sample_mode='interval'):
        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch,
                         max_gap=max_gap, num_test_frames=num_test_frames, num_train_frames=num_train_frames,
                         processing=processing, frame_sample_mode=frame_sample_mode)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/data/transforms.py
================================================
import random
import numpy as np
import math
import cv2 as cv
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as tvisf


class Transform:
    """A set of transformations, used for e.g. data augmentation.
    Args of constructor:
        transforms: An arbitrary number of transformations, derived from the TransformBase class.
                    They are applied in the order they are given.

    The Transform object can jointly transform images, bounding boxes and segmentation masks.
    This is done by calling the object with the following key-word arguments (all are optional).

    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances.
        image  -  Image
        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]
        bbox  -  Bounding box on the form [x, y, w, h]
        mask  -  Segmentation mask with discrete classes

    The following parameters can be supplied with calling the transform object:
        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.
                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using
                         different random rolls. Default: True.
        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll
                            is used instead. Default: True.

    Check the DiMPProcessing class for examples.
    """

    def __init__(self, *transforms):
        # Allow passing a single list/tuple of transforms instead of varargs.
        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):
            transforms = transforms[0]
        self.transforms = transforms
        self._valid_inputs = ['image', 'coords', 'bbox', 'mask']
        self._valid_args = ['joint', 'new_roll']
        self._valid_all = self._valid_inputs + self._valid_args

    def __call__(self, **inputs):
        # Names of the actual data inputs; their order defines the output order.
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        for v in inputs.keys():
            if v not in self._valid_all:
                raise ValueError('Incorrect input \"{}\" to transform. Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))

        joint_mode = inputs.get('joint', True)
        new_roll = inputs.get('new_roll', True)

        if not joint_mode:
            # Transform each tuple of inputs independently (fresh rolls each).
            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])
            return tuple(list(o) for o in out)

        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}

        for t in self.transforms:
            out = t(**out, joint=joint_mode, new_roll=new_roll)
        if len(var_names) == 1:
            return out[var_names[0]]
        # Make sure order is correct
        return tuple(out[v] for v in var_names)

    def _split_inputs(self, inputs):
        # Re-pack {name: [v1, v2, ...]} into [{name: v1}, {name: v2}, ...],
        # broadcasting per-call arguments (except 'joint') to every tuple.
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]
        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):
            if isinstance(arg_val, list):
                for inp, av in zip(split_inputs, arg_val):
                    inp[arg_name] = av
            else:
                for inp in split_inputs:
                    inp[arg_name] = arg_val
        return split_inputs

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class TransformBase:
    """Base class for transformation objects.
See the Transform class for details.""" def __init__(self): self._valid_inputs = ['image', 'coords', 'bbox', 'mask'] self._valid_args = ['new_roll'] self._valid_all = self._valid_inputs + self._valid_args self._rand_params = None def __call__(self, **inputs): # Split input input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs} input_args = {k: v for k, v in inputs.items() if k in self._valid_args} # Roll random parameters for the transform if input_args.get('new_roll', True): rand_params = self.roll() if rand_params is None: rand_params = () elif not isinstance(rand_params, tuple): rand_params = (rand_params,) self._rand_params = rand_params outputs = dict() for var_name, var in input_vars.items(): if var is not None: transform_func = getattr(self, 'transform_' + var_name) if var_name in ['coords', 'bbox']: params = (self._get_image_size(input_vars),) + self._rand_params else: params = self._rand_params if isinstance(var, (list, tuple)): outputs[var_name] = [transform_func(x, *params) for x in var] else: outputs[var_name] = transform_func(var, *params) return outputs def _get_image_size(self, inputs): im = None for var_name in ['image', 'mask']: if inputs.get(var_name) is not None: im = inputs[var_name] break if im is None: return None if isinstance(im, (list, tuple)): im = im[0] if isinstance(im, np.ndarray): return im.shape[:2] if torch.is_tensor(im): return (im.shape[-2], im.shape[-1]) raise Exception('Unknown image type') def roll(self): return None def transform_image(self, image, *rand_params): """Must be deterministic""" return image def transform_coords(self, coords, image_shape, *rand_params): """Must be deterministic""" return coords def transform_bbox(self, bbox, image_shape, *rand_params): """Assumes [x, y, w, h]""" # Check if not overloaded if self.transform_coords.__code__ == TransformBase.transform_coords.__code__: return bbox coord = bbox.clone().view(-1,2).t().flip(0) x1 = coord[1, 0] x2 = coord[1, 0] + coord[1, 1] y1 = coord[0, 
0] y2 = coord[0, 0] + coord[0, 1] coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]]) coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0) tl = torch.min(coord_transf, dim=1)[0] sz = torch.max(coord_transf, dim=1)[0] - tl bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape) return bbox_out def transform_mask(self, mask, *rand_params): """Must be deterministic""" return mask class ToTensor(TransformBase): """Convert to a Tensor""" def transform_image(self, image): # handle numpy array if image.ndim == 2: image = image[:, :, None] image = torch.from_numpy(image.transpose((2, 0, 1))) # backward compatibility if isinstance(image, torch.ByteTensor): return image.float().div(255) else: return image def transfrom_mask(self, mask): if isinstance(mask, np.ndarray): return torch.from_numpy(mask) class ToTensorAndJitter(TransformBase): """Convert to a Tensor and jitter brightness""" def __init__(self, brightness_jitter=0.0, normalize=True): super().__init__() self.brightness_jitter = brightness_jitter self.normalize = normalize def roll(self): return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter) def transform_image(self, image, brightness_factor): # handle numpy array image = torch.from_numpy(image.transpose((2, 0, 1))) # backward compatibility if self.normalize: return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0) else: return image.float().mul(brightness_factor).clamp(0.0, 255.0) def transform_mask(self, mask, brightness_factor): if isinstance(mask, np.ndarray): return torch.from_numpy(mask) else: return mask class Normalize(TransformBase): """Normalize image""" def __init__(self, mean, std, inplace=False): super().__init__() self.mean = mean self.std = std self.inplace = inplace def transform_image(self, image): return tvisf.normalize(image, self.mean, self.std, self.inplace) class ToGrayscale(TransformBase): """Converts image to grayscale with probability""" def __init__(self, 
probability = 0.5): super().__init__() self.probability = probability self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32) def roll(self): return random.random() < self.probability def transform_image(self, image, do_grayscale): if do_grayscale: if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY) return np.stack([img_gray, img_gray, img_gray], axis=2) # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2) return image class ToBGR(TransformBase): """Converts image to BGR""" def transform_image(self, image): if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR) return img_bgr class RandomHorizontalFlip(TransformBase): """Horizontally flip image randomly with a probability p.""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def roll(self): return random.random() < self.probability def transform_image(self, image, do_flip): if do_flip: if torch.is_tensor(image): return image.flip((2,)) return np.fliplr(image).copy() return image def transform_coords(self, coords, image_shape, do_flip): if do_flip: coords = coords.clone() coords[1,:] = (image_shape[1] - 1) - coords[1,:] return coords def transform_mask(self, mask, do_flip): if do_flip: if torch.is_tensor(mask): return mask.flip((-1,)) return np.fliplr(mask).copy() return mask class Blur(TransformBase): """ Blur the image by applying a gaussian kernel with given sigma""" def __init__(self, sigma): super().__init__() if isinstance(sigma, (float, int)): sigma = (sigma, sigma) self.sigma = sigma self.filter_size = [math.ceil(2*s) for s in self.sigma] x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size] self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)] self.filter[0] = self.filter[0].view(1,1,-1,1) / 
class RandomBlur(TransformBase):
    """ Blur the image, with a given probability, by applying a gaussian kernel with given sigma"""

    def __init__(self, sigma, probability=0.1):
        super().__init__()
        self.probability = probability
        if isinstance(sigma, (float, int)):
            sigma = (sigma, sigma)
        self.sigma = sigma
        self.filter_size = [math.ceil(2 * s) for s in self.sigma]
        x_coord = [torch.arange(-sz, sz + 1, dtype=torch.float32) for sz in self.filter_size]
        self.filter = [torch.exp(-(x ** 2) / (2 * s ** 2)) for x, s in zip(x_coord, self.sigma)]
        # Normalize each 1-D kernel and shape it for separable conv2d.
        self.filter[0] = self.filter[0].view(1, 1, -1, 1) / self.filter[0].sum()
        self.filter[1] = self.filter[1].view(1, 1, 1, -1) / self.filter[1].sum()

    def roll(self):
        return random.random() < self.probability

    # NOTE(review): this method is named 'transform' while every sibling class
    # implements 'transform_image'; if the TransformBase dispatcher looks up
    # 'transform_<var>' methods, this blur is never applied. Confirm against
    # TransformBase.__call__ before renaming.
    def transform(self, image, do_blur=None):
        if do_blur is None:
            do_blur = False
        if do_blur:
            if torch.is_tensor(image):
                sz = image.shape[1:]
                im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0],
                               padding=(self.filter_size[0], 0))
                return F.conv2d(im1, self.filter[1],
                                padding=(0, self.filter_size[1])).view(-1, sz[0], sz[1])
            else:
                raise NotImplementedError
        else:
            return image


class RandomAffine(TransformBase):
    """Apply random affine transformation (flip, rotation, shear, scale)."""

    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,
                 border_mode='constant', pad_amount=0):
        """
        args:
            p_flip - probability of a horizontal flip.
            max_rotation - maximum rotation angle (degrees).
            max_shear - maximum shear factor along each axis.
            max_scale - maximum log-uniform scale perturbation.
            max_ar_factor - maximum log-uniform aspect-ratio perturbation.
            border_mode - 'constant' or 'replicate' border handling for warping.
            pad_amount - pixels of padding added on each side of the output.
        """
        super().__init__()
        self.p_flip = p_flip
        self.max_rotation = max_rotation
        self.max_shear = max_shear
        self.max_scale = max_scale
        self.max_ar_factor = max_ar_factor
        if border_mode == 'constant':
            self.border_flag = cv.BORDER_CONSTANT
        elif border_mode == 'replicate':
            # BUG FIX: this was 'self.border_flag == cv.BORDER_REPLICATE', a
            # no-op comparison that left border_flag unset and made every
            # 'replicate' transform fail later with AttributeError.
            self.border_flag = cv.BORDER_REPLICATE
        else:
            raise Exception
        self.pad_amount = pad_amount

    def roll(self):
        do_flip = random.random() < self.p_flip
        theta = random.uniform(-self.max_rotation, self.max_rotation)
        shear_x = random.uniform(-self.max_shear, self.max_shear)
        shear_y = random.uniform(-self.max_shear, self.max_shear)
        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))
        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))
        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)

    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):
        # Compose the 2x3 affine matrix: flip -> rotate -> shear -> scale -> pad.
        im_h, im_w = image_shape
        t_mat = np.identity(3)

        if do_flip:
            # (was a duplicated 'if do_flip: if do_flip:' in the original)
            t_mat[0, 0] = -1.0
            t_mat[0, 2] = im_w

        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)
        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))

        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],
                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],
                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_mat = t_scale @ t_rot @ t_shear @ t_mat

        t_mat[0, 2] += self.pad_amount
        t_mat[1, 2] += self.pad_amount

        return t_mat[:2, :]

    def transform_image(self, image, do_flip, theta, shear_values, scale_factors):
        if torch.is_tensor(image):
            raise Exception('Only supported for numpy input')
        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)
        output_sz = (image.shape[1] + 2 * self.pad_amount, image.shape[0] + 2 * self.pad_amount)
        image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR,
                                borderMode=self.border_flag)
        return image_t

    def transform_coords(self, coords, image_shape, do_flip, theta, shear_values, scale_factors):
        t_mat = self._construct_t_mat(image_shape, do_flip, theta, shear_values, scale_factors)
        t_mat_tensor = torch.from_numpy(t_mat).float()
        # Coords are stored (row, col); convert to homogeneous (x, y, 1).
        coords_xy1 = torch.stack((coords[1, :], coords[0, :], torch.ones_like(coords[1, :])))
        coords_xy_t = torch.mm(t_mat_tensor, coords_xy1)
        return coords_xy_t[[1, 0], :]

    def transform_mask(self, mask, do_flip, theta, shear_values, scale_factors):
        t_mat = self._construct_t_mat(mask.shape[:2], do_flip, theta, shear_values, scale_factors)
        output_sz = (mask.shape[1] + 2 * self.pad_amount, mask.shape[0] + 2 * self.pad_amount)
        mask_t = cv.warpAffine(mask.numpy(), t_mat, output_sz, flags=cv.INTER_NEAREST,
                               borderMode=self.border_flag)
        return torch.from_numpy(mask_t)
class BaseImageDataset(torch.utils.data.Dataset):
    """ Base class for image datasets """

    def __init__(self, name, root, image_loader=jpeg4py_loader):
        """
        args:
            name - Name of the dataset.
            root - The root path to the dataset.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
        """
        self.name = name
        self.root = root
        self.image_loader = image_loader

        self.image_list = []     # Contains the list of images.
        self.class_list = []

    def __len__(self):
        """Size of the dataset, i.e. the number of images."""
        return self.get_num_images()

    def __getitem__(self, index):
        """ Not to be used! Check get_frames() instead. """
        return None

    def get_name(self):
        """Name of the dataset; must be provided by subclasses."""
        raise NotImplementedError

    def get_num_images(self):
        """Number of images in the dataset."""
        return len(self.image_list)

    def has_class_info(self):
        return False

    def get_class_name(self, image_id):
        return None

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def get_images_in_class(self, class_name):
        raise NotImplementedError

    def has_segmentation_info(self):
        return False

    def get_image_info(self, seq_id):
        """Return a dict of annotation info for one image; subclass responsibility."""
        raise NotImplementedError

    def get_image(self, image_id, anno=None):
        """Return (image, anno, object_meta) for one image; subclass responsibility.

        args:
            image_id - index of the image.
            anno (None) - annotation for the image; loaded via get_image_info() if None.
        """
        raise NotImplementedError
""" self.name = name self.root = root self.image_loader = image_loader self.sequence_list = [] # Contains the list of sequences. self.class_list = [] def __len__(self): """ Returns size of the dataset returns: int - number of samples in the dataset """ return self.get_num_sequences() def __getitem__(self, index): """ Not to be used! Check get_frames() instead. """ return None def is_video_sequence(self): """ Returns whether the dataset is a video dataset or an image dataset returns: bool - True if a video dataset """ return True def is_synthetic_video_dataset(self): """ Returns whether the dataset contains real videos or synthetic returns: bool - True if a video dataset """ return False def get_name(self): """ Name of the dataset returns: string - Name of the dataset """ raise NotImplementedError def get_num_sequences(self): """ Number of sequences in a dataset returns: int - number of sequences in the dataset.""" return len(self.sequence_list) def has_class_info(self): return False def has_occlusion_info(self): return False def get_num_classes(self): return len(self.class_list) def get_class_list(self): return self.class_list def get_sequences_in_class(self, class_name): raise NotImplementedError def has_segmentation_info(self): return False def get_sequence_info(self, seq_id): """ Returns information about a particular sequences, args: seq_id - index of the sequence returns: Dict """ raise NotImplementedError def get_frames(self, seq_id, frame_ids, anno=None): """ Get a set of frames from a particular sequence args: seq_id - index of sequence frame_ids - a list of frame numbers anno(None) - The annotation for the sequence (see get_sequence_info). If None, they will be loaded. returns: list - List of frames corresponding to frame_ids list - List of dicts for each frame dict - A dict containing meta information about the sequence, e.g. class of the target object. 
""" raise NotImplementedError ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/coco.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader import torch from pycocotools.coco import COCO import random from collections import OrderedDict from ltr.admin.environment import env_settings class MSCOCO(BaseImageDataset): """ The COCO object detection dataset. Publication: Microsoft COCO: Common Objects in Context. Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick ECCV, 2014 https://arxiv.org/pdf/1405.0312.pdf Download the images along with annotations from http://cocodataset.org/#download. The root folder should be organized as follows. - coco_root - annotations - instances_train2014.json - instances_train2017.json - images - train2014 - train2017 Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi. """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None, split="train", version="2014"): """ args: root - path to coco root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 split - 'train' or 'val'. 
version - version of coco dataset (2014 or 2017) """ root = env_settings().coco_dir if root is None else root super().__init__('COCO', root, image_loader) self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version)) self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version)) self.coco_set = COCO(self.anno_path) self.cats = self.coco_set.cats self.class_list = self.get_class_list() # the parent class thing would happen in the sampler self.image_list = self._get_image_list(min_area=min_area) if data_fraction is not None: self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction)) self.im_per_class = self._build_im_per_class() def _get_image_list(self, min_area=None): ann_list = list(self.coco_set.anns.keys()) image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0] if min_area is not None: image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area] return image_list def get_num_classes(self): return len(self.class_list) def get_name(self): return 'coco' def has_class_info(self): return True def has_segmentation_info(self): return True def get_class_list(self): class_list = [] for cat_id in self.cats.keys(): class_list.append(self.cats[cat_id]['name']) return class_list def _build_im_per_class(self): im_per_class = {} for i, im in enumerate(self.image_list): class_name = self.cats[self.coco_set.anns[im]['category_id']]['name'] if class_name not in im_per_class: im_per_class[class_name] = [i] else: im_per_class[class_name].append(i) return im_per_class def get_images_in_class(self, class_name): return self.im_per_class[class_name] def get_image_info(self, im_id): anno = self._get_anno(im_id) bbox = torch.Tensor(anno['bbox']).view(4,) mask = torch.Tensor(self.coco_set.annToMask(anno)) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def _get_anno(self, im_id): 
anno = self.coco_set.anns[self.image_list[im_id]] return anno def _get_image(self, im_id): path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name'] img = self.image_loader(os.path.join(self.img_pth, path)) return img def get_meta_info(self, im_id): try: cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']] object_meta = OrderedDict({'object_class_name': cat_dict_current['name'], 'motion_class': None, 'major_class': cat_dict_current['supercategory'], 'root_class': None, 'motion_adverb': None}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_class_name(self, im_id): cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']] return cat_dict_current['name'] def get_image(self, image_id, anno=None): frame = self._get_image(image_id) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/coco_seq.py ================================================ import os from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader import torch import random from pycocotools.coco import COCO from collections import OrderedDict from ltr.admin.environment import env_settings class MSCOCOSeq(BaseVideoDataset): """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1. Publication: Microsoft COCO: Common Objects in Context. Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollar and C. 
class MSCOCOSeq(BaseVideoDataset):
    """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,
        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
        """
        args:
            root - path to the coco dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                                                  jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,
                                                  opencv's imread is used.
            data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
                                   images will be used
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO', root, image_loader)

        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))

        # Load the COCO set.
        self.coco_set = COCO(self.anno_path)
        self.cats = self.coco_set.cats
        self.class_list = self.get_class_list()

        self.sequence_list = self._get_sequence_list()

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))
        self.seq_per_class = self._build_seq_per_class()

    def _get_sequence_list(self):
        # Each non-crowd annotation id is one length-1 'sequence'.
        ann_list = list(self.coco_set.anns.keys())
        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]
        return seq_list

    def is_video_sequence(self):
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco'

    def has_class_info(self):
        return True

    def get_class_list(self):
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def _build_seq_per_class(self):
        # Map class name -> list of indices into self.sequence_list.
        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)
        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def get_sequence_info(self, seq_id):
        anno = self._get_anno(seq_id)

        bbox = torch.Tensor(anno['bbox']).view(1, 4)
        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)

        # A box is valid only with strictly positive width and height.
        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, seq_id):
        anno = self.coco_set.anns[self.sequence_list[seq_id]]
        return anno

    def _get_frames(self, seq_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, seq_id):
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except (KeyError, IndexError):
            # BUG FIX: was a bare 'except:' which also masked unrelated errors
            # (including KeyboardInterrupt); only a failed lookup should fall
            # back to the empty meta record.
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, seq_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
        return cat_dict_current['name']

    def get_frames(self, seq_id=None, frame_ids=None, anno=None):
        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
        # list containing these replicated images.
        frame = self._get_frames(seq_id)

        frame_list = [frame.copy() for _ in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[0, ...] for _ in frame_ids]

        object_meta = self.get_meta_info(seq_id)

        return frame_list, anno_frames, object_meta
class Davis(VOSDatasetBase):
    """ The Davis VOS dataset

    Publication:
        A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
        F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung
        CVPR, 2016
        http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf

    Download the dataset from https://davischallenge.org/davis2017/code.html
    """

    def __init__(self, root=None, sequences=None, version='2017', split='train', multiobj=True,
                 vis_threshold=10, image_loader=jpeg4py_loader):
        """
        args:
            root - Dataset root path. If unset, it uses the path in your local.py config.
            sequences - List of sequence names. Limit to a subset of sequences if not None.
            version - '2016' or '2017'
            split - Any name in DAVIS/ImageSets/
            multiobj - Whether the dataset will return all objects in a sequence or multiple sequences
                       with one object in each.
            vis_threshold - Minimum number of pixels required to consider a target object "visible".
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
        """
        # Resolve the dataset root from the environment settings when not given.
        if version == '2017':
            if split in ['train', 'val']:
                root = env_settings().davis_dir if root is None else root
            elif split in ['test-dev']:
                root = env_settings().davis_testdev_dir if root is None else root
            else:
                raise Exception('Unknown split {}'.format(split))
        else:
            root = env_settings().davis16_dir if root is None else root

        super().__init__(name='DAVIS', root=Path(root), version=version, split=split,
                         multiobj=multiobj, vis_threshold=vis_threshold, image_loader=image_loader)

        base_path = self.root
        self._jpeg_path = base_path / 'JPEGImages' / '480p'
        self._anno_path = base_path / 'Annotations' / '480p'

        # Use the cached meta file when present; otherwise generate and cache it.
        meta_path = base_path / "generated_meta.json"
        if meta_path.exists():
            self.gmeta = VOSMeta(filename=meta_path)
        else:
            self.gmeta = VOSMeta.generate('DAVIS', self._jpeg_path, self._anno_path)
            self.gmeta.save(meta_path)

        if sequences is None:
            if self.split != 'all':
                split_file = base_path / 'ImageSets' / self.version / (self.split + '.txt')
                sequences = open(split_file).read().splitlines()
            else:
                sequences = [p for p in sorted(self._jpeg_path.glob("*")) if p.is_dir()]

        self.sequence_names = sequences
        self._samples = []

        for seq_name in sequences:
            obj_ids = self.gmeta.get_obj_ids(seq_name)
            if self.multiobj:
                # All objects of the sequence form one sample.
                self._samples.append((seq_name, obj_ids))
            else:
                # One sample per object.
                self._samples.extend([(seq_name, [obj_id]) for obj_id in obj_ids])

        print("%s loaded." % self.get_name())

    def _construct_sequence(self, sequence_info):
        seq_name = sequence_info['sequence']
        images, gt_labels, gt_bboxes = self.get_paths_and_bboxes(sequence_info)
        return Sequence(name=seq_name, frames=images, dataset='DAVIS',
                        ground_truth_rect=gt_bboxes, ground_truth_seg=gt_labels,
                        object_ids=sequence_info['object_ids'], multiobj_mode=self.multiobj)
class ECSSD(BaseImageDataset):
    """ Extended Complex Scene Saliency Dataset (ECSSD)

    Publication:
        Hierarchical Image Saliency Detection on Extended CSSD
        Jianping Shi, Qiong Yan, Li Xu, Jiaya Jia
        TPAMI, 2016
        https://arxiv.org/pdf/1408.5418.pdf

    Download the dataset from http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):
        """
        args:
            root - path to ECSSD root folder
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            min_area - Objects with area less than min_area are filtered out. Default is 0.0
        """
        root = env_settings().ecssd_dir if root is None else root
        super().__init__('ECSSD', root, image_loader)

        self.image_list = self._load_dataset(min_area=min_area)

        if data_fraction is not None:
            raise NotImplementedError

    def _load_dataset(self, min_area=None):
        # ECSSD images are numbered 0001..1000; keep those whose foreground
        # mask exceeds min_area (or all of them if no threshold is given).
        kept = []
        for idx in range(1, 1001):
            mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(idx)))
            if min_area is None or (mask > 0).sum() > min_area:
                kept.append(idx)
        return kept

    def get_name(self):
        return 'ecssd'

    def has_segmentation_info(self):
        return True

    def get_image_info(self, im_id):
        raw = imread_indexed(os.path.join(self.root, 'ground_truth_mask',
                                          '{:04d}.png'.format(self.image_list[im_id])))
        mask = torch.Tensor(raw == 255)
        bbox = masks_to_bboxes(mask, fmt='t').view(4,)

        # A box is valid only with strictly positive width and height.
        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def get_meta_info(self, im_id):
        # ECSSD carries no class/motion annotations.
        return OrderedDict({'object_class_name': None,
                            'motion_class': None,
                            'major_class': None,
                            'root_class': None,
                            'motion_adverb': None})

    def get_image(self, image_id, anno=None):
        frame = self.image_loader(os.path.join(self.root, 'images',
                                               '{:04d}.jpg'.format(self.image_list[image_id])))
        if anno is None:
            anno = self.get_image_info(image_id)
        object_meta = self.get_meta_info(image_id)
        return frame, anno, object_meta
class Got10k(BaseVideoDataset):
    """ GOT-10k dataset.

    Publication:
        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
        Lianghua Huang, Xin Zhao, and Kaiqi Huang
        arXiv:1810.11981, 2018
        https://arxiv.org/pdf/1810.11981.pdf

    Download dataset from http://got-10k.aitestunion.com/downloads
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):
        """
        args:
            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,
                    not NOT the official got-10k validation split. To use the official validation split, provide that as
                    the root folder instead.
            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or
                      'seq_ids' options can be used at the same time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
        """
        root = env_settings().got10k_dir if root is None else root
        super().__init__('GOT10k', root, image_loader)

        # all folders inside the root
        self.sequence_list = self._get_sequence_list()

        # seq_id is the index of the folder inside the got10k root path
        if split is not None:
            if seq_ids is not None:
                raise ValueError('Cannot set both split_name and seq_ids.')
            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')
            elif split == 'val':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')
            elif split == 'vottrain':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')
            elif split == 'votval':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # NOTE(review): read_csv(squeeze=True) is deprecated since pandas 1.4 and
            # removed in 2.0; migrate to .squeeze("columns") once the pinned pandas
            # version allows.
            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()
        elif seq_ids is None:
            seq_ids = list(range(0, len(self.sequence_list)))

        self.sequence_list = [self.sequence_list[i] for i in seq_ids]

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))

        self.sequence_meta_info = self._load_meta_info()
        self.seq_per_class = self._build_seq_per_class()

        self.class_list = list(self.seq_per_class.keys())
        self.class_list.sort()

    def get_name(self):
        return 'got10k'

    def has_class_info(self):
        return True

    def has_occlusion_info(self):
        return True

    def _load_meta_info(self):
        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}
        return sequence_meta_info

    def _read_meta(self, seq_path):
        # Lines 5-9 of meta_info.ini carry the class/motion annotations; fall
        # back to an all-None record when the file is missing or malformed.
        try:
            with open(os.path.join(seq_path, 'meta_info.ini')) as f:
                meta_info = f.readlines()
            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],
                                       'motion_class': meta_info[6].split(': ')[-1][:-1],
                                       'major_class': meta_info[7].split(': ')[-1][:-1],
                                       'root_class': meta_info[8].split(': ')[-1][:-1],
                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})
        except (OSError, IndexError):
            # BUG FIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt and unrelated programming errors.
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def _build_seq_per_class(self):
        # Map class name -> list of indices into self.sequence_list.
        seq_per_class = {}
        for i, s in enumerate(self.sequence_list):
            object_class = self.sequence_meta_info[s]['object_class_name']
            if object_class in seq_per_class:
                seq_per_class[object_class].append(i)
            else:
                seq_per_class[object_class] = [i]
        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _get_sequence_list(self):
        with open(os.path.join(self.root, 'list.txt')) as f:
            dir_list = list(csv.reader(f))
        dir_list = [dir_name[0] for dir_name in dir_list]
        return dir_list

    def _read_bb_anno(self, seq_path):
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32,
                             na_filter=False, low_memory=False).values
        return torch.tensor(gt)

    def _read_target_visible(self, seq_path):
        # Read full occlusion and out_of_view
        occlusion_file = os.path.join(seq_path, "absence.label")
        cover_file = os.path.join(seq_path, "cover.label")

        with open(occlusion_file, 'r', newline='') as f:
            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])
        with open(cover_file, 'r', newline='') as f:
            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])

        # Visible iff not absent and at least partially uncovered; cover is an
        # 8-level rating, so cover/8 approximates the visible fraction.
        target_visible = ~occlusion & (cover > 0).byte()
        visible_ratio = cover.float() / 8
        return target_visible, visible_ratio

    def _get_sequence_path(self, seq_id):
        return os.path.join(self.root, self.sequence_list[seq_id])

    def get_sequence_info(self, seq_id):
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible, visible_ratio = self._read_target_visible(seq_path)
        visible = visible & valid.byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}

    def _get_frame_path(self, seq_path, frame_id):
        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1))    # frames start from 1

    def _get_frame(self, seq_path, frame_id):
        return self.image_loader(self._get_frame_path(seq_path, frame_id))

    def get_class_name(self, seq_id):
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]
        return obj_meta['object_class_name']

    def get_frames(self, seq_id, frame_ids, anno=None):
        seq_path = self._get_sequence_path(seq_id)
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]

        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        return frame_list, anno_frames, obj_meta
The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 """ root = env_settings().hkuis_dir if root is None else root super().__init__('HKUIS', root, image_loader) self.image_list, self.anno_list = self._load_dataset(min_area=min_area) if data_fraction is not None: raise NotImplementedError def _load_dataset(self, min_area=None): files_list = os.listdir(os.path.join(self.root, 'imgs')) image_list = [f[:-4] for f in files_list] images = [] annos = [] for f in image_list: a = imread_indexed(os.path.join(self.root, 'gt', '{}.png'.format(f))) if min_area is None or (a > 0).sum() > min_area: im = opencv_loader(os.path.join(self.root, 'imgs', '{}.png'.format(f))) images.append(im) annos.append(a) return images, annos def get_name(self): return 'hku-is' def has_segmentation_info(self): return True def get_image_info(self, im_id): mask = self.anno_list[im_id] mask = torch.Tensor(mask == 255) bbox = masks_to_bboxes(mask, fmt='t').view(4,) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def get_meta_info(self, im_id): object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_image(self, image_id, anno=None): frame = self.image_list[image_id] if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/imagenetvid.py ================================================ import os from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import default_image_loader import xml.etree.ElementTree as ET import json import torch import random from collections import OrderedDict from ltr.admin.environment import env_settings def 
get_target_to_image_ratio(seq): anno = torch.Tensor(seq['anno']) img_sz = torch.Tensor(seq['image_size']) return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt() class ImagenetVID(BaseVideoDataset): """ Imagenet VID dataset. Publication: ImageNet Large Scale Visual Recognition Challenge Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei IJCV, 2015 https://arxiv.org/pdf/1409.0575.pdf Download the dataset from http://image-net.org/ """ def __init__(self, root=None, image_loader=default_image_loader, min_length=0, max_target_area=1): """ args: root - path to the imagenet vid dataset. image_loader (default_image_loader) - The function to read the images. If installed, jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used. min_length - Minimum allowed sequence length. max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets which cover complete image. 
""" root = env_settings().imagenet_dir if root is None else root super().__init__(root, image_loader) cache_file = os.path.join(root, 'cache.json') if os.path.isfile(cache_file): # If available, load the pre-processed cache file containing meta-info for each sequence with open(cache_file, 'r') as f: sequence_list_dict = json.load(f) self.sequence_list = sequence_list_dict else: # Else process the imagenet annotations and generate the cache file self.sequence_list = self._process_anno(root) with open(cache_file, 'w') as f: json.dump(self.sequence_list, f) # Filter the sequences based on min_length and max_target_area in the first frame self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and get_target_to_image_ratio(x) < max_target_area] def get_name(self): return 'imagenetvid' def get_num_sequences(self): return len(self.sequence_list) def get_sequence_info(self, seq_id): bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno']) valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0) visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte() return {'bbox': bb_anno, 'valid': valid, 'visible': visible} def _get_frame(self, sequence, frame_id): set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id']) vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id']) frame_number = frame_id + sequence['start_frame'] frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name, '{:06d}.JPEG'.format(frame_number)) return self.image_loader(frame_path) def get_frames(self, seq_id, frame_ids, anno=None): sequence = self.sequence_list[seq_id] frame_list = [self._get_frame(sequence, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) # Create anno dict anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] # added the class info to the meta info object_meta = OrderedDict({'object_class': 
sequence['class_name'], 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta def _process_anno(self, root): # Builds individual tracklets base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train') all_sequences = [] for set in sorted(os.listdir(base_vid_anno_path)): set_id = int(set.split('_')[-1]) for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))): vid_id = int(vid.split('_')[-1]) anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid))) frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0])) image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)] objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object') for f in anno_files] tracklets = {} # Find all tracklets along with start frame for f_id, all_targets in enumerate(objects): for target in all_targets: tracklet_id = target.find('trackid').text if tracklet_id not in tracklets: tracklets[tracklet_id] = f_id for tracklet_id, tracklet_start in tracklets.items(): tracklet_anno = [] target_visible = [] class_name_id = None for f_id in range(tracklet_start, len(objects)): found = False for target in objects[f_id]: if target.find('trackid').text == tracklet_id: if not class_name_id: class_name_id = target.find('name').text x1 = int(target.find('bndbox/xmin').text) y1 = int(target.find('bndbox/ymin').text) x2 = int(target.find('bndbox/xmax').text) y2 = int(target.find('bndbox/ymax').text) tracklet_anno.append([x1, y1, x2 - x1, y2 - y1]) target_visible.append(target.find('occluded').text == '0') found = True break if not found: break new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id, 'start_frame': tracklet_start, 'anno': tracklet_anno, 'target_visible': target_visible, 'image_size': image_size} all_sequences.append(new_sequence) return all_sequences 
================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/lasot.py ================================================ import os import os.path import torch import numpy as np import pandas import csv import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader from ltr.admin.environment import env_settings class Lasot(BaseVideoDataset): """ LaSOT dataset. Publication: LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling CVPR, 2019 https://arxiv.org/pdf/1809.07845.pdf Download the dataset from https://cis.temple.edu/lasot/download.html """ def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): """ args: root - path to the lasot dataset. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the videos with subscripts -1, -3, and -5 from each class will be used for training. split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of vid_ids or split option can be used at a time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().lasot_dir if root is None else root super().__init__('LaSOT', root, image_loader) # Keep a list of all classes self.class_list = [f for f in os.listdir(self.root)] self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)} self.sequence_list = self._build_sequence_list(vid_ids, split) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.seq_per_class = self._build_class_list() def _build_sequence_list(self, vid_ids=None, split=None): if split is not None: if vid_ids is not None: raise ValueError('Cannot set both split_name and vid_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt') else: raise ValueError('Unknown split name.') sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist() elif vid_ids is not None: sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids] else: raise ValueError('Set either split_name or vid_ids.') return sequence_list def _build_class_list(self): seq_per_class = {} for seq_id, seq_name in enumerate(self.sequence_list): class_name = seq_name.split('-')[0] if class_name in seq_per_class: seq_per_class[class_name].append(seq_id) else: seq_per_class[class_name] = [seq_id] return seq_per_class def get_name(self): return 'lasot' def has_class_info(self): return True def has_occlusion_info(self): return True def get_num_sequences(self): return len(self.sequence_list) def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, 
low_memory=False).values return torch.tensor(gt) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "full_occlusion.txt") out_of_view_file = os.path.join(seq_path, "out_of_view.txt") with open(occlusion_file, 'r', newline='') as f: occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) with open(out_of_view_file, 'r') as f: out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) target_visible = ~occlusion & ~out_of_view return target_visible def _get_sequence_path(self, seq_id): seq_name = self.sequence_list[seq_id] class_name = seq_name.split('-')[0] vid_id = seq_name.split('-')[1] return os.path.join(self.root, class_name, class_name + '-' + vid_id) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = self._read_target_visible(seq_path) & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def _get_class(self, seq_path): raw_class = seq_path.split('/')[-2] return raw_class def get_class_name(self, seq_id): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': 
None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/lvis.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader_w_failsafe import torch import random import lvis.lvis as lvis_pk from collections import OrderedDict from ltr.admin.environment import env_settings class LVIS(BaseImageDataset): """ The LVIS object detection dataset Publication: LVIS: A Dataset for Large Vocabulary Instance Segmentation Agrim Gupta, Piotr Dollár, and Ross Girshick CVPR, 2019 https://arxiv.org/pdf/1908.03195.pdf Download the images along with annotations from https://www.lvisdataset.org/dataset. The root folder should be organized as follows. - lvis_root - annotations - lvis_v0.5_train.json - lvis_v0.5_val.json - images - val2017 - train2017 Note: You also have to install the lvis Python API from https://github.com/lvis-dataset/lvis-api """ def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, min_area=None, split="train"): """ args: root - path to lvis root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 split - 'train' or 'val'. """ root = env_settings().lvis_dir if root is None else root super().__init__('LVIS', root, image_loader) self.img_pth = os.path.join(root, 'images', f'{split}2017/') self.anno_path = os.path.join(root, 'annotations', f'lvis_v0.5_{split}.json') # Load the LVIS set. 
self.lvis_set = lvis_pk.LVIS(self.anno_path) self.cats = self.lvis_set.cats self.class_list = self.get_class_list() # the parent class thing would happen in the sampler self.image_list = self._get_image_list(min_area=min_area) if data_fraction is not None: self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction)) self.im_per_class = self._build_im_per_class() def _get_image_list(self, min_area=None): im_list = list(self.lvis_set.anns.keys()) # No 'iscrowd' information in LVIS if min_area is not None: im_list = [s for s in im_list if self.lvis_set.anns[s]['area'] > min_area] return im_list def get_num_classes(self): return len(self.class_list) def get_name(self): return 'lvis' def has_class_info(self): return True def get_class_list(self): class_list = [] for cat_id in self.cats.keys(): class_list.append(self.cats[cat_id]['name']) return class_list def has_segmentation_info(self): return True def _build_im_per_class(self): im_per_class = {} for i, im in enumerate(self.image_list): class_name = self.cats[self.lvis_set.anns[im]['category_id']]['name'] if class_name not in im_per_class: im_per_class[class_name] = [i] else: im_per_class[class_name].append(i) return im_per_class def get_images_in_class(self, class_name): return self.im_per_class[class_name] def get_image_info(self, im_id): anno = self._get_anno(im_id) bbox = torch.Tensor(anno['bbox']).view(4,) mask = torch.Tensor(self.lvis_set.ann_to_mask(anno)) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def _get_anno(self, im_id): anno = self.lvis_set.anns[self.image_list[im_id]] return anno def _get_image(self, im_id): path = self.lvis_set.load_imgs([self.lvis_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name'] img = self.image_loader(os.path.join(self.img_pth, path)) return img def get_meta_info(self, im_id): try: cat_dict_current = 
self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']] object_meta = OrderedDict({'object_class_name': cat_dict_current['name'], 'motion_class': None, 'major_class': None, # No 'supercategory' information available in LVIS 'root_class': None, 'motion_adverb': None}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_class_name(self, im_id): cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']] return cat_dict_current['name'] def get_image(self, image_id, anno=None): frame = self._get_image(image_id) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/msra10k.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader, imread_indexed import torch from collections import OrderedDict from ltr.admin.environment import env_settings from ltr.data.bounding_box_utils import masks_to_bboxes class MSRA10k(BaseImageDataset): """ MSRA10k salient object detection dataset Publication: Global contrast based salient region detection Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, and Shi-Min Hu TPAMI, 2015 https://mmcheng.net/mftp/Papers/SaliencyTPAMI.pdf Download dataset from https://mmcheng.net/msra10k/ """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None): """ args: root - path to MSRA10k root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 """ root = env_settings().msra10k_dir if root is None else root super().__init__('MSRA10k', root, image_loader) self.image_list = self._load_dataset(min_area=min_area) if data_fraction is not None: raise NotImplementedError def _load_dataset(self, min_area=None): files_list = os.listdir(os.path.join(self.root, 'Imgs')) image_list = [f[:-4] for f in files_list if f[-3:] == 'jpg'] images = [] for f in image_list: a = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(f))) if min_area is None or (a > 0).sum() > min_area: images.append(f) return images def get_name(self): return 'msra10k' def has_segmentation_info(self): return True def get_image_info(self, im_id): mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(self.image_list[im_id]))) mask = torch.Tensor(mask == 255) bbox = masks_to_bboxes(mask, fmt='t').view(4,) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def get_meta_info(self, im_id): object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_image(self, image_id, anno=None): frame = self.image_loader(os.path.join(self.root, 'Imgs', '{}.jpg'.format(self.image_list[image_id]))) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/sbd.py ================================================ from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader_w_failsafe import torch from collections import OrderedDict import os from scipy.io import loadmat from ltr.data.bounding_box_utils import masks_to_bboxes from 
ltr.admin.environment import env_settings class SBD(BaseImageDataset): """ Semantic Boundaries Dataset and Benchmark (SBD) Publication: Semantic contours from inverse detectors Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji and Jitendra Malik ICCV, 2011 http://home.bharathh.info/pubs/pdfs/BharathICCV2011.pdf Download dataset from: http://home.bharathh.info/pubs/codes/SBD/download.html """ def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, split="train"): """ args: root - path to SBD root folder image_loader - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default split - dataset split ("train", "train_noval", "val") """ root = env_settings().sbd_dir if root is None else root super().__init__('SBD', root, image_loader) assert split in ["train", "train_noval", "val"] self.root = root self.image_path_list, self.anno_file_list = self._load_dataset(split) # Load mat fine anno_list = [loadmat(a) for a in self.anno_file_list] self.image_list = self._construct_image_list(anno_list) if data_fraction is not None: raise NotImplementedError def _load_dataset(self, split): split_f = os.path.join(self.root, split.rstrip('\n') + '.txt') with open(os.path.join(split_f), "r") as f: file_names = [x.strip() for x in f.readlines()] image_list = [os.path.join(self.root, 'img', x + ".jpg") for x in file_names] anno_list = [os.path.join(self.root, 'inst', x + ".mat") for x in file_names] assert (len(image_list) == len(anno_list)) return image_list, anno_list def _get_mask_from_mat(self, mat): return torch.tensor(mat['GTinst'][0]['Segmentation'][0]) def _construct_image_list(self, anno_list): image_list = [] for im_id, a in enumerate(anno_list): mask = self._get_mask_from_mat(a) for instance_id in range(1, mask.max().item() + 1): image_list.append((im_id, instance_id)) return image_list def 
get_name(self): return 'sbd' def has_segmentation_info(self): return True def get_image_info(self, im_id): image_id, instance_id = self.image_list[im_id] anno_mat = loadmat(self.anno_file_list[image_id]) mask = self._get_mask_from_mat(anno_mat) mask = (mask == instance_id).float() bbox = masks_to_bboxes(mask, fmt='t') valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def _get_image(self, im_id): image_id, _ = self.image_list[im_id] img = self.image_loader(self.image_path_list[image_id]) return img def get_meta_info(self, im_id): object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_image(self, image_id, anno=None): image = self._get_image(image_id) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return image, anno, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/synthetic_video.py ================================================ from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.bounding_box_utils import masks_to_bboxes class SyntheticVideo(BaseVideoDataset): """ Create a synthetic video dataset from an image dataset by applying a random transformation to images. """ def __init__(self, base_image_dataset, transform=None): """ args: base_image_dataset - Image dataset used for generating synthetic videos transform - Set of transforms to be applied to the images to generate synthetic video. 
""" super().__init__(base_image_dataset.get_name() + '_syn_vid', base_image_dataset.root, base_image_dataset.image_loader) self.base_image_dataset = base_image_dataset self.transform = transform def get_name(self): return self.name def is_video_sequence(self): return False def has_class_info(self): return self.base_image_dataset.has_class_info() def has_occlusion_info(self): return True def get_num_sequences(self): return self.base_image_dataset.get_num_images() def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.get_images_in_class[class_name] def get_sequence_info(self, seq_id): image_info = self.base_image_dataset.get_image_info(seq_id) image_info = {k: v.unsqueeze(0) for k, v in image_info.items()} return image_info def get_class_name(self, seq_id): return self.base_image_dataset.get_class_name(seq_id) def get_frames(self, seq_id, frame_ids, anno=None): frame, anno, object_meta = self.base_image_dataset.get_image(seq_id, anno=anno) frame_list = [frame.copy() for _ in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[0].clone() for f_id in frame_ids] if self.transform is not None: if 'mask' in anno_frames.keys(): frame_list, anno_frames['bbox'], anno_frames['mask'] = self.transform(image=frame_list, bbox=anno_frames['bbox'], mask=anno_frames['mask'], joint=False) anno_frames['bbox'] = [masks_to_bboxes(m, fmt='t') for m in anno_frames['mask']] else: frame_list, anno_frames['bbox'] = self.transform(image=frame_list, bbox=anno_frames['bbox'], joint=False) object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id), 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/synthetic_video_blend.py 
================================================ from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.bounding_box_utils import masks_to_bboxes import random import torch class SyntheticVideoBlend(BaseVideoDataset): """ Create a synthetic video by applying random transformations to an object (foreground) and pasting it in a background image. Currently, the foreground object is pasted at random locations in different frames. """ def __init__(self, foreground_image_dataset, background_image_dataset, foreground_transform=None, background_transform=None): """ args: foreground_image_dataset - A segmentation dataset from which foreground objects are cropped using the segmentation mask background_image_dataset - Dataset used to sample background image for the synthetic video foreground_transform - Random transformations to be applied to the foreground object in every frame background_transform - Random transformations to be applied to the background image in every frame """ assert foreground_image_dataset.has_segmentation_info() super().__init__(foreground_image_dataset.get_name() + '_syn_vid_blend', foreground_image_dataset.root, foreground_image_dataset.image_loader) self.foreground_image_dataset = foreground_image_dataset self.background_image_dataset = background_image_dataset self.foreground_transform = foreground_transform self.background_transform = background_transform def get_name(self): return self.name def is_video_sequence(self): return False def has_class_info(self): return self.foreground_image_dataset.has_class_info() def has_occlusion_info(self): return True def get_num_sequences(self): return self.foreground_image_dataset.get_num_images() def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.get_images_in_class[class_name] def get_sequence_info(self, seq_id): image_info = self.foreground_image_dataset.get_image_info(seq_id) image_info = {k: 
v.unsqueeze(0) for k, v in image_info.items()} return image_info def get_class_name(self, seq_id): return self.foreground_image_dataset.get_class_name(seq_id) def _paste_target(self, fg_image, fg_box, fg_mask, bg_image, paste_loc): fg_mask = fg_mask.view(fg_mask.shape[0], fg_mask.shape[1], 1) fg_box = fg_box.long().tolist() x1 = int(paste_loc[0] - 0.5 * fg_box[2]) x2 = x1 + fg_box[2] y1 = int(paste_loc[1] - 0.5 * fg_box[3]) y2 = y1 + fg_box[3] x1_pad = max(-x1, 0) y1_pad = max(-y1, 0) x2_pad = max(x2 - bg_image.shape[1], 0) y2_pad = max(y2 - bg_image.shape[0], 0) bg_mask = torch.zeros((bg_image.shape[0], bg_image.shape[1], 1), dtype=fg_mask.dtype, device=fg_mask.device) if x1_pad >= fg_mask.shape[1] or x2_pad >= fg_mask.shape[1] or y1_pad >= fg_mask.shape[0] or y2_pad >= \ fg_mask.shape[0]: return bg_image, bg_mask.squeeze(-1) fg_mask_patch = fg_mask[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad, fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :] fg_image_patch = fg_image[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad, fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :] bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = \ bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] * (1 - fg_mask_patch.numpy()) \ + fg_mask_patch.numpy() * fg_image_patch bg_mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = fg_mask_patch return bg_image, bg_mask.squeeze(-1) def get_frames(self, seq_id, frame_ids, anno=None): # Handle foreground fg_frame, fg_anno, fg_object_meta = self.foreground_image_dataset.get_image(seq_id, anno=anno) fg_frame_list = [fg_frame.copy() for _ in frame_ids] fg_anno_frames = {} for key, value in fg_anno.items(): fg_anno_frames[key] = [value[0].clone() for f_id in frame_ids] if self.foreground_transform is not None: fg_frame_list, fg_anno_frames['bbox'], fg_anno_frames['mask'] = self.foreground_transform( image=fg_frame_list, bbox=fg_anno_frames['bbox'], mask=fg_anno_frames['mask'], joint=False) # Sample a random 
background bg_seq_id = random.randint(0, self.background_image_dataset.get_num_images() - 1) bg_frame, bg_anno, _ = self.background_image_dataset.get_image(bg_seq_id) bg_frame_list = [bg_frame.copy() for _ in frame_ids] bg_anno_frames = {} for key, value in bg_anno.items(): # Note: Since we get bg anno from image dataset, it does not has frame dimension bg_anno_frames[key] = [value.clone() for f_id in frame_ids] if self.background_transform is not None: if 'mask' in bg_anno_frames.keys(): bg_frame_list, bg_anno_frames['bbox'], bg_anno_frames['mask'] = self.background_transform( image=bg_frame_list, bbox=bg_anno_frames['bbox'], mask=bg_anno_frames['mask'], joint=False) else: bg_frame_list, bg_anno_frames['bbox'] = self.background_transform( image=bg_frame_list, bbox=bg_anno_frames['bbox'], joint=False) for i in range(len(frame_ids)): # To be safe, get target bb for the mask bbox = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t') loc_y = random.randint(0, bg_frame_list[i].shape[0] - 1) loc_x = random.randint(0, bg_frame_list[i].shape[1] - 1) paste_loc = (loc_x, loc_y) fg_frame_list[i], fg_anno_frames['mask'][i] = self._paste_target(fg_frame_list[i], bbox, fg_anno_frames['mask'][i], bg_frame_list[i], paste_loc) fg_anno_frames['bbox'][i] = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t') object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id), 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return fg_frame_list, fg_anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/tracking_net.py ================================================ import torch import os import os.path import numpy as np import pandas import random from collections import OrderedDict from ltr.data.image_loader import jpeg4py_loader from .base_video_dataset import BaseVideoDataset from ltr.admin.environment import env_settings def list_sequences(root, set_ids): """ Lists 
all the videos in the input set_ids. Returns a list of tuples (set_id, video_name) args: root: Root directory to TrackingNet set_ids: Sets (0-11) which are to be used returns: list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence """ sequence_list = [] for s in set_ids: anno_dir = os.path.join(root, "TRAIN_" + str(s), "anno") sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')] sequence_list += sequences_cur_set return sequence_list class TrackingNet(BaseVideoDataset): """ TrackingNet dataset. Publication: TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild. Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem ECCV, 2018 https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit. """ def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None): """ args: root - The path to the TrackingNet folder, containing the training sets. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the sets (0 - 11) will be used. data_fraction - Fraction of dataset to be used. The complete dataset is used by default """ root = env_settings().trackingnet_dir if root is None else root super().__init__('TrackingNet', root, image_loader) if set_ids is None: set_ids = [i for i in range(12)] self.set_ids = set_ids # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and # video_name for each sequence self.sequence_list = list_sequences(self.root, self.set_ids) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction)) self.seq_to_class_map, self.seq_per_class = self._load_class_info() # we do not have the class_lists for the tracking net self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def _load_class_info(self): ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt') with open(class_map_path, 'r') as f: seq_to_class_map = {seq_class.split('\t')[0]: seq_class.rstrip().split('\t')[1] for seq_class in f} seq_per_class = {} for i, seq in enumerate(self.sequence_list): class_name = seq_to_class_map[seq[1]] if class_name not in seq_per_class: seq_per_class[class_name] = [i] else: seq_per_class[class_name].append(i) return seq_to_class_map, seq_per_class def get_name(self): return 'trackingnet' def has_class_info(self): return True def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] bb_anno_file = os.path.join(self.root, "TRAIN_" + str(set_id), "anno", vid_name + ".txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values return torch.tensor(gt) def get_sequence_info(self, seq_id): bbox = self._read_bb_anno(seq_id) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame(self, seq_id, frame_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] frame_path = os.path.join(self.root, "TRAIN_" + str(set_id), "frames", vid_name, 
str(frame_id) + ".jpg") return self.image_loader(frame_path) def _get_class(self, seq_id): seq_name = self.sequence_list[seq_id][1] return self.seq_to_class_map[seq_name] def get_class_name(self, seq_id): obj_class = self._get_class(seq_id) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): frame_list = [self._get_frame(seq_id, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] obj_class = self._get_class(seq_id) object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/vos_base.py ================================================ import torch from pathlib import Path from collections import OrderedDict, defaultdict import json import numpy as np import os from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader, imread_indexed from ltr.data.bounding_box_utils import masks_to_bboxes class VOSMeta: def __init__(self, data=None, filename=None): if filename is not None: self.load(filename) elif data is not None: self._data = data else: raise ValueError("Must set either data or filename parameter") def save(self, gen_meta: Path): gen_meta.parent.mkdir(exist_ok=True, parents=True) json.dump(self._data, open(gen_meta, "w")) def load(self, gen_meta: Path): if not gen_meta.exists(): print("Generated metadata file %s is not found." 
% gen_meta) print("Find and run VOSMeta.generate() to create it.") raise FileNotFoundError(gen_meta) self._data = json.load(open(gen_meta), object_pairs_hook=OrderedDict) @classmethod def generate(cls, dset_name: str, dset_images_path: Path, dset_annos_path: Path): """ Count the annotation mask pixels per object, per frame, in all sequences in a dataset :param dset_name: Dataset name, for printing the progress bar. :param dset_annos_path: Path to annotations directory, containing sequence directories, with annotation frames in them. :return: Dataset meta dict: {'sequence0': { 'shape': (height, width) 'obj_sizes': # Object pixels per frame {'frame0': {'object0': px_count, 'object1': px_count, ...}, 'frame1': {'object0': px_count, 'object1': px_count, ...}, ... }, 'bboxes': # Bounding boxes per frame {'frame0': {'object0': bbox, 'object1': bbox, ...}, 'frame1': {'object0': bbox, 'object1': bbox, ...}, ... }, ... } """ assert(dset_annos_path.exists()) dset_meta = OrderedDict() sequences = [p.stem for p in sorted(dset_annos_path.glob("*")) if p.is_dir()] try: from tqdm import tqdm except: def tqdm(x, *args, **kwargs): return x for seq in tqdm(sequences, desc=dset_name, unit="seq"): obj_sizes2 = defaultdict(OrderedDict) bboxes = defaultdict(OrderedDict) shape = None frame_names = [file.stem for file in sorted((dset_images_path / seq).glob("*.jpg"))] anno_paths = list(sorted((dset_annos_path / seq).glob("*.png"))) # Extract information from the given label frames for path in anno_paths: f_id = path.stem # Count label-pixels per frame labels = imread_indexed(path) # labels = np.array(Image.open(path)) obj_ids, obj_sizes = np.unique(labels, return_counts=True) obj_ids = [str(oid) for oid in obj_ids] obj_sizes = obj_sizes.tolist() if '0' in obj_ids: # Remove background id obj_ids = obj_ids[1:] obj_sizes = obj_sizes[1:] obj_sizes2[f_id] = OrderedDict(zip(obj_ids, obj_sizes)) # Generate per-label bounding boxes for obj_id in obj_ids: bboxes[f_id][obj_id] = 
cls._mask_to_bbox(labels == int(obj_id)) if shape is None: shape = labels.shape[:2] # Format result dset_meta[seq] = dict(shape=shape, obj_sizes=obj_sizes2, bboxes=bboxes, frame_names=frame_names) return VOSMeta(dset_meta) @staticmethod def _mask_to_bbox(mask: np.ndarray): mask = mask.astype(int) xs = mask.sum(axis=-2).nonzero()[0].tolist() ys = mask.sum(axis=-1).nonzero()[0].tolist() if len(ys) > 0 and len(xs) > 0: x, y, w, h = xs[0], ys[0], xs[-1] - xs[0], ys[-1] - ys[0] else: x, y, w, h = 0, 0, 0, 0 return [x, y, w, h] @staticmethod def _transpose_nested_dict(d): """ Permute a 2-level nested dict such that the inner and outer keys swap places. """ d2 = defaultdict(OrderedDict) for key1, inner in d.items(): for key2, value in inner.items(): d2[key2][key1] = value return d2 def select_split(self, dataset_name, split): ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') sequences = set([s.strip() for s in open(os.path.join(ltr_path, 'data_specs', dataset_name + '_' + split + '.txt')).readlines()]) all_sequences = set(self._data.keys()) to_remove = all_sequences.difference(sequences) for seq_name in to_remove: self._data.pop(seq_name) def get_sequence_names(self): return list(self._data.keys()) def get_shape(self, seq_name): """ Sequence image shape (h,w) """ h, w = self._data[seq_name]['shape'] return h, w def get_obj_ids(self, seq_name): """ All objects in the sequence """ return list(self.get_obj_sizes_per_object(seq_name).keys()) def get_frame_names(self, seq_name): """ All filename stems of the frames in the sequence """ return self._data[seq_name]['frame_names'] def enable_all_frames(self, dset_images_path): """ For YouTubeVOS: Update the frame names with (jpeg) files from the _all_frames set :param dset_images_path: /path/to/train_all_frames/JPEGImages (or valid or test) :param seq: Sequence name :return: """ # Try load the cached index idx_file = dset_images_path.parent / "frame_names.json" if idx_file.exists(): print('Loading cached 
frame names from %s' % idx_file) all_frame_names = json.load(open(idx_file)) else: # Cache the data to the user's home directory (guaranteed to be writable) all_frame_names = dict() user_idx_file = Path.home() / (dset_images_path.parent.stem + "_frame_names.json") print('Indexing YouTubeVOS "all_frames" frame names to %s' % user_idx_file) for seq in self._data: all_frame_names[seq] = [file.stem for file in sorted((dset_images_path / seq).glob("*.jpg"))] json.dump(all_frame_names, open(user_idx_file, "w")) print('Done. Move %s to %s to load faster next time.' % (user_idx_file, idx_file)) for seq, frame_names in all_frame_names.items(): self._data[seq]['frame_names'] = frame_names def get_aspect_ratio(self, seq_name): """ Sequence aspect ratio """ h, w = self._data[seq_name]['shape'] return w / h def get_obj_sizes_per_frame(self, seq_name): """ Get object pixel counts, grouped by frame names """ return self._data[seq_name]['obj_sizes'] def get_bboxes_per_frame(self, seq_name): """ Object bounding boxes, grouped by frame names """ return self._data[seq_name]['bboxes'] def get_obj_sizes_per_object(self, seq_name): """ Object pixel counts, grouped by object """ return self._transpose_nested_dict(self.get_obj_sizes_per_frame(seq_name)) def get_bboxes_per_object(self, seq_name): """ Object bounding boxes, grouped by object """ return self._transpose_nested_dict(self.get_bboxes_per_frame(seq_name)) @staticmethod def generate_datasets_meta(src, dst=Path("~/vosdataset_meta").expanduser()): VOSMeta.generate("SyntheticCoco", src / "JPEGImages", src / "Annotations").save(src / "generated_meta.json") class VOSDatasetBase(BaseVideoDataset): """ Generic VOS dataset reader base class, for both DAVIS and YouTubeVOS """ def __init__(self, name: str, root: Path, version=None, split='train', multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader): """ :param root: Dataset root path, eg /path/to/DAVIS or /path/to/YouTubeVOS/ Note: YouTubeVOS 2018 and 2019 are expected to be in 
/path/to/YouTubeVOS/2018 and /path/to/YouTubeVOS/2019, respectively :param name: 'DAVIS' or 'YouTubeVOS' (case sensitive) :param version: DAVIS: '2016', '2017, YouTubeVOS: '2018' or '2019' :param split: DAVIS: Any name in DAVIS/ImageSets/, YouTubeVOS: 'test', 'train', 'valid' or 'jjtrain', 'jjvalid' :param multiobj: Whether the dataset will return all objects in a sequence or multiple sequences with one object in each. :param vis_threshold: Minimum number of pixels required to consider a target object "visible". :param image_loader: Image loader. """ assert root.exists() and root.is_dir() super().__init__(name, root, image_loader) self.version = version self.split = split self.vis_threshold = vis_threshold self.multiobj = multiobj def _load_image(self, path): im = self.image_loader(str(path)) assert im is not None im = np.atleast_3d(im) return im @staticmethod def _load_anno(path): if not path.exists(): return None # im = np.atleast_3d(np.array(Image.open(path))) im = imread_indexed(path) return im def get_num_sequences(self): return len(self._samples) def get_sequence_info(self, sample_id): """ Get sample meta data. :param sample_id: Sample to query. :return: dict of metadata: sequence: Sequence name frame_shape: (height, width) of the images frame_names: List of frame filename stems in the sequence object_ids: Id numbers of all objects occurring in the sequence obj_sizes: Matrix shape=(frames, object) of the number of pixels for each object in each frame Coordinates in this matrix relate to the frame_names and object_ids visible: Boolean matrix of the same shape as obj_sizes. Entries with more pixels than self.visible_threshold are True. 
""" m = self.gmeta seq_name, obj_ids = self._samples[sample_id] f_names = m.get_frame_names(seq_name) # All frames f2i = {f: i for i, f in enumerate(f_names)} # Frame name to matrix index o2i = {o: i for i, o in enumerate(obj_ids)} # Object id to matrix index # Get a matrix of object sizes: shape=(frames, objects) obj_sizes = torch.zeros((len(f_names), len(obj_ids)), dtype=torch.int) sizes_per_object = m.get_obj_sizes_per_object(seq_name) for obj_id in obj_ids: frames = sizes_per_object[obj_id] oid = o2i[obj_id] for f, sz in frames.items(): obj_sizes[f2i[f], oid] = sz visible = (obj_sizes > self.vis_threshold).byte() return dict(sequence=seq_name, frame_shape=m.get_shape(seq_name), frame_names=f_names, object_ids=obj_ids, object_sizes=obj_sizes, visible=visible, valid=visible) def get_paths_and_bboxes(self, sequence_info): seq_name = sequence_info['sequence'] annos_root = self._anno_path / seq_name images_root = self._jpeg_path / seq_name frame_names = sequence_info['frame_names'] f2i = {f: i for i, f in enumerate(frame_names)} images = [str(images_root / (f + ".jpg")) for f in frame_names] # Find the frames where ground truth is available and # get the bounding boxes and segmentation labels of those frames all_bboxes = self.gmeta.get_bboxes_per_frame(seq_name) gt_labels = [str(annos_root / (f + ".png")) if f in all_bboxes.keys() else None for f in frame_names] gt_bboxes = OrderedDict() for obj_id in sequence_info['object_ids']: gt_bboxes[obj_id] = np.array([all_bboxes.get(frame, {}).get(obj_id, [-1, -1, -1, -1]) for frame in frame_names]) return images, gt_labels, gt_bboxes def _construct_sequence(self, sequence_info): raise NotImplementedError def get_sequence_list(self): if len(self.sequence_list) > 0: return self.sequence_list self.sequence_list = [self._construct_sequence(self.get_sequence_info(i)) for i in range(len(self._samples))] return self.sequence_list def __len__(self): return len(self._samples) def _get_image_path(self, meta, frame_id): return 
self._jpeg_path / meta['sequence'] / (meta['frame_names'][frame_id] + ".jpg") def _get_anno_path(self, meta, frame_id): return self._anno_path / meta['sequence'] / (meta['frame_names'][frame_id] + ".png") def get_frames(self, sample_id, frame_ids, anno=None): """ Fetch frames with the given ids. :param sample_id: Sample to get. :param frame_ids: List of frame indices in the sequence belonging to the sample_id :return: dict of metadata and data: sequence: Sequence name images: List of images. No entries may be None labels: List of label/mask images. Entries may be None if the data is missing bboxes: List of bounding boxes. Entries may be None if the data is missing """ seq_name, obj_ids = self._samples[sample_id] meta = self.get_sequence_info(sample_id) if anno is None else anno frame_names = meta['frame_names'] images = [self._load_image(self._jpeg_path / seq_name / (frame_names[f] + ".jpg")) for f in frame_ids] labels = [self._load_anno(self._anno_path / seq_name / (frame_names[f] + ".png")) for f in frame_ids] # Generate bounding boxes for the requested objects bboxes = [] for lb in labels: lb = torch.from_numpy(lb.squeeze()) frame_bbs = {} for obj_id in obj_ids: bbox = masks_to_bboxes(lb == int(obj_id), fmt='t') if bbox[3] == 0 or bbox[2] == 0: print("!") frame_bbs[obj_id] = bbox bboxes.append(frame_bbs) # Insert empty bboxes for missing object ids for bbox in bboxes: for obj_id in obj_ids: if obj_id not in bbox: bbox[obj_id] = torch.zeros(4, dtype=torch.float32) # Remap to object id 1, if requested - for training if not self.multiobj: assert len(obj_ids) == 1 obj_id = obj_ids[0] labels = [torch.Tensor(lb == int(obj_id)) for lb in labels] bboxes = [bbox[obj_id] for bbox in bboxes] else: labels = [torch.Tensor(lb) for lb in labels] object_meta = {key: meta[key] for key in ['sequence', 'frame_shape', 'frame_names', 'object_ids']} anno_frames = dict(bbox=bboxes, mask=labels) for key in ['object_sizes', 'visible', 'valid']: value = meta[key] anno_frames[key] = 
[value[f_id, ...].clone() for f_id in frame_ids] return images, anno_frames, object_meta def get_name(self): return "%s/%s/%s" % (self.name, self.version, self.split) def has_class_info(self): return False def has_occlusion_info(self): return True def get_num_classes(self): return 0 def get_class_list(self): return [] def get_sequences_in_class(self, class_name): raise [] def has_segmentation_info(self): return True ================================================ FILE: artrackv2_mindspore/external/AR/ltr/dataset/youtubevos.py ================================================ from pathlib import Path import os from ltr.dataset.vos_base import VOSDatasetBase, VOSMeta from pytracking.evaluation import Sequence import json from ltr.admin.environment import env_settings from ltr.data.image_loader import jpeg4py_loader class YouTubeVOSMeta: """ Thin wrapper for YouTubeVOS meta data meta.json { "videos": { "": { "objects": { "": { "category": "", "frames": [ "", "", ] } } } } } # is the same as the pixel values of object in annotated segmentation PNG files. # is the 5-digit index of frame in video, and not necessary to start from 0. 
""" def __init__(self, dset_split_path): self._data = json.load(open(dset_split_path / 'meta.json'))['videos'] def sequences(self): return list(self._data.keys()) def seq_frames(self, seq_name): """ All filename stems of the frames in the sequence """ frames = set() for obj_id in self.object_ids(seq_name): for f in self.object_frames(seq_name, obj_id): frames.add(f) return list(sorted(frames)) def object_ids(self, seq_name): """ All objects in the sequence """ return list(self._data[seq_name]['objects'].keys()) def object_category(self, seq_name, obj_id): return self._data[seq_name]['objects'][str(obj_id)]['category'] def object_frames(self, seq_name, obj_id): return self._data[seq_name]['objects'][str(obj_id)]['frames'] def object_first_frame(self, seq_name, obj_id): return self.object_frames(seq_name, obj_id)[0] class YouTubeVOS(VOSDatasetBase): """ YoutubeVOS video object segmentation dataset. Publication: YouTube-VOS: A Large-Scale Video Object Segmentation Benchmark Ning Xu, Linjie Yang, Yuchen Fan, Dingcheng Yue, Yuchen Liang, Jianchao Yang, and Thomas Huang ECCV, 2018 https://arxiv.org/pdf/1809.03327.pdf Download dataset from: https://youtube-vos.org/dataset/ """ def __init__(self, root=None, version='2019', split='train', cleanup=None, all_frames=False, sequences=None, multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader): """ args: root - Dataset root path. If unset, it uses the path in your local.py config. version - '2018' or '2019' split - 'test', 'train', 'valid', or 'jjtrain', 'jjvalid'. 'jjvalid' corresponds to a custom validation dataset consisting of 300 videos randomly sampled from the train set. 'jjtrain' contains the remaining videos used for training. cleanup - List of actions to take to to clean up known problems in the dataset. 'aspects': remove frames with weird aspect ratios, 'starts': fix up start frames from original meta data all_frames - Whether to use an "all_frames" split. sequences - List of sequence names. 
Limit to a subset of sequences if not None. multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one object in each. vis_threshold - Minimum number of pixels required to consider a target object "visible". image_loader - Image loader. """ root = env_settings().youtubevos_dir if root is None else root super().__init__(name="YouTubeVOS", root=Path(root), version=version, split=split, multiobj=multiobj, vis_threshold=vis_threshold, image_loader=image_loader) split_folder = self.split if self.split.startswith("jj"): split_folder = "train" dset_path = self.root / self.version / split_folder self._anno_path = dset_path / 'Annotations' if all_frames: self._jpeg_path = self.root / self.version / (split_folder + "_all_frames") / 'JPEGImages' else: self._jpeg_path = dset_path / 'JPEGImages' self.meta = YouTubeVOSMeta(dset_path) meta_path = dset_path / "generated_meta.json" if meta_path.exists(): self.gmeta = VOSMeta(filename=meta_path) else: self.gmeta = VOSMeta.generate('YouTubeVOS', self._jpeg_path, self._anno_path) self.gmeta.save(meta_path) if all_frames: self.gmeta.enable_all_frames(self._jpeg_path) if self.split not in ['train', 'valid', 'test']: self.gmeta.select_split('youtubevos', self.split) if sequences is None: sequences = self.gmeta.get_sequence_names() to_remove = set() cleanup = {} if cleanup is None else set(cleanup) if 'aspect' in cleanup: # Remove sequences with unusual aspect ratios for seq_name in sequences: a = self.gmeta.get_aspect_ratio(seq_name) if a < 1.45 or a > 1.9: to_remove.add(seq_name) if 'starts' in cleanup: # Fix incorrect start frames for some objects found with ytvos_start_frames_test() bad_start_frames = [("0e27472bea", '2', ['00055', '00060'], '00065'), ("5937b08d69", '4', ['00000'], '00005'), ("5e1ce354fd", '5', ['00010', '00015'], '00020'), ("7053e4f41e", '2', ['00000', '00005', '00010', '00015'], '00020'), ("720e3fa04c", '2', ['00050'], '00055'), ("c73c8e747f", '2', ['00035'], '00040')] for 
seq_name, obj_id, bad_frames, good_frame in bad_start_frames: # bad_frames is from meta.json included with the dataset # good_frame is from the generated meta - and the first actual frame where the object was seen. if seq_name in self.meta._data: frames = self.meta.object_frames(seq_name, obj_id) for f in bad_frames: frames.remove(f) assert frames[0] == good_frame sequences = [seq for seq in sequences if seq not in to_remove] self.sequence_names = sequences self._samples = [] for seq in sequences: obj_ids = self.meta.object_ids(seq) if self.multiobj: # Multiple objects per sample self._samples.append((seq, obj_ids)) else: # One object per sample self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids]) print("%s loaded." % self.get_name()) if len(to_remove) > 0: print(" %d sequences were removed, (%d remaining)." % (len(to_remove), len(sequences))) def _construct_sequence(self, sequence_info): seq_name = sequence_info['sequence'] frame_names = sequence_info['frame_names'] fname_to_fid = {f: i for i, f in enumerate(frame_names)} images, gt_segs, gt_bboxes = self.get_paths_and_bboxes(sequence_info) init_data = dict() for obj_id in sequence_info['object_ids']: if obj_id == '0': print("!") f_name = self.meta.object_first_frame(seq_name, obj_id) f_id = fname_to_fid[f_name] if f_id not in init_data: init_data[f_id] = {'object_ids': [obj_id], 'bbox': {obj_id: gt_bboxes[obj_id][f_id,:]}, 'mask': os.path.join(os.path.dirname(gt_segs[f_id]), (f_name + ".png"))} assert init_data[f_id]['mask'] in gt_segs # If this fails, some file is missing else: init_data[f_id]['object_ids'].append(obj_id) init_data[f_id]['bbox'][obj_id] = gt_bboxes[obj_id][f_id,:] return Sequence(name=seq_name, frames=images, dataset='YouTubeVOS', ground_truth_rect=gt_bboxes, init_data=init_data, ground_truth_seg=gt_segs, object_ids=sequence_info['object_ids'], multiobj_mode=self.multiobj) ================================================ FILE: 
artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/.gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .vim-template* # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/LICENSE ================================================ MIT License Copyright (c) 2018 Jiayuan Mao Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice 
and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/README.md ================================================ # PreciseRoIPooling This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation). **Acquisition of Localization Confidence for Accurate Object Detection** _Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.) https://arxiv.org/abs/1807.11590 ## Brief In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is: - different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates. - different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. 
the coordinates continuous. For a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details including the gradient computation can be found in our paper.
## Implementation PrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome. ## Usage (PyTorch 1.0) In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented). Since we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` ## Usage (PyTorch 0.4) **!!! Please first checkout to the branch pytorch0.4.** In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented). To use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` Here, - RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor. - `spatial_scale` is multiplied to the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`. - The coordinates for RoI follows the [L, R) convension. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`. ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore ================================================ *.o /_prroi_pooling ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : __init__.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. from .prroi_pool import * ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py ================================================ #! 
/usr/bin/env python3 # -*- coding: utf-8 -*- # File : functional.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. import torch import torch.autograd as ag __all__ = ['prroi_pool2d'] _prroi_pooling = None def _import_prroi_pooling(): global _prroi_pooling if _prroi_pooling is None: try: from os.path import join as pjoin, dirname from torch.utils.cpp_extension import load as load_extension root_dir = pjoin(dirname(__file__), 'src') _prroi_pooling = load_extension( '_prroi_pooling', [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')], verbose=True ) except ImportError: raise ImportError('Can not compile Precise RoI Pooling library.') return _prroi_pooling class PrRoIPool2DFunction(ag.Function): @staticmethod def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale): _prroi_pooling = _import_prroi_pooling() assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \ 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type()) pooled_height = int(pooled_height) pooled_width = int(pooled_width) spatial_scale = float(spatial_scale) features = features.contiguous() rois = rois.contiguous() params = (pooled_height, pooled_width, spatial_scale) if features.is_cuda: output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params) ctx.params = params # everything here is contiguous. 
ctx.save_for_backward(features, rois, output) else: raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') return output @staticmethod def backward(ctx, grad_output): _prroi_pooling = _import_prroi_pooling() features, rois, output = ctx.saved_tensors grad_input = grad_coor = None if features.requires_grad: grad_output = grad_output.contiguous() grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) if rois.requires_grad: grad_output = grad_output.contiguous() grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) return grad_input, grad_coor, None, None, None prroi_pool2d = PrRoIPool2DFunction.apply ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : prroi_pool.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. 
import torch.nn as nn from .functional import prroi_pool2d __all__ = ['PrRoIPool2D'] class PrRoIPool2D(nn.Module): def __init__(self, pooled_height, pooled_width, spatial_scale): super().__init__() self.pooled_height = int(pooled_height) self.pooled_width = int(pooled_width) self.spatial_scale = float(spatial_scale) def forward(self, features, rois): return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale) def extra_repr(self): return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c ================================================ /* * File : prroi_pooling_gpu.c * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ #include #include #include #include #include #include "prroi_pooling_gpu_impl.cuh" at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) { int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options()); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingForwardGpu( stream, features.data(), rois.data(), output.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count ); THCudaCheck(cudaGetLastError()); return output; } at::Tensor prroi_pooling_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int pooled_height, int pooled_width, float spatial_scale) { auto features_diff = at::zeros_like(features); int nr_rois = rois.size(0); int batch_size = features.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = batch_size * nr_channels * height * width; if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return features_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), features_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); THCudaCheck(cudaGetLastError()); return features_diff; } at::Tensor prroi_pooling_coor_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int 
pooled_height, int pooled_width, float spatial_scale) { auto coor_diff = at::zeros_like(rois); int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = nr_rois * 5; if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return coor_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingCoorBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), coor_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); THCudaCheck(cudaGetLastError()); return coor_diff; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward"); m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward"); m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor"); } ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h ================================================ /* * File : prroi_pooling_gpu.h * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); int prroi_pooling_backward_cuda( THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, int pooled_height, int pooled_width, float spatial_scale ); int prroi_pooling_coor_backward_cuda( THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, int pooled_height, int pooled_width, float spatial_scal ); ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu ================================================ /* * File : prroi_pooling_gpu_impl.cu * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. */ #include "prroi_pooling_gpu_impl.cuh" #include #include #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) #define CUDA_POST_KERNEL_CHECK \ do { \ cudaError_t err = cudaGetLastError(); \ if (cudaSuccess != err) { \ fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \ exit(-1); \ } \ } while(0) #define CUDA_NUM_THREADS 512 namespace { static int CUDA_NUM_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) { bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width); float retVal = overflow ? 0.0f : data[h * width + w]; return retVal; } __device__ static float PrRoIPoolingGetCoeff(float dh, float dw){ dw = dw > 0 ? dw : -dw; dh = dh > 0 ? 
dh : -dh; return (1.0f - dh) * (1.0f - dw); } __device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) { return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1; } __device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){ float retVal = 0.0f; int h1 = floorf(h); int w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h); w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); return retVal; } __device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) { float alpha, beta, lim_alpha, lim_beta, tmp; float sum_out = 0; alpha = x0 - float(s_w); beta = y0 - float(s_h); lim_alpha = x1 - float(s_w); lim_beta = y1 - float(s_h); tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp; alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp; alpha = x0 - float(s_w); beta = float(e_h) - y1; lim_alpha = x1 - float(s_w); lim_beta = float(e_h) - y0; tmp = (lim_alpha 
- 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp; alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp; return sum_out; } __device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff) { bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width); if (!overflow) atomicAdd(diff + h * width + w, top_diff * coeff); } __device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) { float alpha, beta, lim_alpha, lim_beta, tmp; alpha = x0 - float(s_w); beta = y0 - float(s_h); lim_alpha = x1 - float(s_w); lim_beta = y1 - float(s_h); tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp); alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp); alpha = x0 - float(s_w); beta = float(e_h) - y1; lim_alpha = x1 - float(s_w); lim_beta = float(e_h) - y0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, 
h0, w0, tmp); alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp); } __global__ void PrRoIPoolingForward( const int nthreads, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float spatial_scale) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale; float roi_start_h = bottom_rois[2] * spatial_scale; float roi_end_w = bottom_rois[3] * spatial_scale; float roi_end_h = bottom_rois[4] * spatial_scale; float roi_width = max(roi_end_w - roi_start_w, ((float)0.0)); float roi_height = max(roi_end_h - roi_start_h, ((float)0.0)); float bin_size_h = roi_height / static_cast(pooled_height); float bin_size_w = roi_width / static_cast(pooled_width); const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width; float *this_out = top_data + index; float win_start_w = roi_start_w + bin_size_w * pw; float win_start_h = roi_start_h + bin_size_h * ph; float win_end_w = win_start_w + bin_size_w; float win_end_h = win_start_h + bin_size_h; float win_size = max(float(0.0), bin_size_w * bin_size_h); if (win_size == 0) { *this_out = 0; return; } float sum_out = 0; int s_w, s_h, e_w, e_h; s_w = floorf(win_start_w); e_w = ceilf(win_end_w); s_h = floorf(win_start_h); e_h = ceilf(win_end_h); for (int w_iter = s_w; w_iter < e_w; ++w_iter) for (int h_iter = s_h; h_iter < e_h; ++h_iter) 
sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1, max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)), min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)), height, width); *this_out = sum_out / win_size; } } __global__ void PrRoIPoolingBackward( const int nthreads, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float spatial_scale) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale; float roi_start_h = bottom_rois[2] * spatial_scale; float roi_end_w = bottom_rois[3] * spatial_scale; float roi_end_h = bottom_rois[4] * spatial_scale; float roi_width = max(roi_end_w - roi_start_w, (float)0); float roi_height = max(roi_end_h - roi_start_h, (float)0); float bin_size_h = roi_height / static_cast(pooled_height); float bin_size_w = roi_width / static_cast(pooled_width); const float *this_out_grad = top_diff + index; float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width; float win_start_w = roi_start_w + bin_size_w * pw; float win_start_h = roi_start_h + bin_size_h * ph; float win_end_w = win_start_w + bin_size_w; float win_end_h = win_start_h + bin_size_h; float win_size = max(float(0.0), bin_size_w * bin_size_h); float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size; int s_w, s_h, e_w, e_h; s_w = floorf(win_start_w); e_w = ceilf(win_end_w); s_h = floorf(win_start_h); e_h = ceilf(win_end_h); for (int w_iter = s_w; w_iter < e_w; ++w_iter) for (int h_iter = s_h; h_iter < e_h; ++h_iter) PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1, max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)), min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)), height, width); } } __global__ void PrRoIPoolingCoorBackward( const int nthreads, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float spatial_scale) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale; float roi_start_h = bottom_rois[2] * spatial_scale; float roi_end_w = bottom_rois[3] * spatial_scale; float roi_end_h = bottom_rois[4] * spatial_scale; float roi_width = max(roi_end_w - roi_start_w, (float)0); float roi_height = max(roi_end_h - roi_start_h, (float)0); float bin_size_h = roi_height / static_cast(pooled_height); float bin_size_w = roi_width / static_cast(pooled_width); const float *this_out_grad = top_diff + index; const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; const float *this_top_data = top_data + index; float *this_data_grad = bottom_diff + n * 5; float win_start_w = roi_start_w + bin_size_w * pw; float win_start_h = roi_start_h + bin_size_h * ph; float win_end_w = win_start_w + bin_size_w; float 
win_end_h = win_start_h + bin_size_h; float win_size = max(float(0.0), bin_size_w * bin_size_h); float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size; // WARNING: to be discussed if (sum_out == 0) return; int s_w, s_h, e_w, e_h; s_w = floorf(win_start_w); e_w = ceilf(win_end_w); s_h = floorf(win_start_h); e_h = ceilf(win_end_h); float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0; for (int h_iter = s_h; h_iter < e_h; ++h_iter) { g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter, min(win_end_h, float(h_iter + 1)) - h_iter, PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width), PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width)); g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter, min(win_end_h, float(h_iter + 1)) - h_iter, PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width), PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width)); } for (int w_iter = s_w; w_iter < e_w; ++w_iter) { g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter, min(win_end_w, float(w_iter + 1)) - w_iter, PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width), PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width)); g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter, min(win_end_w, float(w_iter + 1)) - w_iter, PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width), PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width)); } float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data); float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data); float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data); float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data); partial_x1 = partial_x1 
/ win_size * spatial_scale; partial_x2 = partial_x2 / win_size * spatial_scale; partial_y1 = partial_y1 / win_size * spatial_scale; partial_y2 = partial_y2 / win_size * spatial_scale; // (b, x1, y1, x2, y2) this_data_grad[0] = 0; atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad)); atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad)); atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width) * (*this_out_grad)); atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height) * (*this_out_grad)); } } } /* !anonymous namespace */ #ifdef __cplusplus extern "C" { #endif void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count) { PrRoIPoolingForward<<>>( top_count, bottom_data, bottom_rois, top_data, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingBackward<<>>( top_count, bottom_rois, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, 
F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingCoorBackward<<>>( top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } } /* !extern "C" */ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh ================================================ /* * File : prroi_pooling_gpu_impl.cuh * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ #ifndef PRROI_POOLING_GPU_IMPL_CUH #define PRROI_POOLING_GPU_IMPL_CUH #ifdef __cplusplus extern "C" { #endif #define F_DEVPTR_IN const float * #define F_DEVPTR_OUT float * void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count); void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); #ifdef __cplusplus } /* !extern "C" */ #endif #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py ================================================ # -*- coding: utf-8 -*- # File : test_prroi_pooling2d.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 18/02/2018 # # This file is part of Jacinle. 
import unittest import torch import torch.nn as nn import torch.nn.functional as F from jactorch.utils.unittest import TorchTestCase from prroi_pool import PrRoIPool2D class TestPrRoIPool2D(TorchTestCase): def test_forward(self): pool = PrRoIPool2D(7, 7, spatial_scale=0.5) features = torch.rand((4, 16, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 14, 14], [1, 14, 14, 28, 28], ]).float().cuda() out = pool(features, rois) out_gold = F.avg_pool2d(features, kernel_size=2, stride=1) self.assertTensorClose(out, torch.stack(( out_gold[0, :, :7, :7], out_gold[1, :, 7:14, 7:14], ), dim=0)) def test_backward_shapeonly(self): pool = PrRoIPool2D(2, 2, spatial_scale=0.5) features = torch.rand((4, 2, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 4, 4], [1, 14, 14, 18, 18], ]).float().cuda() features.requires_grad = rois.requires_grad = True out = pool(features, rois) loss = out.sum() loss.backward() self.assertTupleEqual(features.size(), features.grad.size()) self.assertTupleEqual(rois.size(), rois.grad.size()) if __name__ == '__main__': unittest.main() ================================================ FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu ================================================ /* * File : prroi_pooling_gpu_impl.cu * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ #include "prroi_pooling_gpu_impl.cuh" #include #include #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) #define CUDA_POST_KERNEL_CHECK \ do { \ cudaError_t err = cudaGetLastError(); \ if (cudaSuccess != err) { \ fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \ exit(-1); \ } \ } while(0) #define CUDA_NUM_THREADS 512 namespace { static int CUDA_NUM_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) { bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width); float retVal = overflow ? 0.0f : data[h * width + w]; return retVal; } __device__ static float PrRoIPoolingGetCoeff(float dh, float dw){ dw = dw > 0 ? dw : -dw; dh = dh > 0 ? dh : -dh; return (1.0f - dh) * (1.0f - dw); } __device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) { return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1; } __device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){ float retVal = 0.0f; int h1 = floorf(h); int w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h); w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); return retVal; } __device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, 
const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) { float alpha, beta, lim_alpha, lim_beta, tmp; float sum_out = 0; alpha = x0 - float(s_w); beta = y0 - float(s_h); lim_alpha = x1 - float(s_w); lim_beta = y1 - float(s_h); tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp; alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp; alpha = x0 - float(s_w); beta = float(e_h) - y1; lim_alpha = x1 - float(s_w); lim_beta = float(e_h) - y0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp; alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp; return sum_out; } __device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff) { bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width); if (!overflow) atomicAdd(diff + h * width + w, top_diff * coeff); } __device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) { float alpha, beta, lim_alpha, lim_beta, 
tmp; alpha = x0 - float(s_w); beta = y0 - float(s_h); lim_alpha = x1 - float(s_w); lim_beta = y1 - float(s_h); tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp); alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp); alpha = x0 - float(s_w); beta = float(e_h) - y1; lim_alpha = x1 - float(s_w); lim_beta = float(e_h) - y0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp); alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp); } __global__ void PrRoIPoolingForward( const int nthreads, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float spatial_scale) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = bottom_rois[1] * spatial_scale; float roi_start_h = bottom_rois[2] * spatial_scale; float roi_end_w = bottom_rois[3] * spatial_scale; float roi_end_h = bottom_rois[4] * 
spatial_scale;

        /* Clamp the RoI extent to be non-negative and derive the size of one
         * output bin. */
        float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
        float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
        /* NOTE(review): "static_cast(...)" appears to have lost its template
         * argument (presumably <float>) during text extraction -- confirm
         * against the upstream PreciseRoIPooling sources. */
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        /* Pointer to this (batch, channel) feature plane and to this output cell. */
        const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        float *this_out = top_data + index;

        /* Continuous (sub-pixel) window covered by output cell (ph, pw). */
        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        if (win_size == 0) {
            /* Degenerate (zero-area) bin: emit zero for this output cell. */
            *this_out = 0;
            return;
        }

        float sum_out = 0;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Integrate the bilinearly interpolated feature over the window, one
         * integer grid cell at a time, then average by the window area. */
        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
        *this_out = sum_out / win_size;
    }
}

/* Backward pass w.r.t. the input feature map: each output cell's gradient,
 * divided by the window area, is distributed back over the grid cells that
 * the forward pass integrated. */
__global__ void PrRoIPoolingBackward(
        const int nthreads,
        F_DEVPTR_IN bottom_rois,
        F_DEVPTR_IN top_diff,
        F_DEVPTR_OUT bottom_diff,
        const int channels,
        const int height,
        const int width,
        const int pooled_height,
        const int pooled_width,
        const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        /* RoIs are rows of (batch_index, x1, y1, x2, y2) in input coordinates. */
        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        /* NOTE(review): template argument of static_cast lost in extraction. */
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        /* Gradient per unit area; zero-area windows contribute nothing. */
        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
    }
}

/* Backward pass w.r.t. the RoI coordinates (b, x1, y1, x2, y2): line integrals
 * of the interpolated feature along each window edge give the partial
 * derivatives of the averaged output w.r.t. that edge position. */
__global__ void PrRoIPoolingCoorBackward(
        const int nthreads,
        F_DEVPTR_IN bottom_data,
        F_DEVPTR_IN bottom_rois,
        F_DEVPTR_IN top_data,
        F_DEVPTR_IN top_diff,
        F_DEVPTR_OUT bottom_diff,
        const int channels,
        const int height,
        const int width,
        const int pooled_height,
        const int pooled_width,
        const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        /* NOTE(review): template argument of static_cast lost in extraction. */
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        const float *this_top_data = top_data + index;
        /* Gradient destination is this RoI's 5-vector, not the feature map. */
        float *this_data_grad = bottom_diff + n * 5;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        // WARNING: to be discussed
        if (sum_out == 0)
            return;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Line integrals of the interpolated feature along the four window
         * edges: x = x1 and x = x2 (integrated over h), y = y1 and y = y2
         * (integrated over w). */
        float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
        for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
            g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

            g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
        }

        for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
            g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

            g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
        }

        /* d(mean)/d(edge): edge line-integral minus edge-length times the
         * current mean value, normalized by window area; spatial_scale maps
         * the derivative back to input-image coordinates. */
        float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
        float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
        float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
        float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

        partial_x1 = partial_x1 / win_size * spatial_scale;
        partial_x2 = partial_x2 / win_size * spatial_scale;
        partial_y1 = partial_y1 / win_size * spatial_scale;
        partial_y2 = partial_y2 / win_size * spatial_scale;

        // (b, x1, y1, x2, y2)
        this_data_grad[0] = 0;
        /* atomicAdd: every output cell of the same RoI accumulates into the
         * same 5-vector; the (pw / pooled_width)-style weights linearly
         * apportion each bin's contribution between the two opposite edges. */
        atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) +
                partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
        atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) +
                partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
        atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width +
                partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
        atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height +
                partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
    }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

/* Host-side launcher for the forward kernel.
 * NOTE(review): the launch configurations inside <<< >>> below were lost
 * during text extraction (only empty "<<>>" markers remain) -- restore from
 * the upstream sources before compiling. */
void PrRoIPoolingForwardGpu(
        cudaStream_t stream,
        F_DEVPTR_IN bottom_data,
        F_DEVPTR_IN bottom_rois,
        F_DEVPTR_OUT top_data,
        const int channels_, const int height_, const int width_,
        const int pooled_height_, const int pooled_width_,
        const float spatial_scale_,
        const int top_count) {

    PrRoIPoolingForward<<>>(
            top_count, bottom_data, bottom_rois, top_data,
            channels_, height_, width_, pooled_height_, pooled_width_,
            spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

/* Launcher for the input-gradient kernel; bottom_diff is zeroed first because
 * the kernel scatters (accumulates) into it. */
void PrRoIPoolingBackwardGpu(
        cudaStream_t stream,
        F_DEVPTR_IN bottom_data,
        F_DEVPTR_IN bottom_rois,
        F_DEVPTR_IN top_data,
        F_DEVPTR_IN top_diff,
        F_DEVPTR_OUT bottom_diff,
        const int channels_, const int height_, const int width_,
        const int pooled_height_, const int pooled_width_,
        const float spatial_scale_,
        const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingBackward<<>>(
            top_count, bottom_rois, top_diff, bottom_diff,
            channels_, height_, width_, pooled_height_, pooled_width_,
            spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

/* Launcher for the RoI-coordinate-gradient kernel. */
void PrRoIPoolingCoorBackwardGpu(
        cudaStream_t stream,
        F_DEVPTR_IN bottom_data,
        F_DEVPTR_IN bottom_rois,
        F_DEVPTR_IN top_data,
        F_DEVPTR_IN top_diff,
        F_DEVPTR_OUT bottom_diff,
        const int channels_, const int height_, const int width_,
        const int pooled_height_, const int pooled_width_,
        const float spatial_scale_,
        const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingCoorBackward<<>>(
            top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
            channels_, height_, width_, pooled_height_, pooled_width_,
            spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

} /* !extern "C" */

================================================
FILE: artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh
================================================
/*
 * File   : prroi_pooling_gpu_impl.cuh
 * Author : Tete Xiao, Jiayuan Mao
 * Email  : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
*/ #ifndef PRROI_POOLING_GPU_IMPL_CUH #define PRROI_POOLING_GPU_IMPL_CUH #ifdef __cplusplus extern "C" { #endif #define F_DEVPTR_IN const float * #define F_DEVPTR_OUT float * void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count); void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); #ifdef __cplusplus } /* !extern "C" */ #endif #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/AR_seg_mask/AR_seg_mask.py ================================================ import torch.nn as nn from ltr.models.neck import CorrNL from ltr import model_constructor import torch import ltr.models.backbone.resnet_seg as resnet_seg from ltr.models.head import seg_network from easydict import EasyDict as edict '''2020.4.14 replace mask head with frtm for higher-quality mask''' '''2020.4.22 Only use the mask branch''' class ARnet_seg_mask(nn.Module): """ Scale Estimation network module with three branches: bbox, coner and mask. 
""" def __init__(self, feature_extractor, neck_module, head_module, used_layers, extractor_grad=True,output_size=(256,256)): """ args: feature_extractor - backbone feature extractor bb_regressor - IoU prediction module bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to bb_regressor extractor_grad - Bool indicating whether backbone feature extractor requires gradients """ super(ARnet_seg_mask, self).__init__() self.feature_extractor = feature_extractor self.neck = neck_module self.refiner = head_module self.used_layers = used_layers self.output_size = output_size if not extractor_grad: for p in self.feature_extractor.parameters(): p.requires_grad_(False) def forward(self, train_imgs, test_imgs, train_bb, mode='train'): """ Forward pass Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col] """ self.forward_ref(train_imgs, train_bb) pred_dict = self.forward_test(test_imgs, mode) return pred_dict def forward_ref(self, train_imgs, train_bb): """ Forward pass of reference branch. size of train_imgs is (1,batch,3,H,W), train_bb is (1,batch,4)""" num_sequences = train_imgs.shape[-4] # batch num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1 # 1 # Extract backbone features '''train_feat OrderedDict, key:'layer4' ''' train_feat_dict = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:])) # 输入size是(batch,3,256,256) train_feat_list = [feat for feat in train_feat_dict.values()] #list,其中每个元素对应一层输出的特征(tensor) # get reference feature self.neck.get_ref_kernel(train_feat_list, train_bb.view(num_train_images, num_sequences, 4)) def forward_test(self, test_imgs, mode='train'): """ Forward pass of test branch. 
        size of test_imgs is (1, batch, 3, 256, 256)"""
        output = {}
        # Extract backbone features
        # input size is (batch, 3, 256, 256)
        test_feat_dict = self.extract_backbone_features(
            test_imgs.view(-1, *test_imgs.shape[-3:]),
            layers=['layer1', 'layer2', 'layer3', 'layer4', 'layer5'])
        '''list,tensor'''
        # Save low-level feature list
        # Lfeat_list = [feat for name, feat in test_feat_dict.items() if name != 'layer3']

        # fuse feature from two branches
        fusion_feat = self.neck.fuse_feat([test_feat_dict['layer4']])

        # Obtain bbox prediction
        if mode == 'train':
            output['mask'] = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))
        elif mode == 'mask':
            # In 'mask' mode the raw mask tensor is returned instead of a dict.
            output = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))
        else:
            raise ValueError("mode should be train or test")
        return output

    def extract_backbone_features(self, im, layers=None):
        # Default to the layers the network was constructed with.
        if layers is None:
            layers = self.used_layers
        return self.feature_extractor(im, layers)

    def extract_features(self, im, layers):
        return self.feature_extractor(im, layers)


@model_constructor
def ARnet_seg_mask_resnet50(backbone_pretrained=True, used_layers=('layer4',), pool_size=None):
    """Build the Alpha-Refine mask network on a ResNet-50 backbone."""
    # backbone
    backbone_net = resnet_seg.resnet50(pretrained=backbone_pretrained)

    # neck
    neck_net = CorrNL.CorrNL(pool_size=pool_size)

    # multiple heads
    '''create segnet'''
    in_channels = 1024
    # disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96, out_channels=64)  # non-local feat (64 channels rather than 1)
    '''2020.4.22 change "out_channels" to pool_size * pool_size'''
    disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96,
                        out_channels=pool_size*pool_size)  # non-local feat (64 channels rather than 1)
    refnet_params = edict(
        layers=("layer5", "layer4", "layer3", "layer2"),
        nchannels=64, use_batch_norm=True)
    # Overwrite in_channels with the backbone's actual channel count.
    disc_params.in_channels = backbone_net.get_out_channels()[disc_params.layer]

    p = refnet_params
    refinement_layers_channels = {L: nch for L, nch in backbone_net.get_out_channels().items() if L in p.layers}
    refiner = seg_network.SegNetwork(disc_params.out_channels, p.nchannels, refinement_layers_channels, p.use_batch_norm)

    '''create Alpha-Refine'''
    # Output mask side = pool_size * 2 * 16 (backbone stride 16, doubled search area).
    net = ARnet_seg_mask(feature_extractor=backbone_net, neck_module=neck_net, head_module=refiner,
                         used_layers=used_layers, extractor_grad=True,
                         output_size=(int(pool_size*2*16), int(pool_size*2*16)))
    return net


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/AR_seg_mask/__init__.py
================================================


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/__init__.py
================================================


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/backbone/__init__.py
================================================
from .resnet import resnet18, resnet50, resnet_baby
from .resnet18_vggm import resnet18_vggmconv1


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/backbone/base.py
================================================
import torch
import torch.nn as nn


class Backbone(nn.Module):
    """Base class for backbone networks. Handles freezing layers etc.
    args:
        frozen_layers  -  Name of layers to freeze. Either list of strings, 'none' or 'all'. Default: 'none'.
    """

    def __init__(self, frozen_layers=()):
        super().__init__()

        # Normalize the string forms: 'none' -> empty tuple; 'all' is kept
        # as-is and handled specially by the _set_frozen_* helpers.
        if isinstance(frozen_layers, str):
            if frozen_layers.lower() == 'none':
                frozen_layers = ()
            elif frozen_layers.lower() != 'all':
                raise ValueError('Unknown option for frozen layers: \"{}\".
    Allows extracting specific feature blocks."""

    def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64,
                 dilation_factor=1, frozen_layers=()):
        self.inplanes = inplanes
        super(ResNet, self).__init__(frozen_layers=frozen_layers)
        self.output_layers = output_layers
        self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Replace stride by dilation in the deeper layers when
        # dilation_factor > 1 (keeps a higher-resolution feature map).
        stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]
        self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))
        self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))
        self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))
        self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)

        out_feature_strides = {'conv1': 4, 'layer1': 4, 'layer2': 4*stride[0],
                               'layer3': 4*stride[0]*stride[1],
                               'layer4': 4*stride[0]*stride[1]*stride[2]}

        # TODO better way?
        if isinstance(self.layer1[0], BasicBlock):
            out_feature_channels = {'conv1': inplanes, 'layer1': inplanes,
                                    'layer2': inplanes*2, 'layer3': inplanes*4,
                                    'layer4': inplanes*8}
        elif isinstance(self.layer1[0], Bottleneck):
            # Bottleneck blocks expand channels by a factor of 4.
            base_num_channels = 4 * inplanes
            out_feature_channels = {'conv1': inplanes, 'layer1': base_num_channels,
                                    'layer2': base_num_channels * 2,
                                    'layer3': base_num_channels * 4,
                                    'layer4': base_num_channels * 8}
        else:
            raise Exception('block not supported')

        self._out_feature_strides = out_feature_strides
        self._out_feature_channels = out_feature_channels

        # self.avgpool = nn.AvgPool2d(7, stride=1)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)

        # He-style initialization for convolutions, constant for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def out_feature_strides(self, layer=None):
        # Stride of one named layer, or the whole mapping when layer is None.
        if layer is None:
            return self._out_feature_strides
        else:
            return self._out_feature_strides[layer]

    def out_feature_channels(self, layer=None):
        # Channel count of one named layer, or the whole mapping.
        if layer is None:
            return self._out_feature_channels
        else:
            return self._out_feature_channels[layer]

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # Projection shortcut when spatial or channel dims change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _add_output_and_check(self, name, x, outputs, output_layers):
        # Record x under `name` if requested; True once all outputs collected.
        if name in output_layers:
            outputs[name] = x
        return len(output_layers) == len(outputs)

    def forward(self, x, output_layers=None):
        """ Forward pass with input x. The output_layers specify the feature blocks which must be returned """
        outputs = OrderedDict()

        if output_layers is None:
            output_layers = self.output_layers

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        if self._add_output_and_check('conv1', x, outputs, output_layers):
            return outputs

        x = self.maxpool(x)

        x = self.layer1(x)

        if self._add_output_and_check('layer1', x, outputs, output_layers):
            return outputs

        x = self.layer2(x)

        if self._add_output_and_check('layer2', x, outputs, output_layers):
            return outputs

        x = self.layer3(x)

        if self._add_output_and_check('layer3', x, outputs, output_layers):
            return outputs

        x = self.layer4(x)

        if self._add_output_and_check('layer4', x, outputs, output_layers):
            return outputs

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        if self._add_output_and_check('fc', x, outputs, output_layers):
            return outputs

        if len(output_layers) == 1 and output_layers[0] == 'default':
            return x

        raise ValueError('output_layer is wrong.')


def resnet_baby(output_layers=None, pretrained=False, inplanes=16, **kwargs):
    """Constructs a ResNet-18 model.
    """
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer: {}'.format(l))

    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, inplanes=inplanes, **kwargs)

    if pretrained:
        # No pretrained weights exist for this reduced-width variant.
        raise NotImplementedError
    return model


def resnet18(output_layers=None, pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.
    """
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer: {}'.format(l))

    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)

    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model


def resnet50(output_layers=None, pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.
    """
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer: {}'.format(l))

    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/backbone/resnet18_vggm.py
================================================
import math
import torch
import torch.nn as nn
from collections import OrderedDict
from torchvision.models.resnet import BasicBlock

from .base import Backbone


class SpatialCrossMapLRN(nn.Module):
    """Local response normalization (AlexNet/VGG-M style), optionally across channels."""

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if ACROSS_CHANNELS:
            # Average over a window of neighboring channels (3D pool over C).
            self.average=nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                    stride=1,
                    padding=(int((local_size-1.0)/2), 0, 0))
        else:
            # Average over a spatial neighborhood within each channel.
            self.average=nn.AvgPool2d(kernel_size=local_size,
                    stride=1,
                    padding=int((local_size-1.0)/2))
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, x):
        # x / (k + alpha * mean(x^2))^beta
        if self.ACROSS_CHANNELS:
            div = x.pow(2).unsqueeze(1)
            div = self.average(div).squeeze(1)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        else:
            div = x.pow(2)
            div = self.average(div)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        x = x.div(div)
        return x


class ResNetVGGm1(Backbone):
    """ResNet-18 with an auxiliary VGG-M first conv branch ('vggconv1' output)."""

    def __init__(self, block, layers, output_layers, num_classes=1000, frozen_layers=()):
        self.inplanes = 64
        super(ResNetVGGm1, self).__init__(frozen_layers=frozen_layers)
        self.output_layers = output_layers
        # Side branch: VGG-M conv1 + LRN, independent of the ResNet stem.
        self.vggmconv1 = nn.Conv2d(3,96,(7, 7),(2, 2), padding=3)
        self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2,
                                    padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # self.avgpool = nn.AvgPool2d(7, stride=1)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style initialization for convolutions, constant for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # Projection shortcut when spatial or channel dims change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _add_output_and_check(self, name, x, outputs, output_layers):
        # Record x under `name` if requested; True once all outputs collected.
        if name in output_layers:
            outputs[name] = x
        return len(output_layers) == len(outputs)

    def forward(self, x, output_layers=None):
        outputs = OrderedDict()

        if output_layers is None:
            output_layers = self.output_layers

        # Auxiliary VGG-M branch is only computed when requested.
        if 'vggconv1' in output_layers:
            c1 = self.vgglrn(self.relu(self.vggmconv1(x)))
            if self._add_output_and_check('vggconv1', c1, outputs, output_layers):
                return outputs

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        if self._add_output_and_check('conv1', x, outputs, output_layers):
            return outputs

        x = self.maxpool(x)

        x = self.layer1(x)

        if self._add_output_and_check('layer1', x, outputs, output_layers):
            return outputs

        x = self.layer2(x)

        if self._add_output_and_check('layer2', x, outputs, output_layers):
            return outputs

        x = self.layer3(x)

        if self._add_output_and_check('layer3', x, outputs, output_layers):
            return outputs

        x = self.layer4(x)

        if self._add_output_and_check('layer4', x, outputs, output_layers):
            return outputs

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        if self._add_output_and_check('fc', x, outputs, output_layers):
            return outputs

        if len(output_layers) == 1 and output_layers[0] == 'default':
            return x

        raise ValueError('output_layer is wrong.')


def resnet18_vggmconv1(output_layers=None, path=None, **kwargs):
    """Constructs a ResNet-18 model with first-layer VGGm features.
    """
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer: {}'.format(l))

    model = ResNetVGGm1(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)

    if path is not None:
        # strict=False: the checkpoint may not cover the auxiliary VGG-M branch.
        model.load_state_dict(torch.load(path), strict=False)
    return model


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/backbone/resnet_seg.py
================================================
import math
import torch.nn as nn
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
'''2020.4.14 newly added'''
from collections import OrderedDict as odict
from ltr.models.head.utils import get_out_channels


def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, bias=False, dilation=dilation)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): """ ResNet network module. 
Allows extracting specific feature blocks.""" def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1): self.inplanes = inplanes super(ResNet, self).__init__() self.output_layers = output_layers self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) stride = [1 + (dilation_factor < l) for l in (8, 4, 2)] self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1)) self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1)) self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1)) self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor) # self.avgpool = nn.AvgPool2d(7, stride=1) self.avgpool = nn.AdaptiveAvgPool2d((1,1)) self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() '''2020.4.14 newly added''' self._out_channels = odict( # Deep-to-shallow order is required by SegNetwork layer5=get_out_channels(self.layer4), layer4=get_out_channels(self.layer3), layer3=get_out_channels(self.layer2), layer2=get_out_channels(self.layer1), layer1=get_out_channels(self.conv1)) def _make_layer(self, block, planes, blocks, stride=1, dilation=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _add_output_and_check(self, name, x, outputs, output_layers): if name in output_layers: outputs[name] = x return len(output_layers) == len(outputs) def forward(self, x, output_layers=None): """ Forward pass with input x. 
The output_layers specify the feature blocks which must be returned """ outputs = OrderedDict() if output_layers is None: output_layers = self.output_layers x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x)# conv1: (batch,64,128,128) '''2020.4.14 change names for every layers''' if self._add_output_and_check('layer1', x, outputs, output_layers): return outputs x = self.layer1(x) # (batch,256,64,64) if self._add_output_and_check('layer2', x, outputs, output_layers): return outputs x = self.layer2(x) # (batch,512,32,32) if self._add_output_and_check('layer3', x, outputs, output_layers): return outputs x = self.layer3(x) # (batch,1024,16,16) if self._add_output_and_check('layer4', x, outputs, output_layers): return outputs x = self.layer4(x) if self._add_output_and_check('layer5', x, outputs, output_layers): return outputs x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) if self._add_output_and_check('fc', x, outputs, output_layers): return outputs if len(output_layers) == 1 and output_layers[0] == 'default': return x raise ValueError('output_layer is wrong.') '''2020.4.14 newly added''' def get_out_channels(self): return self._out_channels def resnet18(output_layers=None, pretrained=False, dilation_factor=1): """Constructs a ResNet-18 model. """ if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, dilation_factor=dilation_factor) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet50(output_layers=None, pretrained=False, dilation_factor=1): """Constructs a ResNet-50 model. 
# Layer names accepted by the constructors. After the 2020.4.14 renaming,
# forward() emits keys 'layer1'..'layer5' and 'fc', so 'layer5' must be
# accepted here; 'conv1' is kept for backward compatibility with older callers.
_VALID_OUTPUT_LAYERS = ('conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'layer5', 'fc')


def resnet50(output_layers=None, pretrained=False, dilation_factor=1):
    """Constructs a ResNet-50 model.

    args:
        output_layers - names of the feature blocks to return (default: final output)
        pretrained - load ImageNet weights from the torchvision model zoo
        dilation_factor - dilation applied to the deeper stages
    """
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in _VALID_OUTPUT_LAYERS:
                raise ValueError('Unknown layer: {}'.format(l))
    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, dilation_factor=dilation_factor)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model


'''newly added'''
def resnet101(output_layers=None, pretrained=False, dilation_factor=1):
    """Constructs a ResNet-101 model. Same arguments as resnet50."""
    if output_layers is None:
        output_layers = ['default']
    else:
        for l in output_layers:
            if l not in _VALID_OUTPUT_LAYERS:
                raise ValueError('Unknown layer: {}'.format(l))
    model = ResNet(Bottleneck, [3, 4, 23, 3], output_layers, dilation_factor=dilation_factor)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
class ATOMnet(nn.Module):
    """ATOM network: a backbone feature extractor feeding an IoU-prediction head."""

    def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):
        """args:
            feature_extractor - backbone feature extractor
            bb_regressor - IoU prediction module
            bb_regressor_layer - names of the backbone layers fed to bb_regressor
            extractor_grad - whether the backbone requires gradients
        """
        super(ATOMnet, self).__init__()

        self.feature_extractor = feature_extractor
        self.bb_regressor = bb_regressor
        self.bb_regressor_layer = bb_regressor_layer

        if not extractor_grad:
            # Freeze the backbone.
            for p in self.feature_extractor.parameters():
                p.requires_grad_(False)

    def forward(self, train_imgs, test_imgs, train_bb, test_proposals):
        """Forward pass.

        In sequence mode (dim() == 5) tensors are [sequence, batch, feature, row, col].
        """
        num_sequences = train_imgs.shape[-4]
        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1
        num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1  # kept for clarity (unused)

        # Collapse the (sequence, batch) leading dims before the backbone.
        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))
        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))

        train_feat_iou = list(train_feat.values())
        test_feat_iou = list(test_feat.values())

        # Obtain iou prediction
        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou,
                                     train_bb.reshape(num_train_images, num_sequences, 4),
                                     test_proposals.reshape(num_train_images, num_sequences, -1, 4))
        return iou_pred

    def extract_backbone_features(self, im, layers=None):
        """Run the backbone, defaulting to the layers the regressor consumes."""
        if layers is None:
            layers = self.bb_regressor_layer
        return self.feature_extractor(im, layers)

    def extract_features(self, im, layers):
        """Run the backbone on an explicit layer list."""
        return self.feature_extractor(im, layers)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d -> BatchNorm -> ReLU helper used throughout the IoU net."""
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=True),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    )


@model_constructor
def atom_resnet18(iou_input_dim=(256, 256), iou_inter_dim=(256, 256), backbone_pretrained=True):
    """ATOM with a frozen ResNet-18 backbone (layer2/layer3 features)."""
    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)
    iou_predictor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)
    net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor,
                  bb_regressor_layer=['layer2', 'layer3'], extractor_grad=False)
    return net


@model_constructor
def atom_resnet50(iou_input_dim=(256, 256), iou_inter_dim=(256, 256), backbone_pretrained=True):
    """ATOM with a frozen ResNet-50 backbone (4x wider layer2/layer3 features)."""
    backbone_net = backbones.resnet50(pretrained=backbone_pretrained)
    iou_predictor = bbmodels.AtomIoUNet(input_dim=(4*128, 4*256), pred_input_dim=iou_input_dim,
                                        pred_inter_dim=iou_inter_dim)
    net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor,
                  bb_regressor_layer=['layer2', 'layer3'], extractor_grad=False)
    return net
    def __init__(self, input_dim=(128,256), pred_input_dim=(256,256), pred_inter_dim=(256,256)):
        """Build the two-branch IoU network.

        args:
            input_dim: channels of the two backbone layers fed in.
            pred_input_dim: channels entering the prediction network.
            pred_inter_dim: intermediate channels in the prediction network.
        """
        super().__init__()
        # _r for reference, _t for test
        self.conv3_1r = conv(input_dim[0], 128, kernel_size=3, stride=1)
        self.conv3_1t = conv(input_dim[0], 256, kernel_size=3, stride=1)

        self.conv3_2t = conv(256, pred_input_dim[0], kernel_size=3, stride=1)

        # PrRoIPool at 1/8 resolution for the shallow (block 3) branch.
        self.prroi_pool3r = PrRoIPool2D(3, 3, 1/8)
        self.prroi_pool3t = PrRoIPool2D(5, 5, 1/8)

        self.fc3_1r = conv(128, 256, kernel_size=3, stride=1, padding=0)

        self.conv4_1r = conv(input_dim[1], 256, kernel_size=3, stride=1)
        self.conv4_1t = conv(input_dim[1], 256, kernel_size=3, stride=1)

        self.conv4_2t = conv(256, pred_input_dim[1], kernel_size=3, stride=1)

        # PrRoIPool at 1/16 resolution for the deep (block 4) branch.
        self.prroi_pool4r = PrRoIPool2D(1, 1, 1/16)
        self.prroi_pool4t = PrRoIPool2D(3, 3, 1 / 16)

        # Produce the per-target modulation vectors from concatenated block 3+4 features.
        self.fc34_3r = conv(256 + 256, pred_input_dim[0], kernel_size=1, stride=1, padding=0)
        self.fc34_4r = conv(256 + 256, pred_input_dim[1], kernel_size=1, stride=1, padding=0)

        # Linear blocks flatten the 5x5 / 3x3 pooled test features.
        self.fc3_rt = LinearBlock(pred_input_dim[0], pred_inter_dim[0], 5)
        self.fc4_rt = LinearBlock(pred_input_dim[1], pred_inter_dim[1], 3)

        self.iou_predictor = nn.Linear(pred_inter_dim[0]+pred_inter_dim[1], 1, bias=True)

        # Init weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                # In earlier versions batch norm parameters was initialized with default initialization,
                # which changed in pytorch 1.2. In 1.1 and earlier the weight was set to U(0,1).
                # So we use the same initialization here.
                # m.weight.data.fill_(1)
                m.weight.data.uniform_()
                m.bias.data.zero_()
    def forward(self, feat1, feat2, bb1, proposals2):
        """Runs the ATOM IoUNet during training operation.

        This forward pass is mainly used for training. Call the individual
        functions during tracking instead.

        args:
            feat1: Features from the reference frames (4 or 5 dims).
            feat2: Features from the test frames (4 or 5 dims).
            bb1: Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).
            proposals2: Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4).
        """
        assert bb1.dim() == 3
        assert proposals2.dim() == 4

        num_images = proposals2.shape[0]
        num_sequences = proposals2.shape[1]

        # Extract first train sample — only one reference frame is used for modulation.
        feat1 = [f[0,...] if f.dim()==5 else f.reshape(-1, num_sequences, *f.shape[-3:])[0,...] for f in feat1]
        bb1 = bb1[0,...]

        # Get modulation vector
        modulation = self.get_modulation(feat1, bb1)

        iou_feat = self.get_iou_feat(feat2)

        # Broadcast the per-sequence modulation over every test image, then
        # flatten (images, sequences) into one batch dimension.
        modulation = [f.reshape(1, num_sequences, -1).repeat(num_images, 1, 1).reshape(num_sequences*num_images, -1)
                      for f in modulation]

        proposals2 = proposals2.reshape(num_sequences*num_images, -1, 4)
        pred_iou = self.predict_iou(modulation, iou_feat, proposals2)
        return pred_iou.reshape(num_images, num_sequences, -1)
    def predict_iou(self, modulation, feat, proposals):
        """Predicts IoU for the give proposals.

        args:
            modulation: Modulation vectors for the targets. Dims (batch, feature_dim).
            feat: IoU features (from get_iou_feat) for test images. Dims (batch, feature_dim, H, W).
            proposals: Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).
        """
        fc34_3_r, fc34_4_r = modulation
        c3_t, c4_t = feat

        batch_size = c3_t.size()[0]

        # Modulation: channel-wise attention from the reference target.
        c3_t_att = c3_t * fc34_3_r.reshape(batch_size, -1, 1, 1)
        c4_t_att = c4_t * fc34_4_r.reshape(batch_size, -1, 1, 1)

        # Add batch_index to rois
        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(c3_t.device)

        # Push the different rois for the same image along the batch dimension
        num_proposals_per_batch = proposals.shape[1]

        # input proposals2 is in format xywh, convert it to x0y0x1y1 format
        proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)

        # Add batch index — PrRoIPool expects rois of the form (batch_idx, x0, y0, x1, y1).
        roi2 = torch.cat((batch_index.reshape(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1),
                          proposals_xyxy), dim=2)
        roi2 = roi2.reshape(-1, 5).to(proposals_xyxy.device)

        roi3t = self.prroi_pool3t(c3_t_att, roi2)
        roi4t = self.prroi_pool4t(c4_t_att, roi2)

        fc3_rt = self.fc3_rt(roi3t)
        fc4_rt = self.fc4_rt(roi4t)

        fc34_rt_cat = torch.cat((fc3_rt, fc4_rt), dim=1)

        iou_pred = self.iou_predictor(fc34_rt_cat).reshape(batch_size, num_proposals_per_batch)

        return iou_pred
    def get_modulation(self, feat, bb):
        """Get modulation vectors for the targets.

        args:
            feat: Backbone features from reference images. Dims (batch, feature_dim, H, W).
            bb: Target boxes (x,y,w,h) in image coords in the reference samples. Dims (batch, 4).
        """
        feat3_r, feat4_r = feat

        c3_r = self.conv3_1r(feat3_r)

        # Add batch_index to rois
        batch_size = bb.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(bb.device)

        # input bb is in format xywh, convert it to x0y0x1y1 format
        # (clone first so the caller's tensor is not mutated in place).
        bb = bb.clone()
        bb[:, 2:4] = bb[:, 0:2] + bb[:, 2:4]
        roi1 = torch.cat((batch_index, bb), dim=1)

        roi3r = self.prroi_pool3r(c3_r, roi1)

        c4_r = self.conv4_1r(feat4_r)
        roi4r = self.prroi_pool4r(c4_r, roi1)

        fc3_r = self.fc3_1r(roi3r)

        # Concatenate from block 3 and 4
        fc34_r = torch.cat((fc3_r, roi4r), dim=1)

        fc34_3_r = self.fc34_3r(fc34_r)
        fc34_4_r = self.fc34_4r(fc34_r)

        return fc34_3_r, fc34_4_r

    def get_iou_feat(self, feat2):
        """Get IoU prediction features from a 4 or 5 dimensional backbone input."""
        feat2 = [f.reshape(-1, *f.shape[-3:]) if f.dim()==5 else f for f in feat2]
        feat3_t, feat4_t = feat2
        c3_t = self.conv3_2t(self.conv3_1t(feat3_t))
        c4_t = self.conv4_2t(self.conv4_1t(feat4_t))
        return c3_t, c4_t


# --- ltr/models/head/seg_network.py ---
class TSE(nn.Module):
    """Target-specific encoder: fuses backbone features with the coarse score map."""

    def __init__(self, fc, ic, oc):
        super().__init__()
        # fc: backbone feature channels, ic: score channels, oc: output channels.
        nc = ic + oc
        self.reduce = nn.Sequential(conv(fc, oc, 1), relu(), conv(oc, oc, 1))
        self.transform = nn.Sequential(conv(nc, nc, 3), relu(),
                                       conv(nc, nc, 3), relu(),
                                       conv(nc, oc, 3), relu())

    def forward(self, ft, score, x=None):
        h = self.reduce(ft)
        # Pooled summary of this level's features unless one is handed down (x).
        hpool = F.adaptive_avg_pool2d(h, (1, 1)) if x is None else x
        h = adaptive_cat((h, score), dim=1, ref_tensor=0)
        h = self.transform(h)
        return h, hpool
class CAB(nn.Module):
    """Channel attention block: gates the shallower feature map with a 1x1
    conv over globally pooled deep+shallow statistics, then adds the
    upsampled deeper map."""

    def __init__(self, oc, deepest):
        super().__init__()
        self.convreluconv = nn.Sequential(conv(2 * oc, oc, 1), relu(), conv(oc, oc, 1))
        # For the deepest level the incoming "deeper" tensor is already a 1x1 vector.
        self.deepest = deepest

    def forward(self, deeper, shallower, att_vec=None):
        shallow_pool = F.adaptive_avg_pool2d(shallower, (1, 1))
        deeper_pool = deeper if self.deepest else F.adaptive_avg_pool2d(deeper, (1, 1))
        if att_vec is not None:
            global_pool = torch.cat([shallow_pool, deeper_pool, att_vec], dim=1)
        else:
            global_pool = torch.cat((shallow_pool, deeper_pool), dim=1)
        conv_1x1 = self.convreluconv(global_pool)
        inputs = shallower * torch.sigmoid(conv_1x1)
        out = inputs + interpolate(deeper, inputs.shape[-2:])
        return out


class RRB(nn.Module):
    """Residual refinement block: 1x1 projection followed by a residual 3x3 branch."""

    def __init__(self, oc, use_bn=False):
        super().__init__()
        self.conv1x1 = conv(oc, oc, 1)
        if use_bn:
            self.bblock = nn.Sequential(conv(oc, oc, 3), nn.BatchNorm2d(oc), relu(), conv(oc, oc, 3, bias=False))
        else:
            self.bblock = nn.Sequential(conv(oc, oc, 3), relu(), conv(oc, oc, 3, bias=False))  # Basic block

    def forward(self, x):
        h = self.conv1x1(x)
        return F.relu(h + self.bblock(h))


class Upsampler(nn.Module):
    """Upsamples the mask logits x2 bicubically, refines, then resizes to the image size."""

    def __init__(self, in_channels=64):
        super().__init__()
        self.conv1 = conv(in_channels, in_channels // 2, 3)
        self.conv2 = conv(in_channels // 2, 1, 3)

    def forward(self, x, image_size):
        # Fix: removed stray debug `print(x.shape)` left in the forward pass.
        x = F.interpolate(x, (2 * x.shape[-2], 2 * x.shape[-1]), mode='bicubic', align_corners=False)
        x = F.relu(self.conv1(x))
        x = F.interpolate(x, image_size[-2:], mode='bicubic', align_corners=False)
        x = self.conv2(x)
        return x
class PyrUpBicubic2d(nn.Module):
    """Depthwise x2 bicubic upsampling built from four fixed 4x4 conv kernels,
    one per output pixel phase (even/odd row x even/odd column)."""

    def __init__(self, channels):
        super().__init__()
        self.channels = channels

        def taps(d):
            # 4-tap bicubic kernel (a = -0.75) sampled at offsets d + [-1, 0, 1, 2].
            x = torch.abs(d + torch.arange(-1, 3, dtype=torch.float32))
            a = -0.75
            f = (x < 1).float() * ((a + 2) * x * x * x - (a + 3) * x * x + 1) + \
                ((x >= 1) * (x < 2)).float() * (a * x * x * x - 5 * a * x * x + 8 * a * x - 4 * a)
            row = f.reshape(1, 1, 1, len(x)).float()
            return row, row.permute(0, 1, 3, 2)

        w_even, w_even_t = taps(-0.25)
        w_odd, w_odd_t = taps(-0.25 - 0.5)

        # Building non-separable filters for now; separable ones might be faster.
        # .contiguous() is needed until a bug is fixed in nn.Conv2d.
        self.W00 = (w_even_t @ w_even).expand(channels, 1, 4, 4).contiguous()
        self.W01 = (w_even_t @ w_odd).expand(channels, 1, 4, 4).contiguous()
        self.W10 = (w_odd_t @ w_even).expand(channels, 1, 4, 4).contiguous()
        self.W11 = (w_odd_t @ w_odd).expand(channels, 1, 4, 4).contiguous()

    def forward(self, input):
        # Lazily move the constant filters to the input's device.
        if input.device != self.W00.device:
            self.W00 = self.W00.to(input.device)
            self.W01 = self.W01.to(input.device)
            self.W10 = self.W10.to(input.device)
            self.W11 = self.W11.to(input.device)

        padded = F.pad(input, (2, 2, 2, 2), 'replicate')
        I00, I01, I10, I11 = (F.conv2d(padded, wt, groups=self.channels)
                              for wt in (self.W00, self.W01, self.W10, self.W11))

        n, c, h, w = I11.shape
        # Interleave the four phase images into a 2h x 2w output.
        even_rows = torch.stack((I00, I01), dim=-1).view(n, c, h, 2 * w)
        odd_rows = torch.stack((I10, I11), dim=-1).view(n, c, h, 2 * w)
        out = torch.stack((even_rows, odd_rows), dim=-2).view(n, c, 2 * h, 2 * w)
        # Trim the one-pixel border introduced by the padded convolution.
        return F.pad(out, (-1, -1, -1, -1))


class BackwardCompatibleUpsampler(nn.Module):
    """Upsampler using PyrUpBicubic2d so bicubic x2 also works with PyTorch 1.0.1."""

    def __init__(self, in_channels=64):
        super().__init__()
        self.conv1 = conv(in_channels, in_channels // 2, 3)
        self.up1 = PyrUpBicubic2d(in_channels)
        self.conv2 = conv(in_channels // 2, 1, 3)
        self.up2 = PyrUpBicubic2d(in_channels // 2)

    def forward(self, x, image_size):
        x = self.up1(x)
        x = F.relu(self.conv1(x))
        x = self.up2(x)
        x = F.interpolate(x, image_size[-2:], mode='bilinear', align_corners=False)
        return self.conv2(x)
class SegNetwork(nn.Module):
    """Segmentation decoder: per-level TSE/RRB/CAB refinement from deep to
    shallow backbone features, followed by a final upsampling head."""

    def __init__(self, in_channels=1, out_channels=32, ft_channels=None, use_bn=False):
        """args:
            in_channels - channels of the coarse score map.
            out_channels - working channel count of the decoder.
            ft_channels - OrderedDict mapping layer name -> backbone channels,
                          in deep-to-shallow order (required).
            use_bn - insert BatchNorm inside the RRB blocks.
        """
        super().__init__()

        assert ft_channels is not None
        self.ft_channels = ft_channels

        self.TSE = nn.ModuleDict()
        self.RRB1 = nn.ModuleDict()
        self.CAB = nn.ModuleDict()
        self.RRB2 = nn.ModuleDict()

        ic = in_channels
        oc = out_channels

        for L, fc in self.ft_channels.items():
            self.TSE[L] = TSE(fc, ic, oc)
            self.RRB1[L] = RRB(oc, use_bn=use_bn)
            # 'layer5' is the deepest level: CAB then skips pooling of `deeper`.
            self.CAB[L] = CAB(oc, L == 'layer5')
            self.RRB2[L] = RRB(oc, use_bn=use_bn)

        #if torch.__version__ == '1.0.1'
        self.project = BackwardCompatibleUpsampler(out_channels)
        #self.project = Upsampler(out_channels)

    def forward(self, scores, features, image_size):
        num_targets = scores.shape[0]
        num_fmaps = features[next(iter(self.ft_channels))].shape[0]
        # More score maps than feature maps means several targets share one frame.
        multi_targets = num_targets > num_fmaps

        x = None
        for i, L in enumerate(self.ft_channels):
            ft = features[L]
            s = interpolate(scores, ft.shape[-2:])  # Resample scores to match features size

            if multi_targets:
                h, hpool = self.TSE[L](ft.repeat(num_targets, 1, 1, 1), s, x)
            else:
                h, hpool = self.TSE[L](ft, s, x)

            h = self.RRB1[L](h)
            h = self.CAB[L](hpool, h)
            x = self.RRB2[L](h)

        x = self.project(x, image_size)
        return x


def text_bargraph(values):
    """Render a 1-D sequence of values in [0, 1] as a unicode bar-graph string.

    Values below 0 render as 'u', above 1 as 'o', NaN as a shaded block.
    """
    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))
    nsteps = len(blocks) - 2 - 1
    hstep = 1 / (2 * nsteps)
    values = np.array(values)
    nans = np.isnan(values)
    values[nans] = 0  # '░'
    # Fix: `np.int` was removed in NumPy >= 1.24; use the builtin `int`.
    indices = ((values + hstep) * nsteps + 1).astype(int)
    indices[values < 0] = 0
    indices[values > 1] = len(blocks) - 1
    graph = blocks[indices]
    graph[nans] = '░'
    graph = str.join('', graph)
    return graph
class ModuleWrapper:
    """A wrapper for hiding modules from PyTorch, so that the same module can be
    used in multiple places and yet saved only once in a checkpoint, or not at all."""
    # https://stackoverflow.com/questions/1466676/create-a-wrapper-class-to-call-a-pre-and-post-function-around-existing-functions
    def __init__(self, wrapped_module):
        self.__wrapped_module__ = wrapped_module

    def __getattr__(self, attr):
        orig_attr = self.__wrapped_module__.__getattribute__(attr)
        if callable(orig_attr):
            def hooked(*args, **kwargs):
                result = orig_attr(*args, **kwargs)
                # prevent wrapped_class from becoming unwrapped.
                # Fix: identity check instead of `==` — equality on tensor
                # results is elementwise and raises on truth-value conversion.
                if result is self.__wrapped_module__:
                    return self
                return result
            return hooked
        else:
            return orig_attr

    def __call__(self, *args, **kwargs):
        return self.__wrapped_module__(*args, **kwargs)


def conv(ic, oc, ksize, bias=True, dilation=1, stride=1):
    """'Same'-padded Conv2d helper."""
    return nn.Conv2d(ic, oc, ksize, padding=ksize // 2, bias=bias, dilation=dilation, stride=stride)


def relu(negative_slope=0.0, inplace=False):
    """LeakyReLU factory; with the default slope 0.0 it behaves like plain ReLU."""
    return nn.LeakyReLU(negative_slope, inplace=inplace)


def interpolate(t, sz):
    """Bilinearly resize 4D tensor t to spatial size sz (no-op when already that size)."""
    sz = sz.tolist() if torch.is_tensor(sz) else sz
    return F.interpolate(t, sz, mode='bilinear', align_corners=False) if t.shape[-2:] != sz else t


def adaptive_cat(seq, dim=0, ref_tensor=0):
    """Concatenate tensors after resizing all of them to the spatial size of seq[ref_tensor]."""
    sz = seq[ref_tensor].shape[-2:]
    t = torch.cat([interpolate(t, sz) for t in seq], dim=dim)
    return t


def get_out_channels(layer):
    """Best-effort lookup of a layer's (or container's) output channel count,
    searching child modules from the last one backwards."""
    if hasattr(layer, 'out_channels'):
        oc = layer.out_channels
    elif hasattr(layer, '_modules'):
        oc = get_out_channels(layer._modules)
    else:
        ocs = []
        for key in reversed(layer):
            ocs.append(get_out_channels(layer[key]))
        oc = 0
        for elem in ocs:
            if elem:
                return elem
    return oc


def is_finite(t):
    """Elementwise True where t is neither NaN nor +/-Inf.

    Fix: use torch.isfinite instead of adding two bool masks, which is
    deprecated/unsupported on newer PyTorch.
    """
    return torch.isfinite(t)
class AverageMeter:
    """Computes and stores the average and current value."""

    def __init__(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.seq_avg = []

    def reset(self):
        self.__init__()

    def update(self, val, n=1):
        """Fold in one value with weight n; NaN values are ignored."""
        if np.isnan(val):
            return
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def update_multi(self, val):
        """Fold in a sequence of values, skipping NaN entries."""
        val = np.array(val)
        finite = val[~np.isnan(val)]
        self.val = val
        self.sum += np.nansum(finite)
        self.count += len(finite)
        self.avg = self.sum / self.count


def softmax_reg(x: torch.Tensor, dim, reg=None):
    """Softmax with an optional constant term added to the denominator."""
    if reg is None:
        return torch.softmax(x, dim=dim)
    dim %= x.dim()
    if isinstance(reg, (float, int)):
        reg = x.new_tensor([reg])
    # Append the regularizer as one extra logit along `dim`, soft-max, then drop it.
    shape = [1 if d == dim else x.shape[d] for d in range(x.dim())]
    augmented = torch.cat((x, reg.expand(shape)), dim=dim)
    keep = tuple(slice(-1) if d == dim else slice(None) for d in range(x.dim()))
    return torch.softmax(augmented, dim=dim)[keep]


class MLU(nn.Module):
    r"""MLU activation: ELU applied on top of a leaky-rectified input."""

    def __init__(self, min_val, inplace=False):
        super().__init__()
        self.min_val = min_val
        self.inplace = inplace

    def forward(self, input):
        leaky = F.leaky_relu(input, 1 / self.min_val, inplace=self.inplace)
        return F.elu(leaky, self.min_val, inplace=self.inplace)


class LeakyReluPar(nn.Module):
    r"""LeakyRelu parametric activation: blends |x| and x with weight a."""

    def forward(self, x, a):
        neg_w = (1.0 - a) / 2.0
        pos_w = (1.0 + a) / 2.0
        return neg_w * torch.abs(x) + pos_w * x
class LeakyReluParDeriv(nn.Module):
    r"""Derivative of the LeakyRelu parametric activation, wrt x."""

    def forward(self, x, a):
        return (1.0 - a)/2.0 * torch.sign(x.detach()) + (1.0 + a)/2.0


class BentIdentPar(nn.Module):
    r"""BentIdent parametric activation."""

    def __init__(self, b=1.0):
        super().__init__()
        self.b = b

    def forward(self, x, a):
        return (1.0 - a)/2.0 * (torch.sqrt(x*x + 4.0*self.b*self.b) - 2.0*self.b) + (1.0 + a)/2.0 * x


class BentIdentParDeriv(nn.Module):
    r"""BentIdent parametric activation deriv."""

    def __init__(self, b=1.0):
        super().__init__()
        self.b = b

    def forward(self, x, a):
        return (1.0 - a)/2.0 * (x / torch.sqrt(x*x + 4.0*self.b*self.b)) + (1.0 + a)/2.0


def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,
               batch_norm=True, relu=True, padding_mode='zeros'):
    """Conv2d optionally preceded by explicit padding and followed by BatchNorm/ReLU.

    padding_mode must be 'zeros' or 'replicate'.
    """
    layers = []
    assert padding_mode == 'zeros' or padding_mode == 'replicate'
    if padding_mode == 'replicate' and padding > 0:
        assert isinstance(padding, int)
        # Bug fix: 'replicate' previously appended nn.ReflectionPad2d, which
        # mirrors the border instead of repeating the edge pixels.
        layers.append(nn.ReplicationPad2d(padding))
        padding = 0
    layers.append(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                            padding=padding, dilation=dilation, bias=bias))
    if batch_norm:
        layers.append(nn.BatchNorm2d(out_planes))
    if relu:
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)


class LinearBlock(nn.Module):
    """Flattening Linear layer with optional (2D-reshaped) BatchNorm and ReLU."""

    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):
        super().__init__()
        # Input is expected as (batch, in_planes, input_sz, input_sz), flattened before the Linear.
        self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        x = self.linear(x.reshape(x.shape[0], -1))
        if self.bn is not None:
            # BatchNorm2d over a (batch, C, 1, 1) view of the linear output.
            x = self.bn(x.reshape(x.shape[0], x.shape[1], 1, 1))
        if self.relu is not None:
            x = self.relu(x)
        return x.reshape(x.shape[0], -1)
class DistanceMap(nn.Module):
    """Generate a soft-binned distance map around a given center location.

    args:
        num_bins: Number of bins in the map.
        bin_displacement: Displacement of the bins.
    """

    def __init__(self, num_bins, bin_displacement=1.0):
        super().__init__()
        self.num_bins = num_bins
        self.bin_displacement = bin_displacement

    def forward(self, center, output_sz):
        """Create the distance map.

        args:
            center: Torch tensor with (y,x) center position. Dims (batch, 2).
            output_sz: Size of output distance map. 2-dimensional tuple.
        """
        center = center.view(-1, 2)
        dev = center.device

        bin_centers = torch.arange(self.num_bins, dtype=torch.float32, device=dev).view(1, -1, 1, 1)
        rows = torch.arange(output_sz[0], dtype=torch.float32, device=dev).view(1, 1, -1, 1)
        cols = torch.arange(output_sz[1], dtype=torch.float32, device=dev).view(1, 1, 1, -1)

        dy = rows - center[:, 0].view(-1, 1, 1, 1)
        dx = cols - center[:, 1].view(-1, 1, 1, 1)
        dist = torch.sqrt(dy * dy + dx * dx)

        bin_diff = dist / self.bin_displacement - bin_centers

        # Triangular weights for the inner bins; the last bin saturates outward.
        inner = F.relu(1.0 - torch.abs(bin_diff[:, :-1, :, :]), inplace=True)
        outer = (1.0 + bin_diff[:, -1:, :, :]).clamp(0, 1)
        return torch.cat((inner, outer), dim=1)
def apply_filter(feat, filter, dilation_factors=None):
    """Applies the filter on the input features (feat); groups are derived automatically.

    args:
        feat: Input features, dims (images_in_sequence, sequences, feat_dim, H, W).
        filter: Filter of dims (sequences, feat_dim, fH, fW) or
                (sequences, filters, feat_dim/groups, fH, fW).
        dilation_factors: Optional dict {dilation: num_filters} splitting the
                filters into groups convolved with different dilations.
    output:
        scores: (images_in_sequence, sequences, yH, yW) or
                (images_in_sequence, sequences, filters, yH, yW).
    """
    multiple_filters = (filter.dim() == 5)
    padding = (filter.shape[-2] // 2, filter.shape[-1] // 2)

    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    num_filters = filter.shape[1] if multiple_filters else 1
    num_channels = feat.shape[-3]
    groups = num_channels // filter.shape[-3]
    assert num_filters % groups == 0 and num_channels % groups == 0

    # Fold the sequence dim into channels so one grouped conv handles all sequences.
    feat_flat = feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1])

    if not multiple_filters:
        scores = F.conv2d(feat_flat, filter, padding=padding, groups=num_sequences)
        return scores.view(num_images, num_sequences, scores.shape[-2], scores.shape[-1])

    if dilation_factors is None:
        scores = F.conv2d(feat_flat, filter.view(-1, *filter.shape[-3:]),
                          padding=padding, groups=num_sequences * groups)
        return scores.view(num_images, num_sequences, -1, scores.shape[-2], scores.shape[-1])

    # Convolve each dilation group separately, then concatenate along the filter dim.
    per_dilation = []
    first = 0
    for d_factor, count in dilation_factors.items():
        f_d = filter[:, first:first + count, ...].contiguous()
        pad_d = [p + d_factor - 1 for p in padding]
        s_d = F.conv2d(feat_flat, f_d.view(-1, *f_d.shape[-3:]), padding=pad_d,
                       groups=num_sequences * groups, dilation=d_factor)
        per_dilation.append(s_d.view(num_images, num_sequences, -1, s_d.shape[-2], s_d.shape[-1]))
        first += count
    return torch.cat(per_dilation, dim=2)
def apply_feat_transpose(feat, input, filter_ksz, training=True, groups=1):
    """Applies the transposed operation of apply_filter w.r.t. the filter itself.
    Can be used to compute the filter gradient.

    args:
        feat: Input features, dims (images_in_sequence, sequences, feat_dim, H, W).
        input: Input activation (e.g. residuals), dims (images_in_sequence, sequences, yH, yW)
               or (images_in_sequence, sequences, filters, yH, yW).
        filter_ksz: filter kernel size (int or (fH, fW)).
        training: Choose the implementation that is faster in the given mode.
    output:
        Output of transposed operation, dims (sequences, feat_dim, fH, fW).
    """
    if groups != 1:
        raise NotImplementedError('Not implemented other values of group.')

    # v3 has the fast backward pass (preferred when gradients are needed);
    # v2 has the fast forward pass but only supports 4-dim input.
    if training or input.dim() == 5:
        return _apply_feat_transpose_v3(feat, input, filter_ksz)
    return _apply_feat_transpose_v2(feat, input, filter_ksz)


def _apply_feat_transpose_v1(feat, input, filter_ksz):
    """This one is slow as hell!!!! (kept for reference only)"""
    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    feat_sz = (feat.shape[-2], feat.shape[-1])
    if isinstance(filter_ksz, int):
        filter_ksz = (filter_ksz, filter_ksz)

    # trans_pad = sz + padding - filter_ksz
    trans_pad = [sz + ksz//2 - ksz for sz, ksz in zip(feat_sz, filter_ksz)]

    filter_grad = F.conv_transpose2d(input.flip((2, 3)).view(1, -1, input.shape[-2], input.shape[-1]),
                                     feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]),
                                     padding=trans_pad, groups=num_images * num_sequences)

    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0)


def _apply_feat_transpose_v2(feat, input, filter_ksz):
    """Fast forward and slow backward"""
    multiple_filters = (input.dim() == 5)
    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    num_filters = input.shape[2] if multiple_filters else 1
    if isinstance(filter_ksz, int):
        filter_ksz = (filter_ksz, filter_ksz)

    trans_pad = [(ksz-1)//2 for ksz in filter_ksz]

    if multiple_filters:
        # Correlate residuals with features; the flip afterwards converts
        # correlation into the convolution-transpose result.
        filter_grad = F.conv2d(input.reshape(-1, num_filters, input.shape[-2], input.shape[-1]).permute(1,0,2,3),
                               feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),
                               padding=trans_pad, groups=num_images * num_sequences)

        if num_images == 1:
            return filter_grad.view(num_filters, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).flip((3,4)).permute(1,0,2,3,4)
        # Sum the per-image contributions.
        return filter_grad.view(num_filters, num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).flip((3,4)).permute(1,0,2,3,4)

    filter_grad = F.conv2d(input.reshape(1, -1, input.shape[-2], input.shape[-1]),
                           feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),
                           padding=trans_pad, groups=num_images * num_sequences)

    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0).flip((2,3))


def _apply_feat_transpose_v3(feat, input, filter_ksz):
    """Slow forward fast backward"""
    multiple_filters = (input.dim() == 5)
    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    num_filters = input.shape[2] if multiple_filters else 1
    if isinstance(filter_ksz, int):
        filter_ksz = (filter_ksz, filter_ksz)

    trans_pad = [ksz//2 for ksz in filter_ksz]

    # Features act as the "input" and residuals as the "kernel" here, which
    # makes the autograd backward pass cheap.
    filter_grad = F.conv2d(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]).permute(1,0,2,3),
                           input.reshape(-1, 1, input.shape[-2], input.shape[-1]),
                           padding=trans_pad, groups=num_images * num_sequences)

    if multiple_filters:
        if num_images == 1:
            return filter_grad.view(-1, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).permute(1,2,0,3,4)
        return filter_grad.view(-1, num_images, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,2,0,3,4)

    if num_images == 1:
        return filter_grad.permute(1,0,2,3)
    return filter_grad.view(-1, num_images, num_sequences, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,0,2,3)


def _apply_feat_transpose_v4(feat, input, filter_ksz):
    """Slow forward fast backward (single-filter variant, unused by the dispatcher)"""
    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    if isinstance(filter_ksz, int):
        filter_ksz = (filter_ksz, filter_ksz)

    trans_pad = [ksz//2 for ksz in filter_ksz]

    filter_grad = F.conv2d(feat.permute(2,1,0,3,4).reshape(feat.shape[-3], -1, feat.shape[-2], feat.shape[-1]),
                           input.permute(1,0,2,3),
                           padding=trans_pad, groups=num_sequences)

    return filter_grad.permute(1,0,2,3)
def filter_gradient(feat, filter, label=None, training=True):
    """Gradient of the L2 filter loss w.r.t. the filter.

    args:
        feat: Input features, dims (images_in_sequence, sequences, feat_dim, H, W).
        filter: The filter, dims (sequences, feat_dim, fH, fW).
        label: Optional ground-truth response, dims (images_in_sequence, sequences, yH, yW).
    output:
        Gradient tensor with the same dims as `filter`.
    """
    residuals = apply_filter(feat, filter)
    if label is not None:
        residuals = residuals - label
    ksz = (filter.shape[-2], filter.shape[-1])
    return apply_feat_transpose(feat, residuals, ksz, training=training)
""" def __init__(self, size_average=True, eps=1e-5, scale=1.0): super().__init__() self.size_average = size_average self.eps = eps self.scale = scale def forward(self, input): if self.size_average: return input * (self.scale * ((input.shape[1] * input.shape[2] * input.shape[3]) / ( torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps)).sqrt()) else: return input * (self.scale / (torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps).sqrt()) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/layers/transform.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict def interpolate(x, sz): """Interpolate 4D tensor x to size sz.""" sz = sz.tolist() if torch.is_tensor(sz) else sz return F.interpolate(x, sz, mode='bilinear', align_corners=False) if x.shape[-2:] != sz else x class InterpCat(nn.Module): """Interpolate and concatenate features of different resolutions.""" def forward(self, input): if isinstance(input, (dict, OrderedDict)): input = list(input.values()) output_shape = None for x in input: if output_shape is None or output_shape[0] > x.shape[-2]: output_shape = x.shape[-2:] return torch.cat([interpolate(x, output_shape) for x in input], dim=-3) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/loss/__init__.py ================================================ from .target_classification import LBHinge ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/loss/kl_regression.py ================================================ import math import torch import torch.nn as nn from torch.nn import functional as F class KLRegression(nn.Module): """KL-divergence loss for probabilistic regression. 
    It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""

    def __init__(self, eps=0.0):
        super().__init__()
        self.eps = eps  # added to sample_density to avoid log(0) / division by zero

    def forward(self, scores, sample_density, gt_density, mc_dim=-1):
        """Args:
            scores: predicted score values
            sample_density: probability density of the sample distribution
            gt_density: probability density of the ground truth distribution
            mc_dim: dimension of the MC samples"""

        # First term: MC estimate of the log partition function (importance
        # sampled). Second term: importance-weighted cross term with gt_density.
        exp_val = scores - torch.log(sample_density + self.eps)

        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \
            torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)

        return L.mean()


class MLRegression(nn.Module):
    """Maximum likelihood loss for probabilistic regression.
    It is computed using Monte Carlo (MC) samples from an arbitrary distribution."""

    def __init__(self, eps=0.0):
        super().__init__()
        self.eps = eps  # numerical stabilizer for the density log

    def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):
        """Args:
            scores: predicted score values. First sample must be ground-truth
            sample_density: probability density of the sample distribution
            gt_density: not used
            mc_dim: dimension of the MC samples. Only mc_dim=1 supported"""

        assert mc_dim == 1
        # The first sample must be the ground-truth sample, flagged by a
        # sentinel density of -1 (enforced here).
        assert (sample_density[:,0,...] == -1).all()

        # Exclude the GT sample from the partition-function estimate.
        exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)

        # Negative log-likelihood: log-partition estimate minus the GT score.
        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]
        loss = L.mean()

        return loss


class KLRegressionGrid(nn.Module):
    """KL-divergence loss for probabilistic regression.
    It is computed using the grid integration strategy."""

    def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):
        """Args:
            scores: predicted score values
            gt_density: probability density of the ground truth distribution
            grid_dim: dimension(s) of the grid
            grid_scale: area of one grid cell"""

        # Riemann-sum approximation of the integrals over the grid.
        score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)

        L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr

        return L.mean()


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/loss/target_classification.py
================================================
import torch.nn as nn
import torch
from torch.nn import functional as F


class LBHinge(nn.Module):
    """Loss that uses a 'hinge' on the lower bound.
    This means that for samples with a label value smaller than the threshold, the loss is
    zero if the prediction is also smaller than that threshold.
    args:
        error_metric: What base loss to use (MSE by default).
        threshold: Threshold to use for the hinge.
        clip: Clip the loss if it is above this value.
""" def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None): super().__init__() self.error_metric = error_metric self.threshold = threshold if threshold is not None else -100 self.clip = clip def forward(self, prediction, label, target_bb=None): negative_mask = (label < self.threshold).float() positive_mask = (1.0 - negative_mask) prediction = negative_mask * F.relu(prediction) + positive_mask * prediction loss = self.error_metric(prediction, positive_mask * label) if self.clip is not None: loss = torch.min(loss, torch.tensor([self.clip], device=loss.device)) return loss ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/meta/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/meta/steepestdescent.py ================================================ import math import torch import torch.nn as nn from pytracking import TensorList from ltr.models.layers import activation class GNSteepestDescent(nn.Module): """General module for steepest descent based meta learning.""" def __init__(self, residual_module, num_iter=1, compute_losses=False, detach_length=float('Inf'), parameter_batch_dim=0, residual_batch_dim=0, steplength_reg=0.0, filter_dilation_factors=None): super().__init__() self.residual_module = residual_module self.num_iter = num_iter self.compute_losses = compute_losses self.detach_length = detach_length self.steplength_reg = steplength_reg self._parameter_batch_dim = parameter_batch_dim self._residual_batch_dim = residual_batch_dim self.filter_dilation_factors = filter_dilation_factors def _sqr_norm(self, x: TensorList, batch_dim=0): sum_keep_batch_dim = lambda e: e.sum(dim=[d for d in range(e.dim()) if d != batch_dim]) return sum((x * x).apply(sum_keep_batch_dim)) def _compute_loss(self, res): return sum((res * res).sum()) / sum(res.numel()) def forward(self, meta_parameter: TensorList, 
num_iter=None, *args, **kwargs): # Make sure grad is enabled torch_grad_enabled = torch.is_grad_enabled() torch.set_grad_enabled(True) num_iter = self.num_iter if num_iter is None else num_iter meta_parameter_iterates = [meta_parameter] losses = [] for i in range(num_iter): if i > 0 and i % self.detach_length == 0: meta_parameter = meta_parameter.detach() meta_parameter.requires_grad_(True) # Compute residual vector r = self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs) if self.compute_losses: losses.append(self._compute_loss(r)) # Compute gradient of loss u = r.clone() g = TensorList(torch.autograd.grad(r, meta_parameter, u, create_graph=True)) # Multiply gradient with Jacobian h = TensorList(torch.autograd.grad(g, u, g, create_graph=True)) # Compute squared norms ip_gg = self._sqr_norm(g, batch_dim=self._parameter_batch_dim) ip_hh = self._sqr_norm(h, batch_dim=self._residual_batch_dim) # Compute step length alpha = ip_gg / (ip_hh + self.steplength_reg * ip_gg).clamp(1e-8) # Compute optimization step step = g.apply(lambda e: alpha.reshape([-1 if d==self._parameter_batch_dim else 1 for d in range(e.dim())]) * e) # Add step to parameter meta_parameter = meta_parameter - step meta_parameter_iterates.append(meta_parameter) if self.compute_losses: losses.append(self._compute_loss(self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs))) # Reset the grad enabled flag torch.set_grad_enabled(torch_grad_enabled) if not torch_grad_enabled: meta_parameter.detach_() for w in meta_parameter_iterates: w.detach_() for l in losses: l.detach_() return meta_parameter, meta_parameter_iterates, losses ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/neck/CorrNL.py ================================================ import torch.nn as nn import torch from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D from torch.nn import 
functional as F
from ltr.models.neck.neck_utils import *


class CorrNL(nn.Module):
    """Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.
    It uses two backbone feature layers as input.
    args:
        input_dim: Feature dimensionality of the two input backbone layers.
        pred_input_dim: Dimensionality input the the prediction network.
        pred_inter_dim: Intermediate dimensionality in the prediction network."""

    def __init__(self, pool_size=8, use_NL=True):
        super().__init__()
        self.prroi_pool = PrRoIPool2D(pool_size, pool_size, 1/16)
        # Pixel-wise correlation yields pool_size*pool_size response channels.
        num_corr_channel = pool_size*pool_size
        self.channel_attention = SEModule(num_corr_channel,reduction=4)
        self.spatial_attention = NONLocalBlock2D(in_channels=num_corr_channel)
        self.use_NL = use_NL

    def forward(self, feat1, feat2, bb1):
        """Runs the ATOM IoUNet during training operation.
        This forward pass is mainly used for training. Call the individual functions during tracking instead.
        args:
            feat1: Features from the reference frames (4 or 5 dims).
            feat2: Features from the test frames (4 or 5 dims).
            bb1: Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).
            proposals2: Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4)."""
        assert bb1.dim() == 3
        # num_images, num_sequences = bb1.size()[:2]  # 1, 64
        # Extract first train sample
        if len(feat1)==1:
            feat1 = feat1[0]   # shape (64, C, H, W)
            feat2 = feat2[0]   # shape (64, C, H, W)
            bb1 = bb1[0,...]   # (64,4)
        else:
            raise ValueError("Only support single-layer feature map")

        '''get PrRoIPool feature '''
        # Add batch_index to rois
        batch_size = bb1.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64,1)

        # input bb is in format xywh, convert it to x0y0x1y1 format
        bb1 = bb1.clone()
        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]
        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)
        feat_roi1 = self.prroi_pool(feat1, roi1)     # (64,C,H,W)

        feat_corr,_ = self.corr_fun(feat_roi1, feat2)
        # print('correlated feature size:', feat_corr.size())  # (batch, St*St, Sr, Sr)

        '''channel attention: Squeeze and Excitation'''
        feat_ca = self.channel_attention(feat_corr)  # compute channel-attention features

        '''spatial attention: Non-local 2D'''
        feat_sa = self.spatial_attention(feat_ca)
        return feat_sa

    def get_ref_kernel(self, feat1, bb1):
        assert bb1.dim() == 3
        # num_images, num_sequences = bb1.size()[:2]  # 1, 64
        # Extract first train sample
        if len(feat1) == 1:
            feat1 = feat1[0]    # shape (64, C, H, W)
            bb1 = bb1[0, ...]   # (64,4)
        else:
            raise ValueError("Only support single-layer feature map")

        '''get PrRoIPool feature '''
        # Add batch_index to rois
        batch_size = bb1.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64,1)

        # input bb is in format xywh, convert it to x0y0x1y1 format
        bb1 = bb1.clone()
        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]
        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)

        # NOTE (translated): feat1 and roi1 must be CUDA tensors.
        '''注意: feat1 and roi1 must be cuda tensor'''
        self.ref_kernel = self.prroi_pool(feat1.float(), roi1)  # (64,C,H,W)
        # self.ref_kernel.half()

    def fuse_feat(self, feat2):
        '''fuse features from reference and test branch'''
        if len(feat2) == 1:
            feat2 = feat2[0]

        '''Step1: pixel-wise correlation'''
        feat_corr,_ = self.corr_fun(self.ref_kernel, feat2)
        # print('correlated feature size:', feat_corr.size())  # (batch, St*St, Sr, Sr) (batch,64,16,16)

        '''Step2: channel attention: Squeeze and Excitation'''
        feat_ca = self.channel_attention(feat_corr)  # compute channel-attention features

        if not self.use_NL:
            # print('not use non-local')
            return feat_ca
        else:
            '''Step3: spatial attention: Non-local 2D'''
            feat_sa = self.spatial_attention(feat_ca)
            return feat_sa

    def corr_fun(self, Kernel_tmp, Feature, KERs=None):
        # Per-sample correlation: each spatial position of the pooled kernel
        # becomes a 1x1 conv filter applied to the corresponding test feature.
        size = Kernel_tmp.size()
        CORR = []
        Kernel = []
        for i in range(len(Feature)):
            ker = Kernel_tmp[i:i + 1]
            fea = Feature[i:i + 1]
            ker = ker.view(size[1], size[2] * size[3]).transpose(0, 1)
            ker = ker.unsqueeze(2).unsqueeze(3)
            if not (type(KERs) == type(None)):
                ker = torch.cat([ker, KERs[i]], 0)
            co = F.conv2d(fea, ker.contiguous())
            CORR.append(co)
            ker = ker.unsqueeze(0)
            Kernel.append(ker)
        corr = torch.cat(CORR, 0)
        Kernel = torch.cat(Kernel, 0)
        return corr, Kernel


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/neck/neck_utils.py
================================================
import torch.nn as nn
import torch
from torch.nn import functional as F


def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv-BN-ReLU helper block."""
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=True),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True))


'''Channel attention module'''
class SEModule(nn.Module):
    # Squeeze-and-Excitation: global average pool, bottleneck MLP (1x1 convs),
    # sigmoid gate multiplied back onto the input channels.
    def __init__(self, channels, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


'''Non-local module'''
class _NonLocalBlockND(nn.Module):
    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
        """
        :param in_channels: input channel count
        :param inter_channels: embedding channel count (defaults to in_channels // 2)
        :param dimension: 1, 2 or 3 (selects Conv1d/2d/3d and pooling)
        :param sub_sample: max-pool the g/phi branches to reduce computation
        :param bn_layer: append BatchNorm (zero-initialized) to the output projection
        """
        super(_NonLocalBlockND, self).__init__()

        assert dimension in [1, 2, 3]

        self.dimension = dimension
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=(2))
            bn = nn.BatchNorm1d

        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)

        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels)
            )
            # Zero-init the output projection so the block starts as identity.
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)

        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

        if sub_sample:
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x, return_nl_map=False):
        """
        :param x: (b, c, t, h, w)
        :param return_nl_map: if True return z, nl_map, else only return z.
        :return:
        """
        batch_size = x.size(0)

        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)

        # Pairwise affinity between all positions, softmax-normalized.
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, -1)

        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection.
        z = W_y + x

        if return_nl_map:
            return z, f_div_C
        return z


class NONLocalBlock2D(_NonLocalBlockND):
    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock2D, self).__init__(in_channels,
                                              inter_channels=inter_channels,
                                              dimension=2, sub_sample=sub_sample,
                                              bn_layer=bn_layer,)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/__init__.py
================================================
from .linear_filter import LinearFilter


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/features.py
================================================
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models.resnet import BasicBlock,
Bottleneck
from ltr.models.layers.normalization import InstanceL2Norm
from ltr.models.layers.transform import InterpCat


def residual_basic_block(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
                         interp_cat=False, final_relu=False, init_pool=False):
    """Construct a network block based on the BasicBlock used in ResNet 18 and 34."""
    if out_dim is None:
        out_dim = feature_dim
    feat_layers = []
    if interp_cat:
        feat_layers.append(InterpCat())
    if init_pool:
        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    for i in range(num_blocks):
        # Only the last block may change dimensionality (and only when there
        # is no trailing conv to do it instead).
        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim
        feat_layers.append(BasicBlock(feature_dim, odim))
    if final_conv:
        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
        if final_relu:
            feat_layers.append(nn.ReLU(inplace=True))
    if l2norm:
        feat_layers.append(InstanceL2Norm(scale=norm_scale))
    return nn.Sequential(*feat_layers)


def residual_basic_block_pool(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0,
                              out_dim=None, pool=True):
    """Construct a network block based on the BasicBlock used in ResNet."""
    if out_dim is None:
        out_dim = feature_dim
    feat_layers = []
    for i in range(num_blocks):
        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim
        feat_layers.append(BasicBlock(feature_dim, odim))
    if final_conv:
        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
    if pool:
        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    if l2norm:
        feat_layers.append(InstanceL2Norm(scale=norm_scale))
    return nn.Sequential(*feat_layers)


def residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
                        interp_cat=False, final_relu=False, final_pool=False):
    """Construct a network block based on the Bottleneck block used in ResNet."""
    if out_dim is None:
        out_dim = feature_dim
    feat_layers = []
    if interp_cat:
        feat_layers.append(InterpCat())
    for i in range(num_blocks):
        # Bottleneck expands by 4x, so planes is the bottleneck width.
        planes = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim // 4
        feat_layers.append(Bottleneck(4*feature_dim, planes))
    if final_conv:
        feat_layers.append(nn.Conv2d(4*feature_dim, out_dim, kernel_size=3, padding=1, bias=False))
        if final_relu:
            feat_layers.append(nn.ReLU(inplace=True))
    if final_pool:
        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    if l2norm:
        feat_layers.append(InstanceL2Norm(scale=norm_scale))
    return nn.Sequential(*feat_layers)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/initializer.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D
from ltr.models.layers.blocks import conv_block
import math


class FilterPool(nn.Module):
    """Pool the target region in a feature map.
    args:
        filter_size:  Size of the filter.
        feature_stride:  Input feature stride.
        pool_square:  Do a square pooling instead of pooling the exact target region."""

    def __init__(self, filter_size=1, feature_stride=16, pool_square=False):
        super().__init__()
        self.prroi_pool = PrRoIPool2D(filter_size, filter_size, 1/feature_stride)
        self.pool_square = pool_square

    def forward(self, feat, bb):
        """Pool the regions in bb.
        args:
            feat:  Input feature maps. Dims (num_samples, feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (num_samples, 4).
        returns:
            pooled_feat:  Pooled features.
                Dims (num_samples, feat_dim, wH, wW)."""

        # Add batch_index to rois
        bb = bb.reshape(-1,4)
        num_images_total = bb.shape[0]
        batch_index = torch.arange(num_images_total, dtype=torch.float32).reshape(-1, 1).to(bb.device)

        # input bb is in format xywh, convert it to x0y0x1y1 format
        pool_bb = bb.clone()

        if self.pool_square:
            # Replace the box with a square of equal area, centered as before.
            bb_sz = pool_bb[:, 2:4].prod(dim=1, keepdim=True).sqrt()
            pool_bb[:, :2] += pool_bb[:, 2:]/2 - bb_sz/2
            pool_bb[:, 2:] = bb_sz

        pool_bb[:, 2:4] = pool_bb[:, 0:2] + pool_bb[:, 2:4]
        roi1 = torch.cat((batch_index, pool_bb), dim=1)

        return self.prroi_pool(feat, roi1)


class FilterInitializer(nn.Module):
    """Initializes a target classification filter by applying a number of conv layers before and after pooling the target region.
    args:
        filter_size:  Size of the filter.
        feature_dim:  Input feature dimentionality.
        feature_stride:  Input feature stride.
        pool_square:  Do a square pooling instead of pooling the exact target region.
        filter_norm:  Normalize the output filter with its size in the end.
        num_filter_pre_convs:  Conv layers before pooling.
        num_filter_post_convs:  Conv layers after pooling."""

    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,
                 num_filter_pre_convs=1, num_filter_post_convs=0):
        super().__init__()

        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)
        self.filter_norm = filter_norm

        # Make pre conv
        pre_conv_layers = []
        for i in range(num_filter_pre_convs):
            pre_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=3, padding=1))
        self.filter_pre_layers = nn.Sequential(*pre_conv_layers) if pre_conv_layers else None

        # Make post conv
        post_conv_layers = []
        for i in range(num_filter_post_convs):
            post_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=1, padding=0))
        post_conv_layers.append(nn.Conv2d(feature_dim, feature_dim, kernel_size=1, padding=0))
        self.filter_post_layers = nn.Sequential(*post_conv_layers)

        # Init weights (He initialization for convs, unit BN).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, feat, bb):
        """Runs the initializer module.
        Note that [] denotes an optional dimension.
        args:
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights:  The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_images = bb.shape[0] if bb.dim() == 3 else 1

        if self.filter_pre_layers is not None:
            feat = self.filter_pre_layers(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))

        feat_post = self.filter_pool(feat, bb)
        weights = self.filter_post_layers(feat_post)

        if num_images > 1:
            # Average the filters computed from the individual images.
            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)

        if self.filter_norm:
            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])

        return weights


class FilterInitializerLinear(nn.Module):
    """Initializes a target classification filter by applying a linear conv layer and then pooling the target region.
    args:
        filter_size:  Size of the filter.
        feature_dim:  Input feature dimentionality.
        feature_stride:  Input feature stride.
        pool_square:  Do a square pooling instead of pooling the exact target region.
        filter_norm:  Normalize the output filter with its size in the end.
        conv_ksz:  Kernel size of the conv layer before pooling."""

    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,
                 conv_ksz=3, init_weights='default'):
        super().__init__()

        self.filter_conv = nn.Conv2d(feature_dim, feature_dim, kernel_size=conv_ksz, padding=conv_ksz // 2)
        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)
        self.filter_norm = filter_norm

        # Init weights: He init by default, or all-zero when requested.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if init_weights == 'default':
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif init_weights == 'zero':
                    m.weight.data.zero_()
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, feat, bb):
        """Runs the initializer module.
        Note that [] denotes an optional dimension.
        args:
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights:  The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_images = feat.shape[0]

        feat = self.filter_conv(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))

        weights = self.filter_pool(feat, bb)

        # If multiple input images, compute the initial filter as the average filter.
        if num_images > 1:
            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)

        if self.filter_norm:
            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])

        return weights


class FilterInitializerZero(nn.Module):
    """Initializes a target classification filter with zeros.
    args:
        filter_size:  Size of the filter.
        feature_dim:  Input feature dimentionality."""

    def __init__(self, filter_size=1, feature_dim=256):
        super().__init__()

        self.filter_size = (feature_dim, filter_size, filter_size)

    def forward(self, feat, bb):
        """Runs the initializer module.
        Note that [] denotes an optional dimension.
        args:
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights:  The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_sequences = feat.shape[1] if feat.dim() == 5 else 1

        return feat.new_zeros(num_sequences, self.filter_size[0], self.filter_size[1], self.filter_size[2])


class FilterInitializerSiamese(nn.Module):
    """Initializes a target classification filter by only pooling the target region (similar to Siamese trackers).
    args:
        filter_size:  Size of the filter.
        feature_stride:  Input feature stride.
        pool_square:  Do a square pooling instead of pooling the exact target region.
        filter_norm:  Normalize the output filter with its size in the end."""

    def __init__(self, filter_size=1, feature_stride=16, pool_square=False, filter_norm=True):
        super().__init__()

        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)
        self.filter_norm = filter_norm

        # Init weights (He initialization for convs, unit BN).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, feat, bb):
        """Runs the initializer module.
        Note that [] denotes an optional dimension.
        args:
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights:  The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_images = feat.shape[0]

        feat = feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1])
        weights = self.filter_pool(feat, bb)

        if num_images > 1:
            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)

        if self.filter_norm:
            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])

        return weights


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/linear_filter.py
================================================
import torch.nn as nn
import ltr.models.layers.filter as filter_layer
import math


class LinearFilter(nn.Module):
    """Target classification filter module.
    args:
        filter_size:  Size of filter (int).
        filter_initializer:  Filter initializer module.
        filter_optimizer:  Filter optimizer module.
        feature_extractor:  Feature extractor module applied to the input backbone features."""

    def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None):
        super().__init__()

        self.filter_size = filter_size

        # Modules
        self.filter_initializer = filter_initializer
        self.filter_optimizer = filter_optimizer
        self.feature_extractor = feature_extractor

        # Init weights (He initialization for convs, unit BN).
        for m in self.feature_extractor.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, train_feat, test_feat, train_bb, *args, **kwargs):
        """Learns a target classification filter based on the train samples and return the resulting classification
        scores on the test samples.
        The forward function is ONLY used for training. Call the individual functions during tracking.
        args:
            train_feat:  Backbone features for the train samples (4 or 5 dims).
            test_feat:  Backbone features for the test samples (4 or 5 dims).
            train_bb:  Target boxes (x,y,w,h) for the train samples in image coordinates. Dims (images, sequences, 4).
            *args, **kwargs:  These are passed to the optimizer module.
        returns:
            test_scores:  Classification scores on the test samples."""

        assert train_bb.dim() == 3

        num_sequences = train_bb.shape[1]

        if train_feat.dim() == 5:
            train_feat = train_feat.reshape(-1, *train_feat.shape[-3:])
        if test_feat.dim() == 5:
            test_feat = test_feat.reshape(-1, *test_feat.shape[-3:])

        # Extract features
        train_feat = self.extract_classification_feat(train_feat, num_sequences)
        test_feat = self.extract_classification_feat(test_feat, num_sequences)

        # Train filter
        filter, filter_iter, losses = self.get_filter(train_feat, train_bb, *args, **kwargs)

        # Classify samples using all returned filter iterates (for per-iteration losses).
        test_scores = [self.classify(f, test_feat) for f in filter_iter]

        return test_scores

    def extract_classification_feat(self, feat, num_sequences=None):
        """Extract classification features based on the input backbone features."""
        if self.feature_extractor is None:
            return feat
        if num_sequences is None:
            return self.feature_extractor(feat)

        output = self.feature_extractor(feat)
        return output.reshape(-1, num_sequences, *output.shape[-3:])

    def classify(self, weights, feat):
        """Run classifier (filter) on the features (feat)."""

        scores = filter_layer.apply_filter(feat, weights)

        return scores

    def get_filter(self, feat, bb, *args, **kwargs):
        """Outputs the learned filter based on the input features (feat) and target boxes (bb) by running the
        filter initializer and optimizer. Note that [] denotes an optional dimension.
        args:
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
            *args, **kwargs:  These are passed to the optimizer module.
        returns:
            weights:  The final optimized weights. Dims (sequences, feat_dim, wH, wW).
            weight_iterates:  The weights computed in each iteration (including initial input and final output).
            losses:  Train losses."""

        weights = self.filter_initializer(feat, bb)

        if self.filter_optimizer is not None:
            weights, weights_iter, losses = self.filter_optimizer(weights, feat=feat, bb=bb, *args, **kwargs)
        else:
            weights_iter = [weights]
            losses = None

        return weights, weights_iter, losses

    def train_classifier(self, backbone_feat, bb):
        num_sequences = bb.shape[1]

        if backbone_feat.dim() == 5:
            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])

        # Extract features
        train_feat = self.extract_classification_feat(backbone_feat, num_sequences)

        # Get filters from each iteration
        final_filter, _, train_losses = self.get_filter(train_feat, bb)
        return final_filter, train_losses

    def track_frame(self, filter_weights, backbone_feat):
        if backbone_feat.dim() == 5:
            num_sequences = backbone_feat.shape[1]
            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])
        else:
            num_sequences = None

        test_feat = self.extract_classification_feat(backbone_feat, num_sequences)

        scores = filter_layer.apply_filter(test_feat, filter_weights)

        return scores


================================================
FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/optimizer.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
import ltr.models.layers.filter as filter_layer
import ltr.models.layers.activation as activation
from ltr.models.layers.distance import DistanceMap
import math


class DiMPSteepestDescentGN(nn.Module):
    """Optimizer module for DiMP.
    It unrolls the steepest descent with Gauss-Newton iterations to optimize the target filter.
    Moreover it learns parameters in the loss itself, as described in the DiMP paper.
    args:
        num_iter:  Number of default optimization iterations.
        feat_stride:  The stride of the input feature.
        init_step_length:  Initial scaling of the step length (which is then learned).
        init_filter_reg:  Initial filter regularization weight (which is then learned).
init_gauss_sigma: The standard deviation to use for the initialization of the label function. num_dist_bins: Number of distance bins used for learning the loss label, mask and weight. bin_displacement: The displacement of the bins (level of discritization). mask_init_factor: Parameter controlling the initialization of the target mask. score_act: Type of score activation (target mask computation) to use. The default 'relu' is what is described in the paper. act_param: Parameter for the score_act. min_filter_reg: Enforce a minimum value on the regularization (helps stability sometimes). mask_act: What activation to do on the output of the mask computation ('sigmoid' or 'linear'). detach_length: Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'. alpha_eps: Term in the denominator of the steepest descent that stabalizes learning. """ def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0, mask_init_factor=4.0, score_act='relu', act_param=None, min_filter_reg=1e-3, mask_act='sigmoid', detach_length=float('Inf'), alpha_eps=0): super().__init__() self.num_iter = num_iter self.feat_stride = feat_stride self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1)) self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1)) self.distance_map = DistanceMap(num_dist_bins, bin_displacement) self.min_filter_reg = min_filter_reg self.detach_length = detach_length self.alpha_eps = alpha_eps # Distance coordinates d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement if init_gauss_sigma == 0: init_gauss = torch.zeros_like(d) init_gauss[0,0,0,0] = 1 else: init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2) # Module that predicts the target label function (y in the paper) self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False) self.label_map_predictor.weight.data = 
init_gauss - init_gauss.min() # Module that predicts the target mask (m in the paper) mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)] if mask_act == 'sigmoid': mask_layers.append(nn.Sigmoid()) init_bias = 0.0 elif mask_act == 'linear': init_bias = 0.5 else: raise ValueError('Unknown activation') self.target_mask_predictor = nn.Sequential(*mask_layers) self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias # Module that predicts the residual weights (v in the paper) self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False) self.spatial_weight_predictor.weight.data.fill_(1.0) # The score actvation and its derivative if score_act == 'bentpar': self.score_activation = activation.BentIdentPar(act_param) self.score_activation_deriv = activation.BentIdentParDeriv(act_param) elif score_act == 'relu': self.score_activation = activation.LeakyReluPar() self.score_activation_deriv = activation.LeakyReluParDeriv() else: raise ValueError('Unknown score activation') def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True): """Runs the optimizer module. Note that [] denotes an optional dimension. args: weights: Initial weights. Dims (sequences, feat_dim, wH, wW). feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W). bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4). sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]). num_iter: Number of iterations to run. compute_losses: Whether to compute the (train) loss in each iteration. returns: weights: The final oprimized weights. weight_iterates: The weights computed in each iteration (including initial input and final output). 
losses: Train losses.""" # Sizes num_iter = self.num_iter if num_iter is None else num_iter num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 filter_sz = (weights.shape[-2], weights.shape[-1]) output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2) # Get learnable scalars step_length_factor = torch.exp(self.log_step_length) reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2) # Compute distance map dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0 center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,)) - dmap_offset dist_map = self.distance_map(center, output_sz) # Compute label map masks and weight label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:]) target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:]) spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:]) # Get total sample weights if sample_weight is None: sample_weight = math.sqrt(1.0 / num_images) * spatial_weight elif isinstance(sample_weight, torch.Tensor): sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1) * spatial_weight backprop_through_learning = (self.detach_length > 0) weight_iterates = [weights] losses = [] for i in range(num_iter): if not backprop_through_learning or (i > 0 and i % self.detach_length == 0): weights = weights.detach() # Compute residuals scores = filter_layer.apply_filter(feat, weights) scores_act = self.score_activation(scores, target_mask) score_mask = self.score_activation_deriv(scores, target_mask) residuals = sample_weight * (scores_act - label_map) if compute_losses: losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences) # Compute gradient residuals_mapped = score_mask * (sample_weight * residuals) 
weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \ reg_weight * weights # Map the gradient with the Jacobian scores_grad = filter_layer.apply_filter(feat, weights_grad) scores_grad = sample_weight * (score_mask * scores_grad) # Compute optimal step length alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3)) alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8) alpha = alpha_num / alpha_den # Update filter weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad # Add the weight iterate weight_iterates.append(weights) if compute_losses: scores = filter_layer.apply_filter(feat, weights) scores = self.score_activation(scores, target_mask) losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences) return weights, weight_iterates, losses class DiMPL2SteepestDescentGN(nn.Module): """A simpler optimizer module that uses L2 loss. args: num_iter: Number of default optimization iterations. feat_stride: The stride of the input feature. init_step_length: Initial scaling of the step length (which is then learned). gauss_sigma: The standard deviation of the label function. hinge_threshold: Threshold for the hinge-based loss (see DiMP paper). init_filter_reg: Initial filter regularization weight (which is then learned). min_filter_reg: Enforce a minimum value on the regularization (helps stability sometimes). detach_length: Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'. alpha_eps: Term in the denominator of the steepest descent that stabalizes learning. 
""" def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, gauss_sigma=1.0, hinge_threshold=-999, init_filter_reg=1e-2, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0): super().__init__() self.num_iter = num_iter self.feat_stride = feat_stride self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1)) self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1)) self.min_filter_reg = min_filter_reg self.detach_length = detach_length self.hinge_threshold = hinge_threshold self.gauss_sigma = gauss_sigma self.alpha_eps = alpha_eps def get_label(self, center, output_sz): center = center.reshape(center.shape[0], -1, center.shape[-1]) k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device) k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device) g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2) g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2) gauss = g0 * g1 return gauss def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True): """Runs the optimizer module. Note that [] denotes an optional dimension. args: weights: Initial weights. Dims (sequences, feat_dim, wH, wW). feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W). bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4). sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]). num_iter: Number of iterations to run. compute_losses: Whether to compute the (train) loss in each iteration. returns: weights: The final oprimized weights. weight_iterates: The weights computed in each iteration (including initial input and final output). 
losses: Train losses.""" # Sizes num_iter = self.num_iter if num_iter is None else num_iter num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 filter_sz = (weights.shape[-2], weights.shape[-1]) output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2) # Get learnable scalars step_length_factor = torch.exp(self.log_step_length) reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2) # Compute distance map dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0 center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - dmap_offset label_map = self.get_label(center, output_sz) target_mask = (label_map > self.hinge_threshold).float() label_map *= target_mask # Get total sample weights if sample_weight is None: sample_weight = math.sqrt(1.0 / num_images) elif isinstance(sample_weight, torch.Tensor): sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1) weight_iterates = [weights] losses = [] for i in range(num_iter): if i > 0 and i % self.detach_length == 0: weights = weights.detach() # Compute residuals scores = filter_layer.apply_filter(feat, weights) scores_act = target_mask * scores + (1.0 - target_mask) * F.relu(scores) score_mask = target_mask + (1.0 - target_mask) * (scores.detach() > 0).float() residuals = sample_weight * (scores_act - label_map) if compute_losses: losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences) # Compute gradient residuals_mapped = score_mask * (sample_weight * residuals) weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \ reg_weight * weights # Map the gradient with the Jacobian scores_grad = filter_layer.apply_filter(feat, weights_grad) scores_grad = sample_weight * (score_mask * scores_grad) # Compute optimal step length alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3)) alpha_den = 
((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8) alpha = alpha_num / alpha_den # Update filter weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad # Add the weight iterate weight_iterates.append(weights) if compute_losses: scores = filter_layer.apply_filter(feat, weights) scores = target_mask * scores + (1.0 - target_mask) * F.relu(scores) losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences) return weights, weight_iterates, losses class PrDiMPSteepestDescentNewton(nn.Module): """Optimizer module for PrDiMP. It unrolls the steepest descent with Newton iterations to optimize the target filter. See the PrDiMP paper. args: num_iter: Number of default optimization iterations. feat_stride: The stride of the input feature. init_step_length: Initial scaling of the step length (which is then learned). init_filter_reg: Initial filter regularization weight (which is then learned). gauss_sigma: The standard deviation to use for the label density function. min_filter_reg: Enforce a minimum value on the regularization (helps stability sometimes). detach_length: Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'. alpha_eps: Term in the denominator of the steepest descent that stabalizes learning. init_uni_weight: Weight of uniform label distribution. normalize_label: Wheter to normalize the label distribution. label_shrink: How much to shrink to label distribution. softmax_reg: Regularization in the denominator of the SoftMax. label_threshold: Threshold probabilities smaller than this. 
""" def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, init_filter_reg=1e-2, gauss_sigma=1.0, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0, init_uni_weight=None, normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0.0): super().__init__() self.num_iter = num_iter self.feat_stride = feat_stride self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1)) self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1)) self.gauss_sigma = gauss_sigma self.min_filter_reg = min_filter_reg self.detach_length = detach_length self.alpha_eps = alpha_eps self.uni_weight = 0 if init_uni_weight is None else init_uni_weight self.normalize_label = normalize_label self.label_shrink = label_shrink self.softmax_reg = softmax_reg self.label_threshold = label_threshold def get_label_density(self, center, output_sz): center = center.reshape(center.shape[0], -1, center.shape[-1]) k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device) k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device) dist0 = (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2 dist1 = (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2 if self.gauss_sigma == 0: dist0_view = dist0.reshape(-1, dist0.shape[-2]) dist1_view = dist1.reshape(-1, dist1.shape[-1]) one_hot0 = torch.zeros_like(dist0_view) one_hot1 = torch.zeros_like(dist1_view) one_hot0[torch.arange(one_hot0.shape[0]), dist0_view.argmin(dim=-1)] = 1.0 one_hot1[torch.arange(one_hot1.shape[0]), dist1_view.argmin(dim=-1)] = 1.0 gauss = one_hot0.reshape(dist0.shape) * one_hot1.reshape(dist1.shape) else: g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist0) g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist1) gauss = (g0 / (2*math.pi*self.gauss_sigma**2)) * g1 gauss = gauss * (gauss > self.label_threshold).float() if self.normalize_label: gauss /= (gauss.sum(dim=(-2,-1), keepdim=True) + 1e-8) 
label_dens = (1.0 - self.label_shrink)*((1.0 - self.uni_weight) * gauss + self.uni_weight / (output_sz[0]*output_sz[1])) return label_dens def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True): """Runs the optimizer module. Note that [] denotes an optional dimension. args: weights: Initial weights. Dims (sequences, feat_dim, wH, wW). feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W). bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4). sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]). num_iter: Number of iterations to run. compute_losses: Whether to compute the (train) loss in each iteration. returns: weights: The final oprimized weights. weight_iterates: The weights computed in each iteration (including initial input and final output). losses: Train losses.""" # Sizes num_iter = self.num_iter if num_iter is None else num_iter num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 filter_sz = (weights.shape[-2], weights.shape[-1]) output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2) # Get learnable scalars step_length_factor = torch.exp(self.log_step_length) reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2) # Compute label density offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0 center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - offset label_density = self.get_label_density(center, output_sz) # Get total sample weights if sample_weight is None: sample_weight = torch.Tensor([1.0 / num_images]).to(feat.device) elif isinstance(sample_weight, torch.Tensor): sample_weight = sample_weight.reshape(num_images, num_sequences, 1, 1) exp_reg = 0 if self.softmax_reg is None else math.exp(self.softmax_reg) def _compute_loss(scores, weights): return 
torch.sum(sample_weight.reshape(sample_weight.shape[0], -1) * (torch.log(scores.exp().sum(dim=(-2, -1)) + exp_reg) - (label_density * scores).sum(dim=(-2, -1)))) / num_sequences +\ reg_weight * (weights ** 2).sum() / num_sequences weight_iterates = [weights] losses = [] for i in range(num_iter): if i > 0 and i % self.detach_length == 0: weights = weights.detach() # Compute "residuals" scores = filter_layer.apply_filter(feat, weights) scores_softmax = activation.softmax_reg(scores.reshape(num_images, num_sequences, -1), dim=2, reg=self.softmax_reg).reshape(scores.shape) res = sample_weight*(scores_softmax - label_density) if compute_losses: losses.append(_compute_loss(scores, weights)) # Compute gradient weights_grad = filter_layer.apply_feat_transpose(feat, res, filter_sz, training=self.training) + \ reg_weight * weights # Map the gradient with the Hessian scores_grad = filter_layer.apply_filter(feat, weights_grad) sm_scores_grad = scores_softmax * scores_grad hes_scores_grad = sm_scores_grad - scores_softmax * torch.sum(sm_scores_grad, dim=(-2,-1), keepdim=True) grad_hes_grad = (scores_grad * hes_scores_grad).reshape(num_images, num_sequences, -1).sum(dim=2).clamp(min=0) grad_hes_grad = (sample_weight.reshape(sample_weight.shape[0], -1) * grad_hes_grad).sum(dim=0) # Compute optimal step length alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3)) alpha_den = (grad_hes_grad + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8) alpha = alpha_num / alpha_den # Update filter weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad # Add the weight iterate weight_iterates.append(weights) if compute_losses: scores = filter_layer.apply_filter(feat, weights) losses.append(_compute_loss(scores, weights)) return weights, weight_iterates, losses ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/target_classifier/residual_modules.py ================================================ import torch 
import torch.nn as nn import math import ltr.models.layers.filter as filter_layer import ltr.models.layers.activation as activation from ltr.models.layers.distance import DistanceMap from pytracking import TensorList class LinearFilterLearnGen(nn.Module): def __init__(self, feat_stride=16, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0, mask_init_factor=4.0, score_act='bentpar', act_param=None, mask_act='sigmoid'): super().__init__() self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1)) self.feat_stride = feat_stride self.distance_map = DistanceMap(num_dist_bins, bin_displacement) # Distance coordinates d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement if init_gauss_sigma == 0: init_gauss = torch.zeros_like(d) init_gauss[0,0,0,0] = 1 else: init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2) self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False) self.label_map_predictor.weight.data = init_gauss - init_gauss.min() mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)] if mask_act == 'sigmoid': mask_layers.append(nn.Sigmoid()) init_bias = 0.0 elif mask_act == 'linear': init_bias = 0.5 else: raise ValueError('Unknown activation') self.target_mask_predictor = nn.Sequential(*mask_layers) self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False) self.spatial_weight_predictor.weight.data.fill_(1.0) if score_act == 'bentpar': self.score_activation = activation.BentIdentPar(act_param) elif score_act == 'relu': self.score_activation = activation.LeakyReluPar() else: raise ValueError('Unknown activation') def forward(self, meta_parameter: TensorList, feat, bb, sample_weight=None, is_distractor=None): filter = meta_parameter[0] num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 filter_sz = 
(filter.shape[-2], filter.shape[-1]) # Compute scores scores = filter_layer.apply_filter(feat, filter) # Compute distance map center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,)) if is_distractor is not None: center[is_distractor.reshape(-1), :] = 99999 dist_map = self.distance_map(center, scores.shape[-2:]) # Compute label map masks and weight label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1]) target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1]) spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1]) if sample_weight is None: sample_weight = math.sqrt(1.0 / num_images) * spatial_weight elif isinstance(sample_weight, torch.Tensor): sample_weight = sample_weight.sqrt().reshape(-1, 1, 1, 1) * spatial_weight # Compute data residual scores_act = self.score_activation(scores, target_mask) data_residual = sample_weight * (scores_act - label_map) # Compute regularization residual. 
Put batch in second dimension reg_residual = self.filter_reg*filter.reshape(1, num_sequences, -1) return TensorList([data_residual, reg_residual]) ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/tracking/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/ltr/models/tracking/dimpnet.py ================================================ import math import torch import torch.nn as nn from collections import OrderedDict from ltr.models.meta import steepestdescent import ltr.models.target_classifier.linear_filter as target_clf import ltr.models.target_classifier.features as clf_features import ltr.models.target_classifier.initializer as clf_initializer import ltr.models.target_classifier.optimizer as clf_optimizer import ltr.models.bbreg as bbmodels import ltr.models.backbone as backbones from ltr import model_constructor class DiMPnet(nn.Module): """The DiMP network. args: feature_extractor: Backbone feature extractor network. Must return a dict of feature maps classifier: Target classification module. bb_regressor: Bounding box regression module. classification_layer: Name of the backbone feature layer to use for classification. 
bb_regressor_layer: Names of the backbone layers to use for bounding box regression.""" def __init__(self, feature_extractor, classifier, bb_regressor, classification_layer, bb_regressor_layer): super().__init__() self.feature_extractor = feature_extractor self.classifier = classifier self.bb_regressor = bb_regressor self.classification_layer = [classification_layer] if isinstance(classification_layer, str) else classification_layer self.bb_regressor_layer = bb_regressor_layer self.output_layers = sorted(list(set(self.classification_layer + self.bb_regressor_layer))) def forward(self, train_imgs, test_imgs, train_bb, test_proposals, *args, **kwargs): """Runs the DiMP network the way it is applied during training. The forward function is ONLY used for training. Call the individual functions during tracking. args: train_imgs: Train image samples (images, sequences, 3, H, W). test_imgs: Test image samples (images, sequences, 3, H, W). trian_bb: Target boxes (x,y,w,h) for the train images. Dims (images, sequences, 4). test_proposals: Proposal boxes to use for the IoUNet (bb_regressor) module. *args, **kwargs: These are passed to the classifier module. returns: test_scores: Classification scores on the test samples. 
iou_pred: Predicted IoU scores for the test_proposals.""" assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs' # Extract backbone features train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:])) test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:])) # Classification features train_feat_clf = self.get_backbone_clf_feat(train_feat) test_feat_clf = self.get_backbone_clf_feat(test_feat) # Run classifier module target_scores = self.classifier(train_feat_clf, test_feat_clf, train_bb, *args, **kwargs) # Get bb_regressor features train_feat_iou = self.get_backbone_bbreg_feat(train_feat) test_feat_iou = self.get_backbone_bbreg_feat(test_feat) # Run the IoUNet module iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou, train_bb, test_proposals) return target_scores, iou_pred def get_backbone_clf_feat(self, backbone_feat): feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer}) if len(self.classification_layer) == 1: return feat[self.classification_layer[0]] return feat def get_backbone_bbreg_feat(self, backbone_feat): return [backbone_feat[l] for l in self.bb_regressor_layer] def extract_classification_feat(self, backbone_feat): return self.classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat)) def extract_backbone_features(self, im, layers=None): if layers is None: layers = self.output_layers return self.feature_extractor(im, layers) def extract_features(self, im, layers=None): if layers is None: layers = self.bb_regressor_layer + ['classification'] if 'classification' not in layers: return self.feature_extractor(im, layers) backbone_layers = sorted(list(set([l for l in layers + self.classification_layer if l != 'classification']))) all_feat = self.feature_extractor(im, backbone_layers) all_feat['classification'] = self.extract_classification_feat(all_feat) return OrderedDict({l: all_feat[l] for l in layers}) 
@model_constructor
def dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
              classification_layer='layer3', feat_stride=16, backbone_pretrained=True,
              clf_feat_blocks=1, clf_feat_norm=True, init_filter_norm=False, final_conv=True,
              out_feature_dim=256, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,
              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
              score_act='relu', act_param=None, target_mask_act='sigmoid',
              detach_length=float('Inf'), frozen_backbone_layers=()):
    """Construct a DiMP network with a ResNet-18 backbone.

    The classifier is a LinearFilter whose filter is refined by
    DiMPSteepestDescentGN; bounding boxes are regressed by an AtomIoUNet
    fed from backbone layers 'layer2' and 'layer3'.
    """
    # Backbone
    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                              final_conv=final_conv, norm_scale=norm_scale,
                                                              out_dim=out_feature_dim)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                    init_step_length=optim_init_step,
                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
                                                    num_dist_bins=num_dist_bins,
                                                    bin_displacement=bin_displacement,
                                                    mask_init_factor=mask_init_factor,
                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,
                                                    detach_length=detach_length)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net


@model_constructor
def dimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
              classification_layer='layer3', feat_stride=16, backbone_pretrained=True,
              clf_feat_blocks=0, clf_feat_norm=True, init_filter_norm=False, final_conv=True,
              out_feature_dim=512, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,
              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
              score_act='relu', act_param=None, target_mask_act='sigmoid',
              detach_length=float('Inf'), frozen_backbone_layers=()):
    """Construct a DiMP network with a ResNet-50 backbone.

    Same structure as dimpnet18, but the classifier features come from a
    residual bottleneck block and only 'layer3'/'layer4' are valid
    classification layers.
    """
    # Backbone
    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    # Backbone channel count for the chosen classification layer.
    if classification_layer == 'layer3':
        feature_dim = 256
    elif classification_layer == 'layer4':
        feature_dim = 512
    else:
        raise Exception

    clf_feature_extractor = clf_features.residual_bottleneck(feature_dim=feature_dim,
                                                             num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                             final_conv=final_conv, norm_scale=norm_scale,
                                                             out_dim=out_feature_dim)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                    init_step_length=optim_init_step,
                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
                                                    num_dist_bins=num_dist_bins,
                                                    bin_displacement=bin_displacement,
                                                    mask_init_factor=mask_init_factor,
                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,
                                                    detach_length=detach_length)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    # ResNet-50 layer2/layer3 have wider channels, hence the explicit input_dim.
    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim,
                                       pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net


@model_constructor
def L2dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                classification_layer='layer3', feat_stride=16, backbone_pretrained=True,
                clf_feat_blocks=1, clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                out_feature_dim=256, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                detach_length=float('Inf'), hinge_threshold=-999, gauss_sigma=1.0, alpha_eps=0):
    """Construct a DiMP/ResNet-18 variant whose classifier is optimized with an
    L2 steepest-descent objective (DiMPL2SteepestDescentGN) instead of the
    default DiMP loss."""
    # Backbone
    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                              final_conv=final_conv, norm_scale=norm_scale,
                                                              out_dim=out_feature_dim)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.DiMPL2SteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                      init_step_length=optim_init_step,
                                                      hinge_threshold=hinge_threshold,
                                                      init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,
                                                      detach_length=detach_length, alpha_eps=alpha_eps)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net


@model_constructor
def klcedimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True,
                  clf_feat_blocks=1, clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                  out_feature_dim=256, gauss_sigma=1.0,
                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,
                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default',
                  normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0,
                  final_relu=False, init_pool_square=False, frozen_backbone_layers=()):
    """Construct a KL/CE (PrDiMP-style) network with a ResNet-18 backbone; the
    classifier filter is optimized with PrDiMPSteepestDescentNewton."""
    if not train_feature_extractor:
        frozen_backbone_layers = 'all'

    # Backbone
    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                              final_conv=final_conv, norm_scale=norm_scale,
                                                              out_dim=out_feature_dim, final_relu=final_relu)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim,
                                                          init_weights=init_initializer,
                                                          pool_square=init_pool_square)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,
                                                          init_step_length=optim_init_step,
                                                          init_filter_reg=optim_init_reg,
                                                          gauss_sigma=gauss_sigma,
                                                          detach_length=detach_length, alpha_eps=alpha_eps,
                                                          init_uni_weight=init_uni_weight,
                                                          min_filter_reg=optim_min_reg,
                                                          normalize_label=normalize_label,
                                                          label_shrink=label_shrink,
                                                          softmax_reg=softmax_reg,
                                                          label_threshold=label_threshold)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net


@model_constructor
def klcedimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True,
                  clf_feat_blocks=0, clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                  out_feature_dim=512, gauss_sigma=1.0,
                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,
                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default',
                  normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0,
                  final_relu=False, frozen_backbone_layers=()):
    """Construct a KL/CE (PrDiMP-style) network with a ResNet-50 backbone; the
    classifier filter is optimized with PrDiMPSteepestDescentNewton."""
    if not train_feature_extractor:
        frozen_backbone_layers = 'all'

    # Backbone
    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                             final_conv=final_conv, norm_scale=norm_scale,
                                                             out_dim=out_feature_dim, final_relu=final_relu)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim,
                                                          init_weights=init_initializer)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,
                                                          init_step_length=optim_init_step,
                                                          init_filter_reg=optim_init_reg,
                                                          gauss_sigma=gauss_sigma,
                                                          detach_length=detach_length, alpha_eps=alpha_eps,
                                                          init_uni_weight=init_uni_weight,
                                                          min_filter_reg=optim_min_reg,
                                                          normalize_label=normalize_label,
                                                          label_shrink=label_shrink,
                                                          softmax_reg=softmax_reg,
                                                          label_threshold=label_threshold)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim,
                                       pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net


================================================
FILE: artrackv2_mindspore/external/AR/ltr/run_training.py
================================================
import os
import sys
import argparse
import importlib
import multiprocessing
import cv2 as cv
import torch.backends.cudnn

env_path = os.path.join(os.path.dirname(__file__), '..')
if env_path not in sys.path:
    sys.path.append(env_path)

import ltr.admin.settings as ws_settings


def run_training(train_module, train_name, cudnn_benchmark=True):
    """Run a train scripts in train_settings.
    args:
        train_module: Name of module in the "train_settings/" folder.
        train_name: Name of the train settings file.
        cudnn_benchmark: Use cudnn benchmark or not (default is True).
    """

    # This is needed to avoid strange crashes related to opencv
    cv.setNumThreads(0)

    torch.backends.cudnn.benchmark = cudnn_benchmark

    print('Training: {} {}'.format(train_module, train_name))

    settings = ws_settings.Settings()
    settings.module_name = train_module
    settings.script_name = train_name
    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)

    # Dynamically load train_settings/<module>/<name>.py and call its run().
    expr_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
    expr_func = getattr(expr_module, 'run')

    expr_func(settings)


def main():
    """Parse command-line arguments and launch run_training."""
    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')
    parser.add_argument('train_module', type=str, help='Name of module in the "train_settings/" folder.')
    parser.add_argument('train_name', type=str, help='Name of the train settings file.')
    # NOTE(review): argparse type=bool converts any non-empty string to True,
    # so passing "0" still enables cudnn benchmark — confirm intended.
    parser.add_argument('--cudnn_benchmark', type=bool, default=True, help='Set cudnn benchmark on (1) or off (0) (default is on).')

    args = parser.parse_args()

    run_training(args.train_module, args.train_name, args.cudnn_benchmark)


if __name__ == '__main__':
    multiprocessing.set_start_method('spawn', force=True)
    main()


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/__init__.py
================================================



================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/__init__.py
================================================



================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm


def run(settings):
    """Train settings entry point: ATOM IoUNet, default settings plus GOT10k."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=50,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer
    # Only the IoUNet head is trained; the backbone is left untouched.
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_gmm_sampl.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm


def run(settings):
    """Train settings entry point: ATOM* baseline with GMM proposal sampling."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet using the baseline ATOM* settings in [https://arxiv.org/abs/1909.12297].' \
                           'Unlike standard ATOM, it employs the GMM-based proposal sampling and minor parameter changes.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'proposal_method': 'gmm', 'boxes_per_frame': 128, 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=200,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_paper.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm


def run(settings):
    """Train settings entry point: ATOM IoUNet exactly as in the paper (no GOT10k)."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with default settings according to the paper.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    trackingnet_val = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11,12)))

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=50,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_prob_ml.py
================================================
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
import ltr.models.loss.kl_regression as klreg_losses
import ltr.actors.bbreg as bbreg_actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm


def run(settings):
    """Train settings entry point: ATOM with probabilistic ML bounding-box regression."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM using the probabilistic maximum likelihood trained regression model for bounding-box' \
                           'regression presented in [https://arxiv.org/abs/1909.12297].'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0, 0), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)],
                       'add_mean_box': True}
    data_processing_train = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,
                                                         output_sz=settings.output_sz,
                                                         center_jitter_factor=settings.center_jitter_factor,
                                                         scale_jitter_factor=settings.scale_jitter_factor,
                                                         mode='sequence',
                                                         proposal_params=proposal_params,
                                                         transform=transform_train,
                                                         joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,
                                                       output_sz=settings.output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       proposal_params=proposal_params,
                                                       transform=transform_val,
                                                       joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=200,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = klreg_losses.MLRegression()
    actor = bbreg_actors.AtomBBKLActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/dimp/__init__.py
================================================



================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/dimp/dimp18.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    """Train settings entry point: default DiMP training with a ResNet-18 backbone."""
    settings.description = 'Default train settings for DiMP with ResNet18 as backbone.'
    settings.batch_size = 26
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                    'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    label_function_params=label_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                            clf_feat_norm=True, final_conv=True, optim_init_step=0.9, optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,
                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}

    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}

    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer
    # Per-module learning rates; parameters without an explicit 'lr' use the global 2e-4.
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},
                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},
                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},
                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},
                            {'params': actor.net.feature_extractor.parameters()}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/train_settings/dimp/dimp50.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'
settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16  # image crop fed to the network (18*16 = 288 px)
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/clf_ce', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    # output_sigma: width of the Gaussian score label relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    label_function_params=label_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    # epoch_interval=5: validation runs only every 5th epoch.
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                            optim_init_step=0.9, optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,
                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid',
                            score_act='relu')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}

    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}

    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer
    optimizer =
settings.batch_size = 26
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16  # image crop fed to the network (18*16 = 288 px)
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    # output_sigma: width of the Gaussian score label relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    # PrDiMP samples Gaussian box proposals instead of IoU-thresholded ones.
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                            'kernel_sz': settings.target_filter_sz, 'normalize': True}
    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    # epoch_interval=5: validation runs only every 5th epoch.
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.klcedimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                                clf_feat_norm=True, final_conv=True, optim_init_step=1.0, optim_init_reg=0.05,
                                optim_min_reg=0.05,
                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05,
                                normalize_label=True, init_initializer='zero')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}
settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16  # image crop fed to the network (18*16 = 288 px)
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    # output_sigma: width of the Gaussian score label relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    # PrDiMP samples Gaussian box proposals instead of IoU-thresholded ones.
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                            'kernel_sz': settings.target_filter_sz, 'normalize': True}
    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    # epoch_interval=5: validation runs only every 5th epoch.
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.klcedimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                                clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,
                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05,
                                normalize_label=True, init_initializer='zero')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce':
settings.batch_size = 20
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 6.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 22
    settings.output_sz = settings.feature_sz * 16  # image crop fed to the network (22*16 = 352 px)
    settings.center_jitter_factor = {'train': 3, 'test': 5.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform (SuperDiMP adds horizontal-flip augmentation)
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.RandomHorizontalFlip(probability=0.5),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    # output_sigma: width of the Gaussian score label relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                            'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        crop_type='inside_major',
                                                        max_scale_change=1.5,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      crop_type='inside_major',
                                                      max_scale_change=1.5,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader (all four datasets weighted equally here)
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=40000, max_gap=200, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=10000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    # epoch_interval=5: validation runs only every 5th epoch.
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor (early backbone layers are kept frozen)
    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                            optim_init_step=0.9, optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,
                            bin_displacement=0.1, mask_init_factor=3.0,
                            target_mask_act='sigmoid', score_act='relu',
                            frozen_backbone_layers=['conv1', 'bn1', 'layer1', 'layer2'])

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}

    loss_weight = {'bb_ce': 0.01, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}

    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer: per-module learning rates; only layer3 of the backbone is trained.
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},
                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},
                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},
                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},
                            {'params': actor.net.feature_extractor.layer3.parameters(), 'lr': 2e-5}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/external/AR/ltr/trainers/__init__.py
================================================
from .base_trainer import BaseTrainer
from .ltr_trainer import LTRTrainer


================================================
FILE: artrackv2_mindspore/external/AR/ltr/trainers/base_trainer.py
================================================
import os
import glob
import torch
import traceback
from ltr.admin import loading, multigpu


class BaseTrainer:
    """Base trainer class. Contains functions for training and saving/loading chackpoints. Trainer classes should
    inherit from this one and overload the train_epoch function."""

    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g.
[train_loader, val_loader]. In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g. Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
        """
        self.actor = actor
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.loaders = loaders

        self.update_settings(settings)

        self.epoch = 0
        self.stats = {}

        # Prefer an explicitly-configured device; otherwise pick cuda:0 when available and enabled.
        self.device = getattr(settings, 'device', None)
        if self.device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() and settings.use_gpu else "cpu")

        self.actor.to(self.device)

    def update_settings(self, settings=None):
        """Updates the trainer settings. Must be called to update internal settings."""
        if settings is not None:
            self.settings = settings

        # Resolve the checkpoint directory under the workspace; created on demand.
        if self.settings.env.workspace_dir is not None:
            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)
            self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')
            if not os.path.exists(self._checkpoint_dir):
                os.makedirs(self._checkpoint_dir)
        else:
            self._checkpoint_dir = None

    def train(self, max_epochs, load_latest=False, fail_safe=True):
        """Do training for the given number of epochs.
        args:
            max_epochs - Max number of training epochs,
            load_latest - Bool indicating whether to resume from latest epoch.
            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.
        """
        epoch = -1
        num_tries = 10
        for i in range(num_tries):
            try:
                if load_latest:
                    self.load_checkpoint()

                for epoch in range(self.epoch+1, max_epochs+1):
                    self.epoch = epoch

                    self.train_epoch()

                    if self.lr_scheduler is not None:
                        self.lr_scheduler.step()

                    if self._checkpoint_dir:
                        self.save_checkpoint()
            # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit,
            # so Ctrl-C triggers a restart instead of stopping — confirm this is intended.
            except:
                print('Training crashed at epoch {}'.format(epoch))
                if fail_safe:
                    # Roll back one epoch and resume from the last saved checkpoint.
                    self.epoch -= 1
                    load_latest = True
                    print('Traceback for the error!')
                    print(traceback.format_exc())
                    print('Restarting training from last epoch ...')
                else:
                    raise

        print('Finished training!')

    def train_epoch(self):
        raise NotImplementedError

    def save_checkpoint(self):
        """Saves a checkpoint of the network and other variables."""

        # Unwrap DataParallel-style wrappers before serializing.
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__
        state = {
            'epoch': self.epoch,
            'actor_type': actor_type,
            'net_type': net_type,
            'net': net.state_dict(),
            'net_info': getattr(net, 'info', None),
            'constructor': getattr(net, 'constructor', None),
            'optimizer': self.optimizer.state_dict(),
            'stats': self.stats,
            'settings': self.settings
        }

        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # First save as a tmp file
        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)
        torch.save(state, tmp_file_path)

        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)

        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure
        os.rename(tmp_file_path, file_path)

    def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False):
        """Loads a network checkpoint file.

        Can be called in three different ways:
            load_checkpoint():
                Loads the latest epoch from the workspace. Use this to continue training.
            load_checkpoint(epoch_num):
                Loads the network at the given epoch number (int).
            load_checkpoint(path_to_checkpoint):
                Loads the file from the given absolute path (str).
        """

        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__

        if checkpoint is None:
            # Load most recent checkpoint
            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,
                                                                             self.settings.project_path, net_type)))
            if checkpoint_list:
                checkpoint_path = checkpoint_list[-1]
            else:
                print('No matching checkpoint file found')
                return
        elif isinstance(checkpoint, int):
            # Checkpoint is the epoch number
            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,
                                                                 net_type, checkpoint)
        elif isinstance(checkpoint, str):
            # checkpoint is the path
            if os.path.isdir(checkpoint):
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError

        # Load network
        checkpoint_dict = loading.torch_load_legacy(checkpoint_path)

        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'

        if fields is None:
            fields = checkpoint_dict.keys()
        if ignore_fields is None:
            ignore_fields = ['settings']

        # Never load the scheduler. It exists in older checkpoints.
        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])

        # Load all fields
        for key in fields:
            if key in ignore_fields:
                continue
            if key == 'net':
                net.load_state_dict(checkpoint_dict[key])
            elif key == 'optimizer':
                self.optimizer.load_state_dict(checkpoint_dict[key])
            else:
                setattr(self, key, checkpoint_dict[key])

        # Set the net info
        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
            net.constructor = checkpoint_dict['constructor']
        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
            net.info = checkpoint_dict['net_info']

        # Update the epoch in lr scheduler
        # NOTE(review): this dereferences self.lr_scheduler unconditionally — it would
        # raise AttributeError when the trainer was built with lr_scheduler=None. Confirm
        # all resuming callers pass a scheduler.
        if 'epoch' in fields:
            self.lr_scheduler.last_epoch = self.epoch

        return True


================================================
FILE: artrackv2_mindspore/external/AR/ltr/trainers/ltr_trainer.py
================================================
import os
from collections import OrderedDict
from ltr.trainers import BaseTrainer
from ltr.admin.stats import AverageMeter, StatValue
from ltr.admin.tensorboard import TensorboardWriter
import torch
import time


class LTRTrainer(BaseTrainer):
    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g. [train_loader, val_loader].
                In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g.
Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
        """
        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)

        self._set_default_settings()

        # Initialize statistics variables (one stats dict per loader, lazily filled)
        self.stats = OrderedDict({loader.name: None for loader in self.loaders})

        # Initialize tensorboard
        tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)
        self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])

        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)

    def _set_default_settings(self):
        # Dict of all default values; only fills settings attributes that are missing/None.
        default = {'print_interval': 10,
                   'print_stats': None,
                   'description': ''}

        for param, default_value in default.items():
            if getattr(self.settings, param, None) is None:
                setattr(self.settings, param, default_value)

    def cycle_dataset(self, loader):
        """Do a cycle of training or validation."""

        self.actor.train(loader.training)
        torch.set_grad_enabled(loader.training)

        self._init_timing()

        for i, data in enumerate(loader, 1):
            # get inputs
            if self.move_data_to_gpu:
                data = data.to(self.device)

            # The current epoch and settings are attached to the batch for the actor.
            data['epoch'] = self.epoch
            data['settings'] = self.settings

            # forward pass
            loss, stats = self.actor(data)

            # backward pass and update weights
            if loader.training:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # update statistics
            batch_size = data['train_images'].shape[loader.stack_dim]
            self._update_stats(stats, batch_size, loader)

            # print statistics
            self._print_stats(i, loader, batch_size)

    def train_epoch(self):
        """Do one epoch for each loader."""
        for loader in self.loaders:
            # Loaders may opt to run only every Nth epoch (e.g. validation every 5).
            if self.epoch % loader.epoch_interval == 0:
                self.cycle_dataset(loader)

        self._stats_new_epoch()
        self._write_tensorboard()

    def _init_timing(self):
        # Reset frame counters used for FPS reporting.
        self.num_frames = 0
        self.start_time = time.time()
        self.prev_time = self.start_time

    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
        # Initialize stats if not initialized yet
        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:
            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})

        for name, val in new_stats.items():
            if name not in self.stats[loader.name].keys():
                self.stats[loader.name][name] = AverageMeter()
            self.stats[loader.name][name].update(val, batch_size)

    def _print_stats(self, i, loader, batch_size):
        self.num_frames += batch_size
        current_time = time.time()
        batch_fps = batch_size / (current_time - self.prev_time)
        average_fps = self.num_frames / (current_time - self.start_time)
        self.prev_time = current_time
        if i % self.settings.print_interval == 0 or i == loader.__len__():
            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())
            print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps)
            for name, val in self.stats[loader.name].items():
                if (self.settings.print_stats is None or name in self.settings.print_stats) and hasattr(val, 'avg'):
                    print_str += '%s: %.5f , ' % (name, val.avg)
            # Drop the trailing separator before printing.
            print(print_str[:-5])

    def _stats_new_epoch(self):
        # Record learning rate
        for loader in self.loaders:
            if loader.training:
                # NOTE(review): lr_scheduler.get_lr() is deprecated in newer PyTorch in
                # favor of get_last_lr(); get_lr() can report scaled values when called
                # outside step(). Kept as-is to preserve behavior — verify torch version.
                lr_list = self.lr_scheduler.get_lr()
                for i, lr in enumerate(lr_list):
                    var_name = 'LearningRate/group{}'.format(i)
                    if var_name not in self.stats[loader.name].keys():
                        self.stats[loader.name][var_name] = StatValue()
                    self.stats[loader.name][var_name].update(lr)

        for loader_stats in self.stats.values():
            if loader_stats is None:
                continue
            for stat_value in loader_stats.values():
                if hasattr(stat_value, 'new_epoch'):
                    stat_value.new_epoch()

    def _write_tensorboard(self):
        if self.epoch == 1:
            self.tensorboard_writer.write_info(self.settings.module_name, self.settings.script_name, self.settings.description)

        self.tensorboard_writer.write_epoch(self.stats, self.epoch)


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/ARcm_seg.py
================================================
import os
import sys
import torch
import numpy as np
import cv2
import torch.nn as nn
from external.AR.pytracking.utils.loading import load_network from external.AR.ltr.data.processing_utils_SE import sample_target_SE, transform_image_to_crop_SE, map_mask_back env_path = os.path.join(os.path.dirname(__file__), '..') if env_path not in sys.path: sys.path.append(env_path) def mask_torch2numpy(Pmask): Pmask_arr = np.array(Pmask.squeeze().cpu()) # (H,W) (0,1) return Pmask_arr class ARcm_seg(object): def __init__(self, refine_net_dir, search_factor=2.0, input_sz=256): self.refine_network = self.get_network(refine_net_dir) self.search_factor = search_factor self.input_sz = input_sz self.mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3)) self.std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3)) def initialize(self, frame1, bbox1): ''' :param frame1: cv array (H,W,3) :param bbox1: ndarray (4,) :return: ''' '''Step1: get cropped patch(tensor)''' patch1, h_f, w_f = sample_target_SE(frame1, bbox1, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT) patch1_tensor = self.img_preprocess(patch1) '''Step2: get GT's cooridinate on the cropped patch(tensor)''' crop_sz = torch.Tensor((self.input_sz, self.input_sz)) bbox1_tensor = self.gt_preprocess(bbox1) # (4,) bbox1_crop_tensor = transform_image_to_crop_SE(bbox1_tensor, bbox1_tensor, h_f, w_f, crop_sz).cuda() '''Step3: forward prop (reference branch)''' with torch.no_grad(): self.refine_network.forward_ref(patch1_tensor, bbox1_crop_tensor) '''refine''' def get_mask(self, Cframe, Cbbox, dtm=None, vis=False): ''' :param Cframe: Current frame(cv2 array) :param Cbbox: Current bbox (ndarray) (x1,y1,w,h) :return: mask ''' '''Step1: get cropped patch(tensor)''' Cpatch, h_f, w_f = sample_target_SE(Cframe, Cbbox, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT) Cpatch_tensor = self.img_preprocess(Cpatch) '''Step2: forward prop (test branch)''' with torch.no_grad(): if dtm is not None: '''2020.4.26 support input dtm''' pred = self.refine_network.forward_test(Cpatch_tensor, dtm, mode='mask') else: 
pred = self.refine_network.forward_test(Cpatch_tensor,mode='mask') Pmask_arr = mask_torch2numpy(pred) mask_arr = map_mask_back(Cframe, Cbbox, self.search_factor, Pmask_arr, mode=cv2.BORDER_CONSTANT) if vis: return mask_arr, Cpatch, Pmask_arr else: return mask_arr def get_network(self,checkpoint_dir): network = load_network(checkpoint_dir) network.cuda() network.eval() return network def img_preprocess(self,img_arr): '''---> Pytorch tensor(RGB),Normal(-1 to 1,subtract mean, divide std) input img_arr (H,W,3) output (1,1,3,H,W) ''' norm_img = ((img_arr/255.0) - self.mean)/(self.std) img_f32 = norm_img.astype(np.float32) img_tensor = torch.from_numpy(img_f32).cuda() img_tensor = img_tensor.permute((2,0,1)) return img_tensor.unsqueeze(dim=0).unsqueeze(dim=0) def gt_preprocess(self,gt_arr): ''' :param gt: ndarray (4,) :return: torch tensor (4,) ''' return torch.from_numpy(gt_arr.astype(np.float32)) def add_frame_mask(frame, mask, threshold=0.5): mask_new = (mask>threshold)*255 #(H,W) frame_new = frame.copy().astype(np.float) frame_new[...,1] += 0.3*mask_new frame_new = frame_new.clip(0,255).astype(np.uint8) return frame_new def add_frame_bbox(frame, refined_box, color): x1, y1, w, h = refined_box.tolist() cv2.rectangle(frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2) return frame ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/VOT/tracker_DiMP.m ================================================ % Set path to the python in the pytracking conda environment python_path = 'PATH_TO_CONDA_INSTALLATION/envs/pytracking/bin/python'; % Set path to pytracking pytracking_path = 'PATH_TO_VISIONML/pytracking'; % Set path to trax installation. 
Check % https://trax.readthedocs.io/en/latest/tutorial_compiling.html for % compilation information trax_path = 'PATH_TO_VOT_TOOLKIT/native/trax'; tracker_name = 'dimp'; % Name of the tracker to evaluate runfile_name = 'dimp18_vot'; % Name of the parameter file to use debug = 0; %% tracker_label = [tracker_name, '_', runfile_name]; % Generate python command tracker_command = sprintf(['%s -c "import sys; sys.path.append(''%s'');', ... 'sys.path.append(''%s/support/python'');', ... 'import run_vot;', ... 'run_vot.run_vot(''%s'', ''%s'', debug=%d)"'],... python_path, pytracking_path, trax_path, ... tracker_name, runfile_name, debug); tracker_interpreter = python_path; tracker_linkpath = {[trax_path, '/build'],... [trax_path, '/build/support/client'],... [trax_path, '/build/support/opencv']}; ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/VOT/trackers.ini ================================================ [DiMP] # label = DiMP protocol = traxpython command = run_vot; run_vot.run_vot2020('dimp', 'dimp50') # Set the tracker name and the parameter name # Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths) paths = PATH_TO_PYTRACKING # Additional environment paths #env_PATH = ;${PATH} ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/VOT/vot.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016, 2019 """ import sys import copy import collections try: import trax except ImportError: raise Exception('TraX support not found. 
Please add trax module to Python path.') Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format, channels=None): """ Constructor Args: region_format: Region format options """ assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON]) if channels is None: channels = ['color'] elif channels == 'rgbd': channels = ['color', 'depth'] elif channels == 'rgbt': channels = ['color', 'ir'] elif channels == 'ir': channels = ['ir'] else: raise Exception('Illegal configuration {}.'.format(channels)) self._trax = trax.Server([region_format], [trax.Image.PATH], channels) request = self._trax.wait() assert(request.type == 'initialize') if isinstance(request.region, trax.Polygon): self._region = Polygon([Point(x[0], x[1]) for x in request.region]) else: self._region = Rectangle(*request.region.bounds()) self._image = [str(x) for k, x in request.image.items()] if len(self._image) == 1: self._image = self._image[0] self._trax.status(request.region) def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = None): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, Rectangle) or isinstance(region, Polygon)) if isinstance(region, Polygon): tregion = trax.Polygon.create([(x.x, x.y) for x in region.points]) else: tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height) properties = {} if not confidence is None: properties['confidence'] = confidence self._trax.status(tregion, properties) def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if hasattr(self, 
"_image"): image = self._image del self._image return tuple(image) request = self._trax.wait() if request.type == 'frame': image = [str(x) for k, x in request.image.items()] if len(image) == 1: image = image[0] return tuple(image) else: return None def quit(self): if hasattr(self, '_trax'): self._trax.quit() def __del__(self): self.quit() ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_065.py ================================================ from pytracking.VOT2020_super_only_mask_384_HP.dimp_alpha_seg_class import run_vot_exp import os os.environ['CUDA_VISIBLE_DEVICES'] = '1' # run_vot_exp('dimp','dimp50_vot19','SEbcm',0.60,VIS=False) run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=False) # run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=True) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_seg_class.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import cv2 import torch import vot import sys import time '''Refine module & Pytracking base trackers''' import os from pytracking.evaluation import Tracker from pytracking.ARcm_seg import ARcm_seg from pytracking.vot20_utils import * '''''' '''DiMP-alpha class''' class DIMP_ALPHA(object): def __init__(self, tracker_name='dimp', para_name='dimp50_vot19', refine_model_name='ARcm_coco_seg', threshold=0.15): self.THRES = threshold '''create tracker''' '''DIMP''' tracker_info = Tracker(tracker_name, para_name, None) params = tracker_info.get_parameters() params.visualization = False params.debug = False params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097} self.dimp = tracker_info.tracker_class(params) '''Alpha-Refine''' 
        project_path = os.path.join(os.path.dirname(__file__), '..', '..')
        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')
        refine_path = os.path.join(refine_root, refine_model_name)
        '''2020.4.25 input size: 384x384'''
        self.alpha = ARcm_seg(refine_path, input_sz=384)

    def initialize(self, img_RGB, mask):
        # Initialize both the base DiMP tracker and the Alpha-Refine module from
        # the axis-aligned box derived from the first-frame mask.
        region = rect_from_mask(mask)
        self.H, self.W, _ = img_RGB.shape
        gt_bbox_np = np.array(region).astype(np.float32)
        '''Initialize dimp for specific video'''
        gt_bbox_torch = torch.from_numpy(gt_bbox_np)
        init_info = {}
        init_info['init_bbox'] = gt_bbox_torch
        _ = self.dimp.initialize(img_RGB, init_info)
        '''initilize refinement module for specific video'''
        self.alpha.initialize(img_RGB, np.array(gt_bbox_np))

    def track(self, img_RGB):
        '''TRACK'''
        '''base tracker'''
        outputs = self.dimp.track(img_RGB)
        pred_bbox = outputs['target_bbox']
        '''Step1: Post-Process'''
        x1, y1, w, h = pred_bbox
        # add boundary and min size limit
        x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (self.H, self.W))
        w = x2 - x1
        h = y2 - y1
        # DiMP state is stored center-first as (cy, cx) and (h, w).
        new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
        new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
        new_scale = torch.sqrt(new_target_sz.prod() / self.dimp.base_target_sz.prod())
        ##### update
        # Feed the clipped box back into the tracker so its internal state stays
        # inside the image bounds.
        self.dimp.pos = new_pos.clone()
        self.dimp.target_sz = new_target_sz
        self.dimp.target_scale = new_scale
        bbox_new = [x1, y1, w, h]
        '''Step2: Mask report'''
        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(bbox_new), vis=True)
        final_mask = (pred_mask > self.THRES).astype(np.uint8)
        search_region = search.astype(np.uint8)
        search_mask = (search_mask > self.THRES).astype(np.uint8)
        return bbox_new, final_mask, search_region, search_mask


def run_vot_exp(tracker_name, para_name, refine_model_name, threshold, VIS=False):
    # Drive one VOT2020 mask-protocol session: initialize from the toolkit mask,
    # then report a refined mask per frame (with optional debug visualization).
    torch.set_num_threads(1)
    # torch.cuda.set_device(CUDA_ID)  # set GPU id
    # NOTE(review): save_root is relative to the CWD (os.path.join('', para_name));
    # confirm that is intended for the visualization dumps.
    save_root = os.path.join('', para_name)
    if VIS and (not os.path.exists(save_root)):
        os.mkdir(save_root)
    tracker = DIMP_ALPHA(tracker_name=tracker_name, para_name=para_name,
                         refine_model_name=refine_model_name, threshold=threshold)
    handle = vot.VOT("mask")
    selection = handle.region()
    imagefile = handle.frame()
    if not imagefile:
        sys.exit(0)
    if VIS:
        '''for vis'''
        seq_name = imagefile.split('/')[-3]
        save_v_dir = os.path.join(save_root, seq_name)
        if not os.path.exists(save_v_dir):
            os.mkdir(save_v_dir)
        cur_time = int(time.time() % 10000)
        save_dir = os.path.join(save_v_dir, str(cur_time))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
    # mask given by the toolkit ends with the target (zero-padding to the right and down is needed)
    mask = make_full_size(selection, (image.shape[1], image.shape[0]))
    tracker.initialize(image, mask)

    while True:
        imagefile = handle.frame()
        if not imagefile:
            break
        image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
        b1, m, search, search_m = tracker.track(image)
        handle.report(m)
        if VIS:
            '''Visualization'''
            # original image
            image_ori = image[:, :, ::-1].copy()  # RGB --> BGR
            image_name = imagefile.split('/')[-1]
            save_path = os.path.join(save_dir, image_name)
            cv2.imwrite(save_path, image_ori)
            # dimp box
            image_b = image_ori.copy()
            cv2.rectangle(image_b, (int(b1[0]), int(b1[1])),
                          (int(b1[0] + b1[2]), int(b1[1] + b1[3])), (0, 0, 255), 2)
            image_b_name = image_name.replace('.jpg', '_bbox.jpg')
            save_path = os.path.join(save_dir, image_b_name)
            cv2.imwrite(save_path, image_b)
            # search region
            search_bgr = search[:, :, ::-1].copy()
            search_name = image_name.replace('.jpg', '_search.jpg')
            save_path = os.path.join(save_dir, search_name)
            cv2.imwrite(save_path, search_bgr)
            # search region mask
            search_bgr_m = search_bgr.astype(np.float32)
            search_bgr_m[:, :, 1] += 127.0 * search_m
            search_bgr_m[:, :, 2] += 127.0 * search_m
            contours, _ = cv2.findContours(search_m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            search_bgr_m = cv2.drawContours(search_bgr_m, contours, -1, (0, 255, 255), 4)
            search_bgr_m = search_bgr_m.clip(0, 255).astype(np.uint8)
            search_name_m = image_name.replace('.jpg', '_search_mask.jpg')
            save_path = os.path.join(save_dir, search_name_m)
            cv2.imwrite(save_path, search_bgr_m)
            # original image + mask
            image_m = image_ori.copy().astype(np.float32)
            image_m[:, :, 1] += 127.0 * m
            image_m[:, :, 2] += 127.0 * m
            contours, _ = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            image_m = cv2.drawContours(image_m, contours, -1, (0, 255, 255), 2)
            image_m = image_m.clip(0, 255).astype(np.uint8)
            image_mask_name_m = image_name.replace('.jpg', '_mask.jpg')
            save_path = os.path.join(save_dir, image_mask_name_m)
            cv2.imwrite(save_path, image_m)


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_alpha_seg_class.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import torch
import vot
import sys
import time
import os
import numpy as np

from lib.test.tracker.mixformer_online import MixFormerOnline
from pytracking.ARcm_seg import ARcm_seg
from pytracking.vot20_utils import *
import lib.test.parameter.mixformer_online as vot_params


class MIXFORMER_ALPHA_SEG(object):
    def __init__(self, tracker, refine_model_name='ARcm_coco_seg', threshold=0.6):
        self.THRES = threshold
        self.tracker = tracker
        '''create tracker'''
        '''Alpha-Refine'''
        project_path = os.path.join(os.path.dirname(__file__), '..', '..')
        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')
        refine_path = os.path.join(refine_root, refine_model_name)
        '''2020.4.25 input size: 384x384'''
        self.alpha = ARcm_seg(refine_path, input_sz=384)

    def initialize(self, image, mask):
        region = rect_from_mask(mask)
        # init_info = {'init_bbox': region}
        # self.tracker.initialize(image, init_info)
        self.H, self.W, _ = image.shape
        gt_bbox_np =
np.array(region).astype(np.float32)
        '''Initialize STARK for specific video'''
        init_info = {'init_bbox': list(gt_bbox_np)}
        self.tracker.initialize(image, init_info)
        '''initilize refinement module for specific video'''
        self.alpha.initialize(image, np.array(gt_bbox_np))

    def track(self, img_RGB):
        '''TRACK'''
        '''base tracker'''
        outputs = self.tracker.track(img_RGB)
        pred_bbox = outputs['target_bbox']
        '''Step2: Mask report'''
        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)
        final_mask = (pred_mask > self.THRES).astype(np.uint8)
        # Returns (mask, confidence); confidence is a constant 1 for this protocol.
        return final_mask, 1


def make_full_size(x, output_sz):
    '''
    zero-pad input x (right and down) to match output_sz
    x: numpy array e.g., binary mask
    output_sz: size of the output [width, height]
    '''
    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:
        return x
    pad_x = output_sz[0] - x.shape[1]
    if pad_x < 0:
        x = x[:, :x.shape[1] + pad_x]
        # padding has to be set to zero, otherwise pad function fails
        pad_x = 0
    pad_y = output_sz[1] - x.shape[0]
    if pad_y < 0:
        x = x[:x.shape[0] + pad_y, :]
        # padding has to be set to zero, otherwise pad function fails
        pad_y = 0
    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)


# Module-level VOT2020 driver: build the MixFormer tracker, then run the
# toolkit initialize/frame/report loop.
refine_model_name = 'ARcm_coco_seg_only_mask_384'
params = vot_params.parameters("baseline", model="mixformer_online_22k.pth.tar")
# params = vot_params.parameters("baseline")
mixformer = MixFormerOnline(params, "VOT20")
tracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)
handle = vot.VOT("mask")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
    sys.exit(0)
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)
mask = make_full_size(selection, (image.shape[1], image.shape[0]))
tracker.H = image.shape[0]
tracker.W = image.shape[1]
tracker.initialize(image, mask)
while True:
    imagefile = handle.frame()
    if not imagefile:
        break
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
    region, confidence = tracker.track(image)
    handle.report(region, confidence)


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_large_alpha_seg_class.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import torch
import vot
import sys
import time
import os
import numpy as np

from lib.test.tracker.mixformer_online import MixFormerOnline
from pytracking.ARcm_seg import ARcm_seg
from pytracking.vot20_utils import *
import lib.test.parameter.mixformer_online as vot_params


# NOTE(review): this file duplicates mixformer_alpha_seg_class.py except for the
# "baseline_large" parameters and checkpoint below.
class MIXFORMER_ALPHA_SEG(object):
    def __init__(self, tracker, refine_model_name='ARcm_coco_seg', threshold=0.6):
        self.THRES = threshold
        self.tracker = tracker
        '''create tracker'''
        '''Alpha-Refine'''
        project_path = os.path.join(os.path.dirname(__file__), '..', '..')
        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')
        refine_path = os.path.join(refine_root, refine_model_name)
        '''2020.4.25 input size: 384x384'''
        self.alpha = ARcm_seg(refine_path, input_sz=384)

    def initialize(self, image, mask):
        region = rect_from_mask(mask)
        # init_info = {'init_bbox': region}
        # self.tracker.initialize(image, init_info)
        self.H, self.W, _ = image.shape
        gt_bbox_np = np.array(region).astype(np.float32)
        '''Initialize STARK for specific video'''
        init_info = {'init_bbox': list(gt_bbox_np)}
        self.tracker.initialize(image, init_info)
        '''initilize refinement module for specific video'''
        self.alpha.initialize(image, np.array(gt_bbox_np))

    def track(self, img_RGB):
        '''TRACK'''
        '''base tracker'''
        outputs = self.tracker.track(img_RGB)
        pred_bbox = outputs['target_bbox']
        '''Step2: Mask report'''
        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)
        final_mask = (pred_mask > self.THRES).astype(np.uint8)
        # Returns (mask, confidence); confidence is a constant 1 for this protocol.
        return final_mask, 1


def make_full_size(x, output_sz):
    '''
    zero-pad input x (right and down) to match output_sz
    x: numpy array e.g., binary mask
    output_sz: size of the output [width, height]
    '''
    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:
        return x
    pad_x = output_sz[0] - x.shape[1]
    if pad_x < 0:
        x = x[:, :x.shape[1] + pad_x]
        # padding has to be set to zero, otherwise pad function fails
        pad_x = 0
    pad_y = output_sz[1] - x.shape[0]
    if pad_y < 0:
        x = x[:x.shape[0] + pad_y, :]
        # padding has to be set to zero, otherwise pad function fails
        pad_y = 0
    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)


refine_model_name = 'ARcm_coco_seg_only_mask_384'
# params = vot_params.parameters("baseline_large")
params = vot_params.parameters("baseline_large", model="mixformerL_online_22k.pth.tar")
mixformer = MixFormerOnline(params, "VOT20")
tracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)
handle = vot.VOT("mask")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
    sys.exit(0)
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)
mask = make_full_size(selection, (image.shape[1], image.shape[0]))
tracker.H = image.shape[0]
tracker.W = image.shape[1]
tracker.initialize(image, mask)
while True:
    imagefile = handle.frame()
    if not imagefile:
        break
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right
    region, confidence = tracker.track(image)
    handle.report(region, confidence)


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/vot.py
================================================
"""
\file vot.py

@brief Python utility functions for VOT integration

@author Luka Cehovin, Alessio Dore

@date 2016
"""

import sys
import copy
import
collections import numpy as np try: import trax except ImportError: raise Exception('TraX support not found. Please add trax module to Python path.') Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format, channels=None): """ Constructor Args: region_format: Region format options """ assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK]) if channels is None: channels = ['color'] elif channels == 'rgbd': channels = ['color', 'depth'] elif channels == 'rgbt': channels = ['color', 'ir'] elif channels == 'ir': channels = ['ir'] else: raise Exception('Illegal configuration {}.'.format(channels)) self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot="python")) request = self._trax.wait() assert(request.type == 'initialize') if isinstance(request.region, trax.Polygon): self._region = Polygon([Point(x[0], x[1]) for x in request.region]) elif isinstance(request.region, trax.Mask): self._region = request.region.array(True) else: self._region = Rectangle(*request.region.bounds()) self._image = [x.path() for k, x in request.image.items()] if len(self._image) == 1: self._image = self._image[0] self._trax.status(request.region) def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = None): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, (Rectangle, Polygon, np.ndarray))) if isinstance(region, Polygon): tregion = trax.Polygon.create([(x.x, x.y) for x in region.points]) elif isinstance(region, np.ndarray): tregion = trax.Mask.create(region) else: 
tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height) properties = {} if not confidence is None: properties['confidence'] = confidence self._trax.status(tregion, properties) def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if hasattr(self, "_image"): image = self._image del self._image return image request = self._trax.wait() if request.type == 'frame': image = [x.path() for k, x in request.image.items()] if len(image) == 1: return image[0] return image else: return None def quit(self): if hasattr(self, '_trax'): self._trax.quit() def __del__(self): self.quit() ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/__init__.py ================================================ from pytracking.libs import TensorList, TensorDict ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/analysis/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/analysis/evaluate_vos.py ================================================ import os import numpy as np import torch import pandas as pd from collections import OrderedDict from ltr.data.image_loader import imread_indexed from pytracking.evaluation import get_dataset from pathlib import Path from pytracking.analysis.plot_results import generate_formatted_report import pytracking.analysis.vos_utils as utils # Originally db_eval_sequence() in the davis challenge toolkit: def evaluate_sequence(seq_name, segmentations, annotations, object_info, measure='J'): """ Evaluate video sequence results. Arguments: segmentations (dict of ndarray): segmentation labels. annotations (dict of ndarray): ground-truth labels. 
object_info dict: {object_id: first_frame_index} measure evaluation metric (J,F) """ results = dict(raw=OrderedDict()) _measures = {'J': utils.davis_jaccard_measure, 'F': utils.davis_f_measure} _statistics = {'decay': utils.decay, 'mean': utils.mean, 'recall': utils.recall, 'std': utils.std} for obj_id, first_frame in object_info.items(): r = np.ones((len(annotations))) * np.nan for i, (an, sg) in enumerate(zip(annotations, segmentations)): if list(annotations.keys()).index(first_frame) < i < len(annotations) - 1: r[i] = _measures[measure](annotations[an] == obj_id, segmentations[sg] == obj_id) results['raw'][obj_id] = r for stat, stat_fn in _statistics.items(): results[stat] = [float(stat_fn(r)) for r in results['raw'].values()] return results def evaluate_dataset(results_path, dset_name, measure='J', to_file=True, scores=False, sequences=None, quiet=False): dset = get_dataset(dset_name) results = OrderedDict() dset_scores = [] dset_decay = [] dset_recall = [] if to_file: f = open(results_path / ("evaluation-%s.txt" % measure), "w") def _print(msg): if not quiet: print(msg) if to_file: print(msg, file=f) if sequences is not None: sequences = [sequences] if not isinstance(sequences, (list, tuple)) else sequences target_names = [] for j, sequence in enumerate(dset): if (sequences is not None) and (sequence.name not in sequences): continue # Load all frames frames = sequence.ground_truth_seg annotations = OrderedDict() segmentations = OrderedDict() for f in frames: if f is None: continue file = Path(f) annotations[file.name] = imread_indexed(file) if not scores: segmentations[file.name] = imread_indexed(os.path.join(results_path, sequence.name, file.name)) else: raise NotImplementedError # Find object ids and starting frames object_info = dict() for f_id, d in sequence.init_data.items(): for obj_id in d['object_ids']: object_info[int(obj_id)] = Path(d['mask']).name if 0 in object_info: # Remove background object_info.pop(0) # Evaluate n_seqs = len(dset) n_objs = 
len(object_info) seq_name = sequence.name _print("%d/%d: %s: %d object%s" % (j + 1, n_seqs, seq_name, n_objs, "s" if n_objs > 1 else "")) r = evaluate_sequence(seq_name, segmentations, annotations, object_info, measure=measure) results[seq_name] = r # Print scores, per frame and object, ignoring NaNs per_obj_score = [] # Per-object accuracies, averaged over the sequence per_frame_score = [] # Per-frame accuracies, averaged over the objects for obj_id, score in r['raw'].items(): target_names.append('{}_{}'.format(seq_name, obj_id)) per_frame_score.append(score) s = utils.mean(score) # Sequence average for one object per_obj_score.append(s) if n_objs > 1: _print("joint {obj}: acc {score:.3f} ┊{apf}┊".format(obj=obj_id, score=s, apf=utils.text_bargraph(score))) # Print mean object score per frame and final score dset_decay.extend(r['decay']) dset_recall.extend(r['recall']) dset_scores.extend(per_obj_score) seq_score = utils.mean(per_obj_score) # Final score seq_mean_score = utils.nanmean(np.array(per_frame_score), axis=0) # Mean object score per frame # Print sequence results _print("final : acc {seq:.3f} ({dset:.3f}) ┊{apf}┊".format( seq=seq_score, dset=np.mean(dset_scores), apf=utils.text_bargraph(seq_mean_score))) _print("%s: %.3f, recall: %.3f, decay: %.3f" % (measure, utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay))) if to_file: f.close() return target_names, dset_scores, dset_recall, dset_decay def evaluate_vos(trackers, dataset='yt2019_jjval', force=False): """ evaluate a list of trackers on a vos dataset. args: trackers - list of trackers to evaluate dataset - name of the dataset force - Force re-evaluation. 
If False, the pre-computed results are loaded if available """ csv_name_global = f'{dataset}_global_results.csv' csv_name_per_sequence = f'{dataset}_per-sequence_results.csv' table_g_all = [] table_seq_all = [] scores = {'J-Mean': [], 'J-Recall': [], 'J-Decay': []} display_names = [] for t in trackers: if t.display_name is not None: disp_name = t.display_name elif t.run_id is not None: disp_name = '{} {}_{:03d}'.format(t.name, t.parameter_name, t.run_id) else: disp_name = '{} {}'.format(t.name, t.parameter_name) display_names.append(disp_name) results_path = t.segmentation_dir csv_name_global_path = os.path.join(results_path, csv_name_global) csv_name_per_sequence_path = os.path.join(results_path, csv_name_per_sequence) if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path) and not force: table_g = pd.read_csv(csv_name_global_path) table_seq = pd.read_csv(csv_name_per_sequence_path) else: seq_names, dset_scores, dset_recall, dset_decay = evaluate_dataset(results_path, dataset, measure='J', to_file=False, scores=False, sequences=None) g_measures = ['J-Mean', 'J-Recall', 'J-Decay'] g_res = np.array([utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)]) g_res = np.reshape(g_res, [1, len(g_res)]) table_g = pd.DataFrame(data=g_res, columns=g_measures) with open(csv_name_global_path, 'w') as f: table_g.to_csv(f, index=False, float_format="%.3f") seq_measures = ['Sequence', 'J-Mean', 'J-Recall', 'J-Decay'] table_seq = pd.DataFrame(data=list(zip(seq_names, dset_scores, dset_recall, dset_decay)), columns=seq_measures) with open(csv_name_per_sequence_path, 'w') as f: table_seq.to_csv(f, index=False, float_format="%.3f") scores['J-Mean'].append(table_g['J-Mean'].values[0]*100) scores['J-Recall'].append(table_g['J-Recall'].values[0]*100) scores['J-Decay'].append(table_g['J-Decay'].values[0]*100) table_g_all.append(table_g) table_seq_all.append(table_seq) report = generate_formatted_report(display_names, scores) print(report) 
    return table_g_all, table_seq_all


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/analysis/extract_results.py
================================================
import os
import sys
import importlib
import numpy as np
from pytracking.utils.load_text import load_text
import torch
import pickle
from tqdm import tqdm

env_path = os.path.join(os.path.dirname(__file__), '../..')
if env_path not in sys.path:
    sys.path.append(env_path)

from pytracking.evaluation.environment import env_settings


def calc_err_center(pred_bb, anno_bb, normalized=False):
    # Center-distance error between (N, 4) xywh boxes; optionally normalized by
    # the ground-truth box size.
    pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0)
    anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 1.0)

    if normalized:
        pred_center = pred_center / anno_bb[:, 2:]
        anno_center = anno_center / anno_bb[:, 2:]

    err_center = ((pred_center - anno_center)**2).sum(1).sqrt()
    return err_center


def calc_iou_overlap(pred_bb, anno_bb):
    # IoU between (N, 4) xywh boxes; the -1.0/+1.0 terms follow the inclusive
    # pixel convention used by the benchmark toolkits.
    tl = torch.max(pred_bb[:, :2], anno_bb[:, :2])
    br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0)
    sz = (br - tl + 1.0).clamp(0)

    # Area
    intersection = sz.prod(dim=1)
    union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection

    return intersection / union


def calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None):
    # Per-frame overlap/center errors for one sequence, with dataset-specific
    # handling of NaN annotations, zero-size predictions and invisible targets.
    pred_bb = pred_bb.clone()

    # Check if invalid values are present
    if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():
        raise Exception('Error: Invalid results')

    if torch.isnan(anno_bb).any():
        if dataset == 'uav':
            pass
        else:
            raise Exception('Warning: NaNs in annotation')

    # A zero-size prediction is replaced by the previous frame's prediction
    # (unless the annotation itself is NaN there).
    if (pred_bb[:, 2:] == 0.0).any():
        for i in range(1, pred_bb.shape[0]):
            if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():
                pred_bb[i, :] = pred_bb[i-1, :]

    if pred_bb.shape[0] != anno_bb.shape[0]:
        if dataset == 'lasot':
            if pred_bb.shape[0] > anno_bb.shape[0]:
                # For monkey-17, there is a mismatch for some trackers.
                pred_bb = pred_bb[:anno_bb.shape[0], :]
            else:
                raise Exception('Mis-match in tracker prediction and GT lengths')
        else:
            # print('Warning: Mis-match in tracker prediction and GT lengths')
            if pred_bb.shape[0] > anno_bb.shape[0]:
                pred_bb = pred_bb[:anno_bb.shape[0], :]
            else:
                pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)
                pred_bb = torch.cat((pred_bb, pad), dim=0)

    # The first frame is the initialization frame, so score it as perfect.
    pred_bb[0, :] = anno_bb[0, :]

    if target_visible is not None:
        target_visible = target_visible.bool()
        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible
    else:
        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)

    err_center = calc_err_center(pred_bb, anno_bb)
    err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)
    err_overlap = calc_iou_overlap(pred_bb, anno_bb)

    # handle invalid anno cases
    if dataset in ['uav']:
        err_center[~valid] = -1.0
    else:
        err_center[~valid] = float("Inf")
    err_center_normalized[~valid] = -1.0
    err_overlap[~valid] = -1.0

    if dataset == 'lasot':
        err_center_normalized[~target_visible] = float("Inf")
        err_center[~target_visible] = float("Inf")

    if torch.isnan(err_overlap).any():
        raise Exception('Nans in calculated overlap')

    return err_overlap, err_center, err_center_normalized, valid


def extract_results(trackers, dataset, report_name, skip_missing_seq=False,
                    plot_bin_gap=0.05,
                    exclude_invalid_frames=False):
    # Compute success/precision curves for each (sequence, tracker) pair and
    # collect them into tensors for plotting/saving.
    settings = env_settings()
    eps = 1e-16

    result_plot_path = os.path.join(settings.result_plot_path, report_name)

    if not os.path.exists(result_plot_path):
        os.makedirs(result_plot_path)

    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)
    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)
    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0

    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)
    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),
                                                dtype=torch.float32)
    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                               dtype=torch.float32)
    # NOTE(review): sized with threshold_set_center.numel(); this only works because
    # it happens to equal threshold_set_center_norm.numel() (both 51) — consider
    # using the _norm set here for clarity.
    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                                    dtype=torch.float32)

    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)

    for seq_id, seq in enumerate(tqdm(dataset)):
        # Load anno
        anno_bb = torch.tensor(seq.ground_truth_rect)
        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None
        for trk_id, trk in enumerate(trackers):
            # Load results
            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)
            results_path = '{}.txt'.format(base_results_path)

            if os.path.isfile(results_path):
                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64))
            else:
                if skip_missing_seq:
                    valid_sequence[seq_id] = 0
                    break
                else:
                    raise Exception('Result not found. {}'.format(results_path))

            # Calculate measures
            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(
                pred_bb, anno_bb, seq.dataset, target_visible)

            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()

            if exclude_invalid_frames:
                seq_length = valid_frame.long().sum()
            else:
                seq_length = anno_bb.shape[0]

            if seq_length <= 0:
                raise Exception('Seq length zero')

            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length

    print('\n\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    # Prepare dictionary for saving data
    seq_names = [s.name for s in dataset]
    tracker_names = [{'name': t.name,
'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name} for t in trackers] eval_data = {'sequences': seq_names, 'trackers': tracker_names, 'valid_sequence': valid_sequence.tolist(), 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(), 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(), 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(), 'avg_overlap_all': avg_overlap_all.tolist(), 'threshold_set_overlap': threshold_set_overlap.tolist(), 'threshold_set_center': threshold_set_center.tolist(), 'threshold_set_center_norm': threshold_set_center_norm.tolist()} with open(result_plot_path + '/eval_data.pkl', 'wb') as fh: pickle.dump(eval_data, fh) return eval_data ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/analysis/playback_results.py ================================================ import os import sys import importlib import numpy as np import torch import time import matplotlib.patches as patches import cv2 as cv import matplotlib.pyplot as plt from pytracking.analysis.plot_results import get_plot_draw_styles from pytracking.utils.plotting import draw_figure from pytracking.evaluation import get_dataset, trackerlist env_path = os.path.join(os.path.dirname(__file__), '../..') if env_path not in sys.path: sys.path.append(env_path) class Display: def __init__(self, sequence_length, plot_draw_styles, sequence_name): self.active = True self.frame_number = 0 self.pause_mode = True self.step_size = 0 self.step_direction = 'forward' self.fig, self.ax = plt.subplots(1) self.fig.canvas.mpl_connect('key_press_event', self.key_callback_fn) plt.tight_layout() self.sequence_length = sequence_length self.sequence_name = sequence_name self.plot_draw_styles = plot_draw_styles def key_callback_fn(self, event): if event.key == ' ': self.pause_mode = not self.pause_mode self.step_size = 0 self.step_direction = 'forward' elif event.key == 'right': if 
self.pause_mode: self.frame_number += 1 if self.frame_number >= self.sequence_length: self.frame_number = self.sequence_length - 1 elif self.step_direction == 'stop': self.step_direction = 'forward' self.step_size = 0 elif self.step_direction == 'backward' and self.step_size == 0: self.step_direction = 'stop' else: self.step_size += 1 elif event.key == 'left': if self.pause_mode: self.frame_number -= 1 if self.frame_number < 0: self.frame_number = 0 elif self.step_direction == 'stop': self.step_direction = 'backward' self.step_size = 0 elif self.step_direction == 'forward' and self.step_size == 0: self.step_direction = 'stop' else: self.step_size -= 1 elif event.key == 'escape' or event.key == 'q': self.active = False def _get_speed(self): delta = 0 if self.step_direction == 'forward': delta = 2 ** abs(self.step_size) elif self.step_direction == 'backward': delta = -1 * 2 ** abs(self.step_size) return delta def step(self): delta = self._get_speed() self.frame_number += delta if self.frame_number < 0: self.frame_number = 0 elif self.frame_number >= self.sequence_length: self.frame_number = self.sequence_length - 1 def show(self, image, bb_list, trackers, gt=None): self.ax.cla() self.ax.imshow(image) # Draw rects rect_handles = [] for i, bb in enumerate(bb_list): rect = patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1, edgecolor=self.plot_draw_styles[i]['color'], facecolor='none') self.ax.add_patch(rect) rect_handles.append(patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1, edgecolor=self.plot_draw_styles[i]['color'], facecolor=self.plot_draw_styles[i]['color'], label=trackers[i])) if gt is not None: rect = patches.Rectangle((gt[0], gt[1]), gt[2], gt[3], linewidth=2, edgecolor='g', facecolor='none') self.ax.add_patch(rect) rect_handles.append(rect) self.ax.set_axis_off() self.ax.axis('equal') plt.legend(handles=rect_handles, loc=4, borderaxespad=0.) 
mode = 'manual' if self.pause_mode else 'auto ' speed = self._get_speed() self.fig.suptitle('Sequence: {} Mode: {} Speed: {:d}x'.format(self.sequence_name, mode, speed), fontsize=14) draw_figure(self.fig) def read_image(image_file: str): im = cv.imread(image_file) return cv.cvtColor(im, cv.COLOR_BGR2RGB) def _get_display_name(tracker): if tracker.display_name is None: if tracker.run_id is not None: return '{}_{}_{:03d}'.format(tracker.name, tracker.parameter_name, tracker.run_id) else: return '{}_{}'.format(tracker.name, tracker.parameter_name) else: return tracker.display_name def playback_results(trackers, sequence): """ Playback saved results of input trackers for a particular sequence. You can navigate the sequence using left/right arrow keys. You can also change to 'auto' mode by pressing space bar, in which case the sequence will be replayed at a particular speed. The speed for playback in 'auto' mode can be controlled using the left/right arrow keys. You can exit the application using escape or q keys. """ plot_draw_styles = get_plot_draw_styles() tracker_results = [] # Load results for trk_id, trk in enumerate(trackers): # Load results base_results_path = '{}/{}'.format(trk.results_dir, sequence.name) results_path = '{}.txt'.format(base_results_path) if os.path.isfile(results_path): try: pred_bb = torch.tensor(np.loadtxt(str(results_path), dtype=np.float64)) except: pred_bb = torch.tensor(np.loadtxt(str(results_path), delimiter=',', dtype=np.float64)) else: raise Exception('Result not found. 
def get_plot_draw_styles():
    """Fixed palette of 17 {color, line_style} dicts used to draw tracker curves."""
    colors = [
        (1.0, 0.0, 0.0),
        (0.0, 1.0, 0.0),
        (0.0, 0.0, 1.0),
        (0.0, 0.0, 0.0),
        (1.0, 0.0, 1.0),
        (0.0, 1.0, 1.0),
        (0.5, 0.5, 0.5),
        (136.0 / 255.0, 0.0, 21.0 / 255.0),
        (1.0, 127.0 / 255.0, 39.0 / 255.0),
        (0.0, 162.0 / 255.0, 232.0 / 255.0),
        (0.0, 0.5, 0.0),
        (1.0, 0.5, 0.2),
        (0.1, 0.4, 0.0),
        (0.6, 0.3, 0.9),
        (0.4, 0.7, 0.1),
        (0.2, 0.1, 0.7),
        (0.7, 0.6, 0.2),
    ]
    # Every style uses a solid line; only the color varies.
    return [{'color': c, 'line_style': '-'} for c in colors]
def check_eval_data_is_valid(eval_data, trackers, dataset):
    """Return True iff cached eval_data matches the requested trackers and sequences.

    Order matters in both lists: the cached result arrays are indexed
    positionally per sequence and per tracker.
    """
    expected_seqs = [seq.name for seq in dataset]
    expected_trackers = [(trk.name, trk.parameter_name, trk.run_id) for trk in trackers]
    saved_trackers = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']]
    return expected_seqs == eval_data['sequences'] and expected_trackers == saved_trackers
def get_tracker_display_name(tracker):
    """Human-readable label for a tracker record (dict with name/param/run_id/disp_name)."""
    # An explicit display name always wins.
    if tracker['disp_name'] is not None:
        return tracker['disp_name']
    # Otherwise synthesize "<name>_<param>", plus a zero-padded run id when present.
    if tracker['run_id'] is None:
        return '{}_{}'.format(tracker['name'], tracker['param'])
    return '{}_{}_{:03d}'.format(tracker['name'], tracker['param'], tracker['run_id'])
legend_text.append('{} [{:.1f}]'.format(disp_name, scores[id_sort])) ax.legend(plotted_lines[::-1], legend_text[::-1], loc=legend_loc, fancybox=False, edgecolor='black', fontsize=font_size_legend, framealpha=1.0) ax.set(xlabel=xlabel, ylabel=ylabel, xlim=xlim, ylim=ylim, title=title) ax.grid(True, linestyle='-.') fig.tight_layout() tikzplotlib.save('{}/{}_plot.tex'.format(result_plot_path, plot_type)) fig.savefig('{}/{}_plot.pdf'.format(result_plot_path, plot_type), dpi=300, format='pdf', transparent=True) plt.draw() def check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs): # Load data settings = env_settings() # Load pre-computed results result_plot_path = os.path.join(settings.result_plot_path, report_name) eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl') if os.path.isfile(eval_data_path) and not force_evaluation: with open(eval_data_path, 'rb') as fh: eval_data = pickle.load(fh) else: # print('Pre-computed evaluation data not found. Computing results!') eval_data = extract_results(trackers, dataset, report_name, **kwargs) if not check_eval_data_is_valid(eval_data, trackers, dataset): # print('Pre-computed evaluation data invalid. 
def get_auc_curve(ave_success_rate_plot_overlap, valid_sequence):
    """Average the per-sequence success curves over the valid sequences.

    Returns (auc_curve, auc): curve is (num_trackers, num_thresholds) in
    percent; auc is its mean over thresholds (the area-under-curve score).
    """
    per_tracker = ave_success_rate_plot_overlap[valid_sequence, :, :].mean(0) * 100.0
    return per_tracker, per_tracker.mean(-1)


def get_prec_curve(ave_success_rate_plot_center, valid_sequence):
    """Average the per-sequence precision curves over the valid sequences.

    Returns (prec_curve, prec_score): the score is the curve value at
    threshold index 20 (a 20-pixel / 0.20-normalized error threshold).
    """
    curve = ave_success_rate_plot_center[valid_sequence, :, :].mean(0) * 100.0
    return curve, curve[:, 20]
Can contain 'success', 'prec' (precision), and 'norm_prec' (normalized precision) """ # Load data settings = env_settings() plot_draw_styles = get_plot_draw_styles() # Load pre-computed results result_plot_path = os.path.join(settings.result_plot_path, report_name) eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) print('\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) print('\nGenerating plots for: {}'.format(report_name)) # ******************************** Success Plot ************************************** if 'success' in plot_types: ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) # Index out valid sequences auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence) threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap']) success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold', 'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'} plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts) # ******************************** Precision Plot ************************************** if 'prec' in plot_types: ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence) threshold_set_center = torch.tensor(eval_data['threshold_set_center']) precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right', 'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance 
def generate_formatted_report(row_labels, scores, table_name=''):
    """Render a fixed-width text table: one row per label, one column per score type.

    ``scores`` maps a column name to a per-row tensor of values; each value is
    printed with two decimals via ``.item()``.
    """
    # Label column is wide enough for the longest label (or the table name) plus padding.
    name_width = max(len(label) for label in list(row_labels) + [table_name]) + 5
    min_score_width = 10
    col_widths = [max(min_score_width, len(name) + 3) for name in scores]

    # Header row.
    lines = []
    header = '{label: <{width}} |'.format(label=table_name, width=name_width)
    for name, width in zip(scores, col_widths):
        header += ' {s: <{width}} |'.format(s=name, width=width)
    lines.append(header)

    # One row per tracker / label.
    for row_idx, label in enumerate(row_labels):
        row = '{tracker: <{width}} |'.format(tracker=label, width=name_width)
        for values, width in zip(scores.values(), col_widths):
            row += ' {score: <{width}} |'.format(score='{:0.2f}'.format(values[row_idx].item()),
                                                 width=width)
        lines.append(row)

    # Leading and trailing newlines match the original report format.
    return '\n' + '\n'.join(lines) + '\n'
merge_results=False, plot_types=('success'), **kwargs): """ Print the results for the given trackers in a formatted table args: trackers - List of trackers to evaluate dataset - List of sequences to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved merge_results - If True, multiple random runs for a non-deterministic trackers are averaged plot_types - List of scores to display. Can contain 'success' (prints AUC, OP50, and OP75 scores), 'prec' (prints precision score), and 'norm_prec' (prints normalized precision score) """ # Load pre-computed results eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) print('\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) scores = {} # ******************************** Success Plot ************************************** if 'success' in plot_types: threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap']) ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) # Index out valid sequences auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence) scores['AUC'] = auc scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50] scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75] # ******************************** Precision Plot ************************************** if 'prec' in plot_types: ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence) scores['Precision'] = prec_score # ******************************** Norm Precision Plot 
********************************* if 'norm_prec' in plot_types: ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm']) # Index out valid sequences norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence) scores['Norm Precision'] = norm_prec_score # Print tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names] report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name) print(report_text) def plot_got_success(trackers, report_name): """ Plot success plot for GOT-10k dataset using the json reports. Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to env_settings.got_reports_path The tracker name in the experiment file should be set to the name of the report file for that tracker, e.g. DiMP50_report_2019_09_02_15_44_25 if the report is name DiMP50_report_2019_09_02_15_44_25.json args: trackers - List of trackers to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved """ # Load data settings = env_settings() plot_draw_styles = get_plot_draw_styles() result_plot_path = os.path.join(settings.result_plot_path, report_name) auc_curve = torch.zeros((len(trackers), 101)) scores = torch.zeros(len(trackers)) # Load results tracker_names = [] for trk_id, trk in enumerate(trackers): json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name) if os.path.isfile(json_path): with open(json_path, 'r') as f: eval_data = json.load(f) else: raise Exception('Report not found {}'.format(json_path)) if len(eval_data.keys()) > 1: raise Exception # First field is the tracker name. 
Index it out eval_data = eval_data[list(eval_data.keys())[0]] if 'succ_curve' in eval_data.keys(): curve = eval_data['succ_curve'] ao = eval_data['ao'] elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys(): curve = eval_data['overall']['succ_curve'] ao = eval_data['overall']['ao'] else: raise Exception('Invalid JSON file {}'.format(json_path)) auc_curve[trk_id, :] = torch.tensor(curve) * 100.0 scores[trk_id] = ao * 100.0 tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id, 'disp_name': trk.display_name}) threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64) success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold', 'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'} plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts) plt.show() def print_per_sequence_results(trackers, dataset, report_name, merge_results=False, filter_criteria=None, **kwargs): """ Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered using the filter criteria. args: trackers - List of trackers to evaluate dataset - List of sequences to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved merge_results - If True, multiple random runs for a non-deterministic trackers are averaged filter_criteria - Filter sequence results which are reported. Following modes are supported None: No filtering. Display results for all sequences in dataset 'ao_min': Only display sequences for which the minimum average overlap (AO) score over the trackers is less than a threshold filter_criteria['threshold']. This mode can be used to select sequences where at least one tracker performs poorly. 
'ao_max': Only display sequences for which the maximum average overlap (AO) score over the trackers is less than a threshold filter_criteria['threshold']. This mode can be used to select sequences all tracker performs poorly. 'delta_ao': Only display sequences for which the performance of different trackers vary by at least filter_criteria['threshold'] in average overlap (AO) score. This mode can be used to select sequences where the behaviour of the trackers greatly differ between each other. """ # Load pre-computed results eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) sequence_names = eval_data['sequences'] avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0 # Filter sequences if filter_criteria is not None: if filter_criteria['mode'] == 'ao_min': min_ao = avg_overlap_all.min(dim=1)[0] valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold']) elif filter_criteria['mode'] == 'ao_max': max_ao = avg_overlap_all.max(dim=1)[0] valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold']) elif filter_criteria['mode'] == 'delta_ao': min_ao = avg_overlap_all.min(dim=1)[0] max_ao = avg_overlap_all.max(dim=1)[0] valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold']) else: raise Exception avg_overlap_all = avg_overlap_all[valid_sequence, :] sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in enumerate(zip(sequence_names, valid_sequence.tolist())) if v] tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names] scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)} report_text = generate_formatted_report(sequence_names, scores_per_tracker) print(report_text) 
def text_bargraph(values):
    """Render a sequence of values in [0, 1] as a one-line unicode bar graph.

    Values below 0 render as 'u', above 1 as 'o', NaNs as a shaded block.
    """
    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))
    nsteps = len(blocks) - 2 - 1  # number of real bar heights (excludes the two sentinels)
    hstep = 1 / (2 * nsteps)      # half a step, used to round to the nearest bar
    values = np.array(values)
    nans = np.isnan(values)
    values[nans] = 0  # placeholder; overwritten with '░' below
    # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int dtype.
    indices = ((values + hstep) * nsteps + 1).astype(int)
    indices[values < 0] = 0
    indices[values > 1] = len(blocks) - 1
    graph = blocks[indices]
    graph[nans] = '░'
    graph = str.join('', graph)
    return graph

# ----------------------------------------------------------------------------
# The 2017 DAVIS Challenge on Video Object Segmentation
# -----------------------------------------------------------------------------
# Copyright (c) 2017 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi (federico@disneyresearch.com)
# Adapted from DAVIS 2016 (Federico Perazzi)
# ----------------------------------------------------------------------------

# Originally db_eval_iou() in the davis challenge toolkit:
def davis_jaccard_measure(fg_mask, gt_mask):
    """ Compute region similarity as the Jaccard Index.
    :param fg_mask: (ndarray): binary segmentation map.
    :param gt_mask: (ndarray): binary annotation map.
    :return: jaccard (float): region similarity
    """
    # BUG FIX: np.bool was removed in NumPy 1.24 -- the builtin bool is the dtype.
    gt_mask = gt_mask.astype(bool)
    fg_mask = fg_mask.astype(bool)

    # Both masks empty is defined as perfect agreement.
    if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):
        return 1
    else:
        return np.sum((gt_mask & fg_mask)) / \
               np.sum((gt_mask | fg_mask), dtype=np.float32)


def davis_jaccard_measure_torch(fg_mask, gt_mask):
    """ Compute region similarity as the Jaccard Index, for boolean torch tensors.
    :param fg_mask: binary segmentation map.
    :param gt_mask: binary annotation map.
    :return: jaccard (float): region similarity
    """
    if gt_mask.sum() == 0 and fg_mask.sum() == 0:
        return 1
    else:
        return (gt_mask & fg_mask).sum() / \
               (gt_mask | fg_mask).sum().float()


# Originally db_eval_boundary() in the davis challenge toolkit:
def davis_f_measure(foreground_mask, gt_mask, bound_th=0.008):
    """
    Compute mean, recall and decay from per-frame evaluation.
    Calculates precision/recall for boundaries between foreground_mask and
    gt_mask using morphological operators to speed it up.

    Arguments:
        foreground_mask (ndarray): binary segmentation image.
        gt_mask         (ndarray): binary annotated image.

    Returns:
        F (float): boundaries F-measure
    """
    assert np.atleast_3d(foreground_mask).shape[2] == 1

    # bound_th >= 1 is an absolute pixel tolerance; < 1 is relative to the image diagonal.
    bound_pix = bound_th if bound_th >= 1 else \
        np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))

    # Get the pixel boundaries of both masks.
    fg_boundary = seg2bmap(foreground_mask)
    gt_boundary = seg2bmap(gt_mask)

    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))

    # Boundary pixels of one mask that fall within tolerance of the other.
    gt_match = gt_boundary * fg_dil
    fg_match = fg_boundary * gt_dil

    # Area of the intersection.
    n_fg = np.sum(fg_boundary)
    n_gt = np.sum(gt_boundary)

    # Compute precision and recall, handling empty-boundary degenerate cases.
    if n_fg == 0 and n_gt > 0:
        precision = 1
        recall = 0
    elif n_fg > 0 and n_gt == 0:
        precision = 0
        recall = 1
    elif n_fg == 0 and n_gt == 0:
        precision = 1
        recall = 1
    else:
        precision = np.sum(fg_match) / float(n_fg)
        recall = np.sum(gt_match) / float(n_gt)

    # Compute F measure.
    if precision + recall == 0:
        F = 0
    else:
        F = 2 * precision * recall / (precision + recall)
    return F


def seg2bmap(seg, width=None, height=None):
    """
    From a segmentation, compute a binary boundary map with 1 pixel wide
    boundaries. The boundary pixels are offset by 1/2 pixel towards the
    origin from the actual segment boundary.

    Arguments:
        seg    : Segments labeled from 1..k.
        width  : Width of desired bmap  <= seg.shape[1]
        height : Height of desired bmap <= seg.shape[0]

    Returns:
        bmap (ndarray): Binary boundary map.

    David Martin <dmartin@eecs.berkeley.edu>
    January 2003
    """
    # BUG FIX: np.bool was removed in NumPy 1.24.
    seg = seg.astype(bool)
    seg[seg > 0] = 1

    assert np.atleast_3d(seg).shape[2] == 1

    width = seg.shape[1] if width is None else width
    height = seg.shape[0] if height is None else height

    h, w = seg.shape[:2]

    ar1 = float(width) / float(height)
    ar2 = float(w) / float(h)

    # BUG FIX: the original used bitwise '|' whose precedence turned the check
    # into 'width > (w | height) > ...'. Use logical 'or'.
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \
        'Can\'t convert %dx%d seg to %dx%d bmap.' % (w, h, width, height)

    e = np.zeros_like(seg)
    s = np.zeros_like(seg)
    se = np.zeros_like(seg)

    # Shifted copies: east, south and south-east neighbours.
    e[:, :-1] = seg[:, 1:]
    s[:-1, :] = seg[1:, :]
    se[:-1, :-1] = seg[1:, 1:]

    # A pixel is a boundary pixel if it differs from any of those neighbours.
    b = seg ^ e | seg ^ s | seg ^ se
    b[-1, :] = seg[-1, :] ^ e[-1, :]
    b[:, -1] = seg[:, -1] ^ s[:, -1]
    b[-1, -1] = 0

    if w == width and h == height:
        bmap = b
    else:
        # Rescale boundary pixels to the requested size.
        # BUG FIX: the original computed '(y - 1) + height / h' (additive) and
        # divided the x coordinate by h; the Martin formulation scales each
        # axis multiplicatively by its own ratio.
        bmap = np.zeros((height, width))
        for x in range(w):
            for y in range(h):
                if b[y, x]:
                    j = 1 + floor((y - 1) * height / h)
                    i = 1 + floor((x - 1) * width / w)
                    bmap[j, i] = 1

    return bmap


def nanmean(*args, **kwargs):
    """np.nanmean with the all-NaN RuntimeWarning silenced."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        return np.nanmean(*args, **kwargs)


def mean(X):
    """ Compute average ignoring NaN values. """
    return np.nanmean(X)


def recall(X, threshold=0.5):
    """ Fraction of values of X scoring higher than 'threshold' """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        x = X[~np.isnan(X)]
        x = mean(x > threshold)
    return x


def decay(X, n_bins=4):
    """ Performance loss over time: mean of the first time-bin minus the last. """
    X = X[~np.isnan(X)]
    ids = np.round(np.linspace(1, len(X), n_bins + 1) + 1e-10) - 1
    # BUG FIX: uint8 overflowed the bin indices for sequences longer than 255 frames.
    ids = ids.astype(int)

    # Split into n_bins chunks (previously hard-coded to 4).
    D_bins = [X[ids[i]:ids[i + 1] + 1] for i in range(n_bins)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[-1])
    return D


def std(X):
    """ Compute standard deviation ignoring NaN values. """
    return np.nanstd(X)
""" return np.nanstd(X) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/__init__.py ================================================ from .data import Sequence from .tracker import Tracker, trackerlist from .datasets import get_dataset ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/data.py ================================================ import numpy as np from pytracking.evaluation.environment import env_settings from ltr.data.image_loader import imread_indexed from collections import OrderedDict class BaseDataset: """Base class for all datasets.""" def __init__(self): self.env_settings = env_settings() def __len__(self): """Overload this function in your dataset. This should return number of sequences in the dataset.""" raise NotImplementedError def get_sequence_list(self): """Overload this in your dataset. Should return the list of sequences in the dataset.""" raise NotImplementedError class Sequence: """Class for the sequence in an evaluation.""" def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None, object_class=None, target_visible=None, object_ids=None, multiobj_mode=False): self.name = name self.frames = frames self.dataset = dataset self.ground_truth_rect = ground_truth_rect self.ground_truth_seg = ground_truth_seg self.object_class = object_class self.target_visible = target_visible self.object_ids = object_ids self.multiobj_mode = multiobj_mode self.init_data = self._construct_init_data(init_data) self._ensure_start_frame() def _ensure_start_frame(self): # Ensure start frame is 0 start_frame = min(list(self.init_data.keys())) if start_frame > 0: self.frames = self.frames[start_frame:] if self.ground_truth_rect is not None: if isinstance(self.ground_truth_rect, (dict, OrderedDict)): for obj_id, gt in self.ground_truth_rect.items(): self.ground_truth_rect[obj_id] = gt[start_frame:,:] else: 
self.ground_truth_rect = self.ground_truth_rect[start_frame:,:] if self.ground_truth_seg is not None: self.ground_truth_seg = self.ground_truth_seg[start_frame:] assert len(self.frames) == len(self.ground_truth_seg) if self.target_visible is not None: self.target_visible = self.target_visible[start_frame:] self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()} def _construct_init_data(self, init_data): if init_data is not None: if not self.multiobj_mode: assert self.object_ids is None or len(self.object_ids) == 1 for frame, init_val in init_data.items(): if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)): init_val['bbox'] = init_val['bbox'][self.object_ids[0]] # convert to list for frame, init_val in init_data.items(): if 'bbox' in init_val: if isinstance(init_val['bbox'], (dict, OrderedDict)): init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()}) else: init_val['bbox'] = list(init_val['bbox']) else: init_data = {0: dict()} # Assume start from frame 0 if self.object_ids is not None: init_data[0]['object_ids'] = self.object_ids if self.ground_truth_rect is not None: if self.multiobj_mode: assert isinstance(self.ground_truth_rect, (dict, OrderedDict)) init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()}) else: assert self.object_ids is None or len(self.object_ids) == 1 if isinstance(self.ground_truth_rect, (dict, OrderedDict)): init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :]) else: init_data[0]['bbox'] = list(self.ground_truth_rect[0,:]) if self.ground_truth_seg is not None: init_data[0]['mask'] = self.ground_truth_seg[0] return init_data def init_info(self): info = self.frame_info(frame_num=0) return info def frame_info(self, frame_num): info = self.object_init_data(frame_num=frame_num) return info def init_bbox(self, frame_num=0): return 
self.object_init_data(frame_num=frame_num).get('init_bbox') def init_mask(self, frame_num=0): return self.object_init_data(frame_num=frame_num).get('init_mask') def get_info(self, keys, frame_num=None): info = dict() for k in keys: val = self.get(k, frame_num=frame_num) if val is not None: info[k] = val return info def object_init_data(self, frame_num=None) -> dict: if frame_num is None: frame_num = 0 if frame_num not in self.init_data: return dict() init_data = dict() for key, val in self.init_data[frame_num].items(): if val is None: continue init_data['init_'+key] = val if 'init_mask' in init_data and init_data['init_mask'] is not None: anno = imread_indexed(init_data['init_mask']) if not self.multiobj_mode and self.object_ids is not None: assert len(self.object_ids) == 1 anno = (anno == int(self.object_ids[0])).astype(np.uint8) init_data['init_mask'] = anno if self.object_ids is not None: init_data['object_ids'] = self.object_ids init_data['sequence_object_ids'] = self.object_ids return init_data def target_class(self, frame_num=None): return self.object_class def get(self, name, frame_num=None): return getattr(self, name)(frame_num) def __repr__(self): return "{self.__class__.__name__} {self.name}, length={len} frames".format(self=self, len=len(self.frames)) class SequenceList(list): """List of sequences. 
    Supports the addition operator to concatenate sequence lists."""
    def __getitem__(self, item):
        # Lookup by sequence name (str), integer index, collection of
        # indices, or slice — the latter three mirror list semantics.
        if isinstance(item, str):
            for seq in self:
                if seq.name == item:
                    return seq
            raise IndexError('Sequence name not in the dataset.')
        elif isinstance(item, int):
            return super(SequenceList, self).__getitem__(item)
        elif isinstance(item, (tuple, list)):
            return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])
        else:
            return SequenceList(super(SequenceList, self).__getitem__(item))

    def __add__(self, other):
        # Keep the SequenceList type under concatenation.
        return SequenceList(super(SequenceList, self).__add__(other))

    def copy(self):
        return SequenceList(super(SequenceList, self).copy())



================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/datasets.py ================================================
from collections import namedtuple
import importlib
from pytracking.evaluation.data import SequenceList

# Descriptor for a lazily-imported dataset: module path, class name and
# constructor keyword arguments.
DatasetInfo = namedtuple('DatasetInfo', ['module', 'class_name', 'kwargs'])

pt = "pytracking.evaluation.%sdataset"  # Useful abbreviations to reduce the clutter

# Registry of all evaluable datasets, keyed by the (lower-case) name passed
# to get_dataset().
dataset_dict = dict(
    otb=DatasetInfo(module=pt % "otb", class_name="OTBDataset", kwargs=dict()),
    nfs=DatasetInfo(module=pt % "nfs", class_name="NFSDataset", kwargs=dict()),
    uav=DatasetInfo(module=pt % "uav", class_name="UAVDataset", kwargs=dict()),
    tpl=DatasetInfo(module=pt % "tpl", class_name="TPLDataset", kwargs=dict()),
    tpl_nootb=DatasetInfo(module=pt % "tpl", class_name="TPLDataset", kwargs=dict(exclude_otb=True)),
    vot=DatasetInfo(module=pt % "vot", class_name="VOTDataset", kwargs=dict()),
    trackingnet=DatasetInfo(module=pt % "trackingnet", class_name="TrackingNetDataset", kwargs=dict()),
    got10k_test=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='test')),
    got10k_val=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='val')),
    got10k_ltrval=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='ltrval')),
    lasot=DatasetInfo(module=pt % "lasot", class_name="LaSOTDataset", kwargs=dict()),
    dv2017_val=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2017', split='val')),
    dv2016_val=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2016', split='val')),
    dv2017_test_dev=DatasetInfo(module="ltr.dataset.davis", class_name="Davis",
                                kwargs=dict(version='2017', split='test-dev')),
    dv2017_test_chal=DatasetInfo(module="ltr.dataset.davis", class_name="Davis",
                                 kwargs=dict(version='2017', split='test-challenge')),
    yt2019_test=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                            kwargs=dict(version='2019', split='test')),
    yt2019_valid=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                             kwargs=dict(version='2019', split='valid')),
    yt2019_valid_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                                 kwargs=dict(version='2019', split='valid', all_frames=True)),
    yt2018_valid_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                                 kwargs=dict(version='2018', split='valid', all_frames=True)),
    yt2018_jjval=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                             kwargs=dict(version='2018', split='jjvalid')),
    yt2019_jjval=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                             kwargs=dict(version='2019', split='jjvalid', cleanup=['starts'])),
    yt2019_jjval_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS",
                                 kwargs=dict(version='2019', split='jjvalid', all_frames=True, cleanup=['starts'])),
)


def load_dataset(name: str):
    """ Import and load a single dataset."""
    name = name.lower()
    dset_info = dataset_dict.get(name)
    if dset_info is None:
        raise ValueError('Unknown dataset \'%s\'' % name)

    m = importlib.import_module(dset_info.module)
    dataset = getattr(m, dset_info.class_name)(**dset_info.kwargs)  # Call the constructor
    return dataset.get_sequence_list()


def get_dataset(*args):
    """ Get a single or set of datasets."""
    dset = SequenceList()
    for name in args:
        dset.extend(load_dataset(name))
    return dset



================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/environment.py ================================================
import importlib
import os


class EnvSettings:
    """Container for all local paths used by the evaluation scripts."""
    def __init__(self):
        pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

        # Output locations, created under the pytracking package by default.
        self.results_path = '{}/tracking_results/'.format(pytracking_path)
        self.segmentation_path = '{}/segmentation_results/'.format(pytracking_path)
        self.network_path = '{}/networks/'.format(pytracking_path)
        self.result_plot_path = '{}/result_plots/'.format(pytracking_path)
        # Dataset roots: intentionally empty here; the user fills them in via
        # the generated local.py (see create_default_local_file below).
        self.otb_path = ''
        self.nfs_path = ''
        self.uav_path = ''
        self.tpl_path = ''
        self.vot_path = ''
        self.got10k_path = ''
        self.lasot_path = ''
        self.trackingnet_path = ''
        self.davis_dir = ''
        self.youtubevos_dir = ''

        self.got_packed_results_path = ''
        self.got_reports_path = ''
        self.tn_packed_results_path = ''


def create_default_local_file():
    """Write a template local.py next to this module for the user to edit."""
    comment = {'results_path': 'Where to store tracking results',
               'network_path': 'Where tracking networks are stored.'}

    path = os.path.join(os.path.dirname(__file__), 'local.py')
    with open(path, 'w') as f:
        settings = EnvSettings()

        f.write('from pytracking.evaluation.environment import EnvSettings\n\n')
        f.write('def local_env_settings():\n')
        f.write('    settings = EnvSettings()\n\n')
        f.write('    # Set your local paths here.\n\n')

        for attr in dir(settings):
            comment_str = None
            if attr in comment:
                comment_str = comment[attr]
            attr_val = getattr(settings, attr)
            # Skip dunders and methods; every plain value is emitted as a
            # quoted string assignment.
            if not attr.startswith('__') and not callable(attr_val):
                if comment_str is None:
                    f.write('    settings.{} = \'{}\'\n'.format(attr, attr_val))
                else:
                    f.write('    settings.{} = \'{}\'    # {}\n'.format(attr, attr_val, comment_str))
        f.write('\n    return settings\n\n')


def env_settings():
    """Load the user's local.py settings, generating a template on first run."""
    env_module_name = 'pytracking.evaluation.local'
    try:
        env_module = importlib.import_module(env_module_name)
        return env_module.local_env_settings()
    except:
        env_file = os.path.join(os.path.dirname(__file__), 'local.py')

        # Create a default file
        create_default_local_file()
        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. '
                           'Then try to run again.'.format(env_file))



================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/got10kdataset.py ================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from pytracking.utils.load_text import load_text
import os


class GOT10KDataset(BaseDataset):
    """ GOT-10k dataset.

    Publication:
        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
        Lianghua Huang, Xin Zhao, and Kaiqi Huang
        arXiv:1810.11981, 2018
        https://arxiv.org/pdf/1810.11981.pdf

    Download dataset from http://got-10k.aitestunion.com/downloads
    """
    def __init__(self, split):
        super().__init__()
        # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set)
        if split == 'test' or split == 'val':
            self.base_path = os.path.join(self.env_settings.got10k_path, split)
        else:
            # 'ltrval' sequences live inside the official train split.
            self.base_path = os.path.join(self.env_settings.got10k_path, 'train')

        self.sequence_list = self._get_sequence_list(split)
        self.split = split

    def get_sequence_list(self):
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        # groundtruth.txt holds one comma-separated box per frame.
        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)

        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)

        frames_path = '{}/{}'.format(self.base_path, sequence_name)
        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")]
        # Frame files are numbered; sort numerically, not lexicographically.
        frame_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]

        return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4))

    def __len__(self):
        return len(self.sequence_list)

    def _get_sequence_list(self, split):
        with open('{}/list.txt'.format(self.base_path)) as f:
            sequence_list = f.read().splitlines()

        if split == 'ltrval':
            # Select the validation subset of train by index file.
            with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f:
                seq_ids = f.read().splitlines()

            sequence_list = [sequence_list[int(x)] for x in seq_ids]
        return sequence_list



================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/lasotdataset.py ================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from pytracking.utils.load_text import load_text


class LaSOTDataset(BaseDataset):
    """ LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)

    Publication:
        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking
        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling
        CVPR, 2019
        https://arxiv.org/pdf/1809.07845.pdf

    Download the dataset from https://cis.temple.edu/lasot/download.html
    """
    def __init__(self):
        super().__init__()
        self.base_path = self.env_settings.lasot_path
        self.sequence_list = self._get_sequence_list()
        self.clean_list = self.clean_seq_list()

    def clean_seq_list(self):
        # Class name of every sequence ('airplane-1' -> 'airplane').
        clean_lst = []
        for i in range(len(self.sequence_list)):
            cls, _ = self.sequence_list[i].split('-')
            clean_lst.append(cls)
        return clean_lst

    def get_sequence_list(self):
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        # Sequences are stored as <base>/<class>/<class-id>/.
        class_name = sequence_name.split('-')[0]
        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)

        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)

        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)

        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks
full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy') out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name) out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy') target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0) frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name) frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)] target_class = class_name return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4), object_class=target_class, target_visible=target_visible) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self): sequence_list = ['airplane-1', 'airplane-9', 'airplane-13', 'airplane-15', 'basketball-1', 'basketball-6', 'basketball-7', 'basketball-11', 'bear-2', 'bear-4', 'bear-6', 'bear-17', 'bicycle-2', 'bicycle-7', 'bicycle-9', 'bicycle-18', 'bird-2', 'bird-3', 'bird-15', 'bird-17', 'boat-3', 'boat-4', 'boat-12', 'boat-17', 'book-3', 'book-10', 'book-11', 'book-19', 'bottle-1', 'bottle-12', 'bottle-14', 'bottle-18', 'bus-2', 'bus-5', 'bus-17', 'bus-19', 'car-2', 'car-6', 'car-9', 'car-17', 'cat-1', 'cat-3', 'cat-18', 'cat-20', 'cattle-2', 'cattle-7', 'cattle-12', 'cattle-13', 'spider-14', 'spider-16', 'spider-18', 'spider-20', 'coin-3', 'coin-6', 'coin-7', 'coin-18', 'crab-3', 'crab-6', 'crab-12', 'crab-18', 'surfboard-12', 'surfboard-4', 'surfboard-5', 'surfboard-8', 'cup-1', 'cup-4', 'cup-7', 'cup-17', 'deer-4', 'deer-8', 'deer-10', 'deer-14', 'dog-1', 'dog-7', 'dog-15', 'dog-19', 'guitar-3', 'guitar-8', 'guitar-10', 'guitar-16', 'person-1', 'person-5', 'person-10', 'person-12', 'pig-2', 'pig-10', 'pig-13', 'pig-18', 'rubicCube-1', 'rubicCube-6', 'rubicCube-14', 'rubicCube-19', 'swing-10', 'swing-14', 'swing-17', 'swing-20', 'drone-13', 'drone-15', 
'drone-2', 'drone-7', 'pool-12', 'pool-15', 'pool-3', 'pool-7', 'rabbit-10', 'rabbit-13', 'rabbit-17', 'rabbit-19', 'racing-10', 'racing-15', 'racing-16', 'racing-20', 'robot-1', 'robot-19', 'robot-5', 'robot-8', 'sepia-13', 'sepia-16', 'sepia-6', 'sepia-8', 'sheep-3', 'sheep-5', 'sheep-7', 'sheep-9', 'skateboard-16', 'skateboard-19', 'skateboard-3', 'skateboard-8', 'tank-14', 'tank-16', 'tank-6', 'tank-9', 'tiger-12', 'tiger-18', 'tiger-4', 'tiger-6', 'train-1', 'train-11', 'train-20', 'train-7', 'truck-16', 'truck-3', 'truck-6', 'truck-7', 'turtle-16', 'turtle-5', 'turtle-8', 'turtle-9', 'umbrella-17', 'umbrella-19', 'umbrella-2', 'umbrella-9', 'yoyo-15', 'yoyo-17', 'yoyo-19', 'yoyo-7', 'zebra-10', 'zebra-14', 'zebra-16', 'zebra-17', 'elephant-1', 'elephant-12', 'elephant-16', 'elephant-18', 'goldfish-3', 'goldfish-7', 'goldfish-8', 'goldfish-10', 'hat-1', 'hat-2', 'hat-5', 'hat-18', 'kite-4', 'kite-6', 'kite-10', 'kite-15', 'motorcycle-1', 'motorcycle-3', 'motorcycle-9', 'motorcycle-18', 'mouse-1', 'mouse-8', 'mouse-9', 'mouse-17', 'flag-3', 'flag-9', 'flag-5', 'flag-2', 'frog-3', 'frog-4', 'frog-20', 'frog-9', 'gametarget-1', 'gametarget-2', 'gametarget-7', 'gametarget-13', 'hand-2', 'hand-3', 'hand-9', 'hand-16', 'helmet-5', 'helmet-11', 'helmet-19', 'helmet-13', 'licenseplate-6', 'licenseplate-12', 'licenseplate-13', 'licenseplate-15', 'electricfan-1', 'electricfan-10', 'electricfan-18', 'electricfan-20', 'chameleon-3', 'chameleon-6', 'chameleon-11', 'chameleon-20', 'crocodile-3', 'crocodile-4', 'crocodile-10', 'crocodile-14', 'gecko-1', 'gecko-5', 'gecko-16', 'gecko-19', 'fox-2', 'fox-3', 'fox-5', 'fox-20', 'giraffe-2', 'giraffe-10', 'giraffe-13', 'giraffe-15', 'gorilla-4', 'gorilla-6', 'gorilla-9', 'gorilla-13', 'hippo-1', 'hippo-7', 'hippo-9', 'hippo-20', 'horse-1', 'horse-4', 'horse-12', 'horse-15', 'kangaroo-2', 'kangaroo-5', 'kangaroo-11', 'kangaroo-14', 'leopard-1', 'leopard-7', 'leopard-16', 'leopard-20', 'lion-1', 'lion-5', 'lion-12', 'lion-20', 
'lizard-1', 'lizard-3', 'lizard-6', 'lizard-13', 'microphone-2', 'microphone-6', 'microphone-14', 'microphone-16', 'monkey-3', 'monkey-4', 'monkey-9', 'monkey-17', 'shark-2', 'shark-3', 'shark-5', 'shark-6', 'squirrel-8', 'squirrel-11', 'squirrel-13', 'squirrel-19', 'volleyball-1', 'volleyball-13', 'volleyball-18', 'volleyball-19'] return sequence_list ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/mobifacedataset.py ================================================ from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList import glob import numpy as np import os.path as osp from collections import OrderedDict import pandas as pd class MobifaceDataset(BaseDataset): """ Mobiface dataset. Publication: MobiFace: A Novel Dataset for Mobile Face Tracking in the Wild Yiming Lin, Shiyang Cheng, Jie Shen, Maja Pantic arXiv:1805.09749, 2018 https://arxiv.org/pdf/1805.09749v2 Download dataset from https://mobiface.github.io/ """ def __init__(self, split): """ args: split - Split to use. Can be i) 'train': official training set, ii) 'test': official test set, iii) 'all': whole dataset. 
""" super().__init__() self.base_path = self.env_settings.mobiface_path self.sequence_list = self._get_sequence_list(split) self.split = split def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _get_sequence_list(self, split): self.train_meta_fn = osp.join(self.base_path, 'train.meta.csv') self.test_meta_fn = osp.join(self.base_path, 'test.meta.csv') self.train_meta = pd.read_csv(self.train_meta_fn,index_col=0).transpose().to_dict() self.test_meta = pd.read_csv(self.test_meta_fn,index_col=0).transpose().to_dict() if split == 'train': self.meta = self.train_meta elif split == 'test': self.meta = self.test_meta else: self.meta = {**self.train_meta, **self.test_meta} # In Python 3.5 or greater self.meta = OrderedDict(sorted(self.meta.items(), key=lambda t: t[0])) self.anno_files = [] for k,v in self.meta.items(): if k in self.train_meta.keys(): self.anno_files.append(osp.abspath(osp.join(self.base_path,'train', k+'.annot.csv'))) else: self.anno_files.append(osp.abspath(osp.join(self.base_path,'test', k+'.annot.csv'))) self.seq_names = sorted(list(self.meta.keys())) self.seq_dirs = [fn[:-len('.annot.csv')] for fn in self.anno_files] return self.seq_names def _construct_sequence(self, sequence_name): index = self.seq_names.index(sequence_name) img_files = sorted(glob.glob(self.seq_dirs[index]+'/*.jpg')) if len(img_files) == 0: img_files = sorted(glob.glob(self.seq_dirs[index]+'.png')) with open(self.anno_files[index], 'r') as f: anno = np.loadtxt(f, delimiter=',', skiprows=1, dtype=int) anno = anno[:,1:] assert anno.shape[1] == 4 return Sequence(sequence_name, img_files, anno.reshape(-1, 4)) def __len__(self): return len(self.sequence_list) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/multi_object_wrapper.py ================================================ import numpy as np from collections import OrderedDict import time import copy class 
MultiObjectWrapper: def __init__(self, base_tracker_class, params, visdom=None, fast_load=False): self.base_tracker_class = base_tracker_class self.params = params self.visdom = visdom self.initialized_ids = [] self.trackers = OrderedDict() self.fast_load = fast_load if self.fast_load: self.tracker_copy = self.base_tracker_class(self.params) if hasattr(self.tracker_copy, 'initialize_features'): self.tracker_copy.initialize_features() def create_tracker(self): tracker = None if self.fast_load: try: tracker = copy.deepcopy(self.tracker_copy) except: pass if tracker is None: tracker = self.base_tracker_class(self.params) tracker.visdom = self.visdom return tracker def _split_info(self, info): info_split = OrderedDict() init_other = OrderedDict() # Init other contains init info for all other objects for obj_id in info['init_object_ids']: info_split[obj_id] = dict() init_other[obj_id] = dict() info_split[obj_id]['object_ids'] = [obj_id] info_split[obj_id]['sequence_object_ids'] = info['sequence_object_ids'] if 'init_bbox' in info: info_split[obj_id]['init_bbox'] = info['init_bbox'][obj_id] init_other[obj_id]['init_bbox'] = info['init_bbox'][obj_id] if 'init_mask' in info: info_split[obj_id]['init_mask'] = (info['init_mask'] == int(obj_id)).astype(np.uint8) init_other[obj_id]['init_mask'] = info_split[obj_id]['init_mask'] for obj_info in info_split.values(): obj_info['init_other'] = init_other return info_split def _set_defaults(self, tracker_out: dict, defaults=None): defaults = {} if defaults is None else defaults for key, val in defaults.items(): if tracker_out.get(key) is None: tracker_out[key] = val return tracker_out def default_merge(self, out_all): out_merged = OrderedDict() out_first = list(out_all.values())[0] out_types = out_first.keys() # Merge segmentation mask if 'segmentation' in out_types and out_first['segmentation'] is not None: # Stack all masks # If a tracker outputs soft segmentation mask, use that. 
Else use the binary segmentation segmentation_maps = [out.get('segmentation_soft', out['segmentation']) for out in out_all.values()] segmentation_maps = np.stack(segmentation_maps) obj_ids = np.array([0, *map(int, out_all.keys())], dtype=np.uint8) segm_threshold = getattr(self.params, 'segmentation_threshold', 0.5) merged_segmentation = obj_ids[np.where(segmentation_maps.max(axis=0) > segm_threshold, segmentation_maps.argmax(axis=0) + 1, 0)] out_merged['segmentation'] = merged_segmentation # Merge other fields for key in out_types: if key == 'segmentation': pass else: out_merged[key] = {obj_id: out[key] for obj_id, out in out_all.items()} return out_merged def merge_outputs(self, out_all): if hasattr(self.base_tracker_class, 'merge_results'): out_merged = self.base_tracker_class.merge_results(out_all) else: out_merged = self.default_merge(out_all) return out_merged def initialize(self, image, info: dict) -> dict: self.initialized_ids = [] self.trackers = OrderedDict() if len(info['init_object_ids']) == 0: return None object_ids = info['object_ids'] init_info_split = self._split_info(info) self.trackers = OrderedDict({obj_id: self.create_tracker() for obj_id in object_ids}) out_all = OrderedDict() # Run individual trackers for each object for obj_id in info['init_object_ids']: start_time = time.time() out = self.trackers[obj_id].initialize(image, init_info_split[obj_id]) if out is None: out = {} init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info_split[obj_id].get('init_mask')} out = self._set_defaults(out, init_default) out_all[obj_id] = out # Merge results out_merged = self.merge_outputs(out_all) self.initialized_ids = info['init_object_ids'].copy() return out_merged def track(self, image, info: dict = None) -> dict: if info is None: info = {} prev_output = info.get('previous_output', OrderedDict()) if info.get('init_object_ids', False): init_info_split = self._split_info(info) for 
obj_init_info in init_info_split.values(): obj_init_info['previous_output'] = prev_output info['init_other'] = list(init_info_split.values())[0]['init_other'] out_all = OrderedDict() for obj_id in self.initialized_ids: start_time = time.time() out = self.trackers[obj_id].track(image, info) default = {'time': time.time() - start_time} out = self._set_defaults(out, default) out_all[obj_id] = out # Initialize new if info.get('init_object_ids', False): for obj_id in info['init_object_ids']: if not obj_id in self.trackers: self.trackers[obj_id] = self.create_tracker() start_time = time.time() out = self.trackers[obj_id].initialize(image, init_info_split[obj_id]) if out is None: out = {} init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info_split[obj_id].get('init_mask')} out = self._set_defaults(out, init_default) out_all[obj_id] = out self.initialized_ids.extend(info['init_object_ids']) # Merge results out_merged = self.merge_outputs(out_all) return out_merged def visdom_draw_tracking(self, image, box, segmentation): if isinstance(box, (OrderedDict, dict)): box = [v for k, v in box.items()] else: box = (box,) if segmentation is None: self.visdom.register((image, *box), 'Tracking', 1, 'Tracking') else: self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking') ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/nfsdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text class NFSDataset(BaseDataset): """ NFS dataset. Publication: Need for Speed: A Benchmark for Higher Frame Rate Object Tracking H. Kiani Galoogahi, A. Fagg, C. Huang, D. 
Ramanan, and S.Lucey ICCV, 2017 http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf Download the dataset from http://ci2cv.net/nfs/index.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.nfs_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter='\t', dtype=np.float64) return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "nfs_Gymnastics", "path": "sequences/Gymnastics", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Gymnastics.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_MachLoop_jet", "path": "sequences/MachLoop_jet", "startFrame": 1, "endFrame": 99, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_MachLoop_jet.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_Skiing_red", "path": "sequences/Skiing_red", "startFrame": 1, "endFrame": 69, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skiing_red.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_Skydiving", 
"path": "sequences/Skydiving", "startFrame": 1, "endFrame": 196, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skydiving.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_airboard_1", "path": "sequences/airboard_1", "startFrame": 1, "endFrame": 425, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airboard_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_airplane_landing", "path": "sequences/airplane_landing", "startFrame": 1, "endFrame": 81, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airplane_landing.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_airtable_3", "path": "sequences/airtable_3", "startFrame": 1, "endFrame": 482, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airtable_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_1", "path": "sequences/basketball_1", "startFrame": 1, "endFrame": 282, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_2", "path": "sequences/basketball_2", "startFrame": 1, "endFrame": 102, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_3", "path": "sequences/basketball_3", "startFrame": 1, "endFrame": 421, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_6", "path": "sequences/basketball_6", "startFrame": 1, "endFrame": 224, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_7", "path": "sequences/basketball_7", "startFrame": 1, "endFrame": 240, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_7.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player", "path": "sequences/basketball_player", "startFrame": 1, "endFrame": 369, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_basketball_player.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player_2", "path": "sequences/basketball_player_2", "startFrame": 1, "endFrame": 437, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_beach_flipback_person", "path": "sequences/beach_flipback_person", "startFrame": 1, "endFrame": 61, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_beach_flipback_person.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_bee", "path": "sequences/bee", "startFrame": 1, "endFrame": 45, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bee.txt", "object_class": "insect", 'occlusion': False}, {"name": "nfs_biker_acrobat", "path": "sequences/biker_acrobat", "startFrame": 1, "endFrame": 128, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_acrobat.txt", "object_class": "bicycle", 'occlusion': False}, {"name": "nfs_biker_all_1", "path": "sequences/biker_all_1", "startFrame": 1, "endFrame": 113, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_all_1.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_head_2", "path": "sequences/biker_head_2", "startFrame": 1, "endFrame": 132, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_2.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_head_3", "path": "sequences/biker_head_3", "startFrame": 1, "endFrame": 254, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_3.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_upper_body", "path": "sequences/biker_upper_body", "startFrame": 1, "endFrame": 194, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_upper_body.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_whole_body", "path": "sequences/biker_whole_body", "startFrame": 1, "endFrame": 572, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_whole_body.txt", "object_class": "person", 
'occlusion': True}, {"name": "nfs_billiard_2", "path": "sequences/billiard_2", "startFrame": 1, "endFrame": 604, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_3", "path": "sequences/billiard_3", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_6", "path": "sequences/billiard_6", "startFrame": 1, "endFrame": 771, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_7", "path": "sequences/billiard_7", "startFrame": 1, "endFrame": 724, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_8", "path": "sequences/billiard_8", "startFrame": 1, "endFrame": 778, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bird_2", "path": "sequences/bird_2", "startFrame": 1, "endFrame": 476, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bird_2.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_book", "path": "sequences/book", "startFrame": 1, "endFrame": 288, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_book.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bottle", "path": "sequences/bottle", "startFrame": 1, "endFrame": 2103, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bottle.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bowling_1", "path": "sequences/bowling_1", "startFrame": 1, "endFrame": 303, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_1.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_2", "path": "sequences/bowling_2", "startFrame": 1, "endFrame": 710, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_2.txt", "object_class": "ball", 'occlusion': True}, {"name": 
"nfs_bowling_3", "path": "sequences/bowling_3", "startFrame": 1, "endFrame": 271, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_3.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_6", "path": "sequences/bowling_6", "startFrame": 1, "endFrame": 260, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bowling_ball", "path": "sequences/bowling_ball", "startFrame": 1, "endFrame": 275, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_ball.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bunny", "path": "sequences/bunny", "startFrame": 1, "endFrame": 705, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bunny.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_car", "path": "sequences/car", "startFrame": 1, "endFrame": 2020, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car.txt", "object_class": "car", 'occlusion': True}, {"name": "nfs_car_camaro", "path": "sequences/car_camaro", "startFrame": 1, "endFrame": 36, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_camaro.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_drifting", "path": "sequences/car_drifting", "startFrame": 1, "endFrame": 173, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_drifting.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_jumping", "path": "sequences/car_jumping", "startFrame": 1, "endFrame": 22, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_jumping.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rolling", "path": "sequences/car_rc_rolling", "startFrame": 1, "endFrame": 62, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rolling.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rotating", "path": "sequences/car_rc_rotating", "startFrame": 1, "endFrame": 80, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rotating.txt", "object_class": "car", 'occlusion': False}, {"name": 
"nfs_car_side", "path": "sequences/car_side", "startFrame": 1, "endFrame": 108, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_side.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_white", "path": "sequences/car_white", "startFrame": 1, "endFrame": 2063, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_white.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_cheetah", "path": "sequences/cheetah", "startFrame": 1, "endFrame": 167, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cheetah.txt", "object_class": "mammal", 'occlusion': True}, {"name": "nfs_cup", "path": "sequences/cup", "startFrame": 1, "endFrame": 1281, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_cup_2", "path": "sequences/cup_2", "startFrame": 1, "endFrame": 182, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_dog", "path": "sequences/dog", "startFrame": 1, "endFrame": 1030, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_1", "path": "sequences/dog_1", "startFrame": 1, "endFrame": 168, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_1.txt", "object_class": "dog", 'occlusion': False}, {"name": "nfs_dog_2", "path": "sequences/dog_2", "startFrame": 1, "endFrame": 594, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_2.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_3", "path": "sequences/dog_3", "startFrame": 1, "endFrame": 200, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_3.txt", "object_class": "dog", 'occlusion': False}, {"name": "nfs_dogs", "path": "sequences/dogs", "startFrame": 1, "endFrame": 198, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dogs.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dollar", "path": "sequences/dollar", "startFrame": 1, "endFrame": 1426, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_dollar.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_drone", "path": "sequences/drone", "startFrame": 1, "endFrame": 70, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_drone.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_ducks_lake", "path": "sequences/ducks_lake", "startFrame": 1, "endFrame": 107, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ducks_lake.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_exit", "path": "sequences/exit", "startFrame": 1, "endFrame": 359, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_exit.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_first", "path": "sequences/first", "startFrame": 1, "endFrame": 435, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_first.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_flower", "path": "sequences/flower", "startFrame": 1, "endFrame": 448, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_flower.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_footbal_skill", "path": "sequences/footbal_skill", "startFrame": 1, "endFrame": 131, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_footbal_skill.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_helicopter", "path": "sequences/helicopter", "startFrame": 1, "endFrame": 310, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_helicopter.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_horse_jumping", "path": "sequences/horse_jumping", "startFrame": 1, "endFrame": 117, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_jumping.txt", "object_class": "horse", 'occlusion': True}, {"name": "nfs_horse_running", "path": "sequences/horse_running", "startFrame": 1, "endFrame": 139, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_running.txt", "object_class": "horse", 'occlusion': False}, {"name": "nfs_iceskating_6", "path": "sequences/iceskating_6", "startFrame": 1, "endFrame": 603, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_iceskating_6.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_jellyfish_5", "path": "sequences/jellyfish_5", "startFrame": 1, "endFrame": 746, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_jellyfish_5.txt", "object_class": "invertebrate", 'occlusion': False}, {"name": "nfs_kid_swing", "path": "sequences/kid_swing", "startFrame": 1, "endFrame": 169, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_kid_swing.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_motorcross", "path": "sequences/motorcross", "startFrame": 1, "endFrame": 39, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross.txt", "object_class": "vehicle", 'occlusion': True}, {"name": "nfs_motorcross_kawasaki", "path": "sequences/motorcross_kawasaki", "startFrame": 1, "endFrame": 65, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross_kawasaki.txt", "object_class": "vehicle", 'occlusion': False}, {"name": "nfs_parkour", "path": "sequences/parkour", "startFrame": 1, "endFrame": 58, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_parkour.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_person_scooter", "path": "sequences/person_scooter", "startFrame": 1, "endFrame": 413, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_person_scooter.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_pingpong_2", "path": "sequences/pingpong_2", "startFrame": 1, "endFrame": 1277, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_7", "path": "sequences/pingpong_7", "startFrame": 1, "endFrame": 1290, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_8", "path": "sequences/pingpong_8", "startFrame": 1, "endFrame": 296, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_purse", "path": "sequences/purse", "startFrame": 
1, "endFrame": 968, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_purse.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_rubber", "path": "sequences/rubber", "startFrame": 1, "endFrame": 1328, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_rubber.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_running", "path": "sequences/running", "startFrame": 1, "endFrame": 677, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_running_100_m", "path": "sequences/running_100_m", "startFrame": 1, "endFrame": 313, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_100_m_2", "path": "sequences/running_100_m_2", "startFrame": 1, "endFrame": 337, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m_2.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_2", "path": "sequences/running_2", "startFrame": 1, "endFrame": 363, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_shuffleboard_1", "path": "sequences/shuffleboard_1", "startFrame": 1, "endFrame": 42, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_1.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_2", "path": "sequences/shuffleboard_2", "startFrame": 1, "endFrame": 41, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_4", "path": "sequences/shuffleboard_4", "startFrame": 1, "endFrame": 62, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_5", "path": "sequences/shuffleboard_5", "startFrame": 1, "endFrame": 32, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_5.txt", "object_class": "other", 'occlusion': False}, {"name": 
"nfs_shuffleboard_6", "path": "sequences/shuffleboard_6", "startFrame": 1, "endFrame": 52, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_6.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_2", "path": "sequences/shuffletable_2", "startFrame": 1, "endFrame": 372, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_3", "path": "sequences/shuffletable_3", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_3.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_4", "path": "sequences/shuffletable_4", "startFrame": 1, "endFrame": 101, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_ski_long", "path": "sequences/ski_long", "startFrame": 1, "endFrame": 274, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ski_long.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_ball", "path": "sequences/soccer_ball", "startFrame": 1, "endFrame": 163, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_2", "path": "sequences/soccer_ball_2", "startFrame": 1, "endFrame": 1934, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_3", "path": "sequences/soccer_ball_3", "startFrame": 1, "endFrame": 1381, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_player_2", "path": "sequences/soccer_player_2", "startFrame": 1, "endFrame": 475, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_player_3", "path": "sequences/soccer_player_3", "startFrame": 1, "endFrame": 319, "nz": 5, 
"ext": "jpg", "anno_path": "anno/nfs_soccer_player_3.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_stop_sign", "path": "sequences/stop_sign", "startFrame": 1, "endFrame": 302, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_stop_sign.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_suv", "path": "sequences/suv", "startFrame": 1, "endFrame": 2584, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_suv.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_tiger", "path": "sequences/tiger", "startFrame": 1, "endFrame": 1556, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_tiger.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_walking", "path": "sequences/walking", "startFrame": 1, "endFrame": 555, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_walking_3", "path": "sequences/walking_3", "startFrame": 1, "endFrame": 1427, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking_3.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_water_ski_2", "path": "sequences/water_ski_2", "startFrame": 1, "endFrame": 47, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_water_ski_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_yoyo", "path": "sequences/yoyo", "startFrame": 1, "endFrame": 67, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_yoyo.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_zebra_fish", "path": "sequences/zebra_fish", "startFrame": 1, "endFrame": 671, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_zebra_fish.txt", "object_class": "fish", 'occlusion': False}, ] return sequence_info_list ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/otbdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text 
class OTBDataset(BaseDataset):
    """ OTB-2015 dataset

    Publication:
        Object Tracking Benchmark
        Wu, Yi, Jongwoo Lim, and Ming-hsuan Yang
        TPAMI, 2015
        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf

    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
    """
    def __init__(self):
        super().__init__()
        # Root directory of the OTB data, taken from the local environment settings.
        self.base_path = self.env_settings.otb_path
        self.sequence_info_list = self._get_sequence_info_list()

    def get_sequence_list(self):
        """Return a SequenceList with one Sequence per entry of the info list."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])

    def _construct_sequence(self, sequence_info):
        """Build a Sequence (frame paths + ground-truth boxes) from one info dict."""
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        # Some sequences skip the first few annotated frames (e.g. Tiger1).
        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
                                                                           sequence_path=sequence_path,
                                                                           frame=frame_num,
                                                                           nz=nz,
                                                                           ext=ext)
                  for frame_num in range(start_frame + init_omit, end_frame + 1)]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        # NOTE: OTB has some weird annos which panda cannot handle
        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')

        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:, :],
                        object_class=sequence_info['object_class'])

    def __len__(self):
        return len(self.sequence_info_list)

    def _get_sequence_info_list(self):
        # Static metadata for the OTB-2015 sequences (list continues below).
        sequence_info_list = [
            {"name": "Basketball", "path": "Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4,
             "ext": "jpg", "anno_path": "Basketball/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Biker", "path": "Biker/img", "startFrame": 1, "endFrame": 142, "nz": 4,
             "ext": "jpg", "anno_path": "Biker/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Bird1", "path": "Bird1/img", "startFrame": 1, "endFrame": 408, "nz": 4, "ext": "jpg",
"anno_path": "Bird1/groundtruth_rect.txt", "object_class": "bird"}, {"name": "Bird2", "path": "Bird2/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "Bird2/groundtruth_rect.txt", "object_class": "bird"}, {"name": "BlurBody", "path": "BlurBody/img", "startFrame": 1, "endFrame": 334, "nz": 4, "ext": "jpg", "anno_path": "BlurBody/groundtruth_rect.txt", "object_class": "person"}, {"name": "BlurCar1", "path": "BlurCar1/img", "startFrame": 247, "endFrame": 988, "nz": 4, "ext": "jpg", "anno_path": "BlurCar1/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar2", "path": "BlurCar2/img", "startFrame": 1, "endFrame": 585, "nz": 4, "ext": "jpg", "anno_path": "BlurCar2/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar3", "path": "BlurCar3/img", "startFrame": 3, "endFrame": 359, "nz": 4, "ext": "jpg", "anno_path": "BlurCar3/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar4", "path": "BlurCar4/img", "startFrame": 18, "endFrame": 397, "nz": 4, "ext": "jpg", "anno_path": "BlurCar4/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurFace", "path": "BlurFace/img", "startFrame": 1, "endFrame": 493, "nz": 4, "ext": "jpg", "anno_path": "BlurFace/groundtruth_rect.txt", "object_class": "face"}, {"name": "BlurOwl", "path": "BlurOwl/img", "startFrame": 1, "endFrame": 631, "nz": 4, "ext": "jpg", "anno_path": "BlurOwl/groundtruth_rect.txt", "object_class": "other"}, {"name": "Board", "path": "Board/img", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "Board/groundtruth_rect.txt", "object_class": "other"}, {"name": "Bolt", "path": "Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "Bolt/groundtruth_rect.txt", "object_class": "person"}, {"name": "Bolt2", "path": "Bolt2/img", "startFrame": 1, "endFrame": 293, "nz": 4, "ext": "jpg", "anno_path": "Bolt2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Box", "path": "Box/img", "startFrame": 1, "endFrame": 
1161, "nz": 4, "ext": "jpg", "anno_path": "Box/groundtruth_rect.txt", "object_class": "other"}, {"name": "Boy", "path": "Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "Boy/groundtruth_rect.txt", "object_class": "face"}, {"name": "Car1", "path": "Car1/img", "startFrame": 1, "endFrame": 1020, "nz": 4, "ext": "jpg", "anno_path": "Car1/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car2", "path": "Car2/img", "startFrame": 1, "endFrame": 913, "nz": 4, "ext": "jpg", "anno_path": "Car2/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car24", "path": "Car24/img", "startFrame": 1, "endFrame": 3059, "nz": 4, "ext": "jpg", "anno_path": "Car24/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car4", "path": "Car4/img", "startFrame": 1, "endFrame": 659, "nz": 4, "ext": "jpg", "anno_path": "Car4/groundtruth_rect.txt", "object_class": "car"}, {"name": "CarDark", "path": "CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "CarDark/groundtruth_rect.txt", "object_class": "car"}, {"name": "CarScale", "path": "CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "CarScale/groundtruth_rect.txt", "object_class": "car"}, {"name": "ClifBar", "path": "ClifBar/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "ClifBar/groundtruth_rect.txt", "object_class": "other"}, {"name": "Coke", "path": "Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "Coke/groundtruth_rect.txt", "object_class": "other"}, {"name": "Couple", "path": "Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "Couple/groundtruth_rect.txt", "object_class": "person"}, {"name": "Coupon", "path": "Coupon/img", "startFrame": 1, "endFrame": 327, "nz": 4, "ext": "jpg", "anno_path": "Coupon/groundtruth_rect.txt", "object_class": "other"}, {"name": "Crossing", "path": "Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, 
"ext": "jpg", "anno_path": "Crossing/groundtruth_rect.txt", "object_class": "person"}, {"name": "Crowds", "path": "Crowds/img", "startFrame": 1, "endFrame": 347, "nz": 4, "ext": "jpg", "anno_path": "Crowds/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dancer", "path": "Dancer/img", "startFrame": 1, "endFrame": 225, "nz": 4, "ext": "jpg", "anno_path": "Dancer/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dancer2", "path": "Dancer2/img", "startFrame": 1, "endFrame": 150, "nz": 4, "ext": "jpg", "anno_path": "Dancer2/groundtruth_rect.txt", "object_class": "person"}, {"name": "David", "path": "David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "David/groundtruth_rect.txt", "object_class": "face"}, {"name": "David2", "path": "David2/img", "startFrame": 1, "endFrame": 537, "nz": 4, "ext": "jpg", "anno_path": "David2/groundtruth_rect.txt", "object_class": "face"}, {"name": "David3", "path": "David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "David3/groundtruth_rect.txt", "object_class": "person"}, {"name": "Deer", "path": "Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "Deer/groundtruth_rect.txt", "object_class": "mammal"}, {"name": "Diving", "path": "Diving/img", "startFrame": 1, "endFrame": 215, "nz": 4, "ext": "jpg", "anno_path": "Diving/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dog", "path": "Dog/img", "startFrame": 1, "endFrame": 127, "nz": 4, "ext": "jpg", "anno_path": "Dog/groundtruth_rect.txt", "object_class": "dog"}, {"name": "Dog1", "path": "Dog1/img", "startFrame": 1, "endFrame": 1350, "nz": 4, "ext": "jpg", "anno_path": "Dog1/groundtruth_rect.txt", "object_class": "dog"}, {"name": "Doll", "path": "Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "Doll/groundtruth_rect.txt", "object_class": "other"}, {"name": "DragonBaby", "path": "DragonBaby/img", "startFrame": 1, "endFrame": 113, "nz": 
4, "ext": "jpg", "anno_path": "DragonBaby/groundtruth_rect.txt", "object_class": "face"}, {"name": "Dudek", "path": "Dudek/img", "startFrame": 1, "endFrame": 1145, "nz": 4, "ext": "jpg", "anno_path": "Dudek/groundtruth_rect.txt", "object_class": "face"}, {"name": "FaceOcc1", "path": "FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc1/groundtruth_rect.txt", "object_class": "face"}, {"name": "FaceOcc2", "path": "FaceOcc2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc2/groundtruth_rect.txt", "object_class": "face"}, {"name": "Fish", "path": "Fish/img", "startFrame": 1, "endFrame": 476, "nz": 4, "ext": "jpg", "anno_path": "Fish/groundtruth_rect.txt", "object_class": "other"}, {"name": "FleetFace", "path": "FleetFace/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "FleetFace/groundtruth_rect.txt", "object_class": "face"}, {"name": "Football", "path": "Football/img", "startFrame": 1, "endFrame": 362, "nz": 4, "ext": "jpg", "anno_path": "Football/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Football1", "path": "Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "Football1/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman1", "path": "Freeman1/img", "startFrame": 1, "endFrame": 326, "nz": 4, "ext": "jpg", "anno_path": "Freeman1/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman3", "path": "Freeman3/img", "startFrame": 1, "endFrame": 460, "nz": 4, "ext": "jpg", "anno_path": "Freeman3/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman4", "path": "Freeman4/img", "startFrame": 1, "endFrame": 283, "nz": 4, "ext": "jpg", "anno_path": "Freeman4/groundtruth_rect.txt", "object_class": "face"}, {"name": "Girl", "path": "Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Girl/groundtruth_rect.txt", "object_class": "face"}, {"name": "Girl2", "path": 
"Girl2/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "Girl2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Gym", "path": "Gym/img", "startFrame": 1, "endFrame": 767, "nz": 4, "ext": "jpg", "anno_path": "Gym/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human2", "path": "Human2/img", "startFrame": 1, "endFrame": 1128, "nz": 4, "ext": "jpg", "anno_path": "Human2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human3", "path": "Human3/img", "startFrame": 1, "endFrame": 1698, "nz": 4, "ext": "jpg", "anno_path": "Human3/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human4_2", "path": "Human4/img", "startFrame": 1, "endFrame": 667, "nz": 4, "ext": "jpg", "anno_path": "Human4/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Human5", "path": "Human5/img", "startFrame": 1, "endFrame": 713, "nz": 4, "ext": "jpg", "anno_path": "Human5/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human6", "path": "Human6/img", "startFrame": 1, "endFrame": 792, "nz": 4, "ext": "jpg", "anno_path": "Human6/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human7", "path": "Human7/img", "startFrame": 1, "endFrame": 250, "nz": 4, "ext": "jpg", "anno_path": "Human7/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human8", "path": "Human8/img", "startFrame": 1, "endFrame": 128, "nz": 4, "ext": "jpg", "anno_path": "Human8/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human9", "path": "Human9/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "Human9/groundtruth_rect.txt", "object_class": "person"}, {"name": "Ironman", "path": "Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "Ironman/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Jogging_1", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.1.txt", 
"object_class": "person"}, {"name": "Jogging_2", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Jump", "path": "Jump/img", "startFrame": 1, "endFrame": 122, "nz": 4, "ext": "jpg", "anno_path": "Jump/groundtruth_rect.txt", "object_class": "person"}, {"name": "Jumping", "path": "Jumping/img", "startFrame": 1, "endFrame": 313, "nz": 4, "ext": "jpg", "anno_path": "Jumping/groundtruth_rect.txt", "object_class": "face"}, {"name": "KiteSurf", "path": "KiteSurf/img", "startFrame": 1, "endFrame": 84, "nz": 4, "ext": "png", "anno_path": "KiteSurf/groundtruth_rect.txt", "object_class": "face"}, {"name": "Lemming", "path": "Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "Lemming/groundtruth_rect.txt", "object_class": "other"}, {"name": "Liquor", "path": "Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "Liquor/groundtruth_rect.txt", "object_class": "other"}, {"name": "Man", "path": "Man/img", "startFrame": 1, "endFrame": 134, "nz": 4, "ext": "jpg", "anno_path": "Man/groundtruth_rect.txt", "object_class": "face"}, {"name": "Matrix", "path": "Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "Matrix/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Mhyang", "path": "Mhyang/img", "startFrame": 1, "endFrame": 1490, "nz": 4, "ext": "jpg", "anno_path": "Mhyang/groundtruth_rect.txt", "object_class": "face"}, {"name": "MotorRolling", "path": "MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "MotorRolling/groundtruth_rect.txt", "object_class": "vehicle"}, {"name": "MountainBike", "path": "MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "MountainBike/groundtruth_rect.txt", "object_class": "bicycle"}, {"name": "Panda", "path": "Panda/img", "startFrame": 1, "endFrame": 1000, 
"nz": 4, "ext": "jpg", "anno_path": "Panda/groundtruth_rect.txt", "object_class": "mammal"}, {"name": "RedTeam", "path": "RedTeam/img", "startFrame": 1, "endFrame": 1918, "nz": 4, "ext": "jpg", "anno_path": "RedTeam/groundtruth_rect.txt", "object_class": "vehicle"}, {"name": "Rubik", "path": "Rubik/img", "startFrame": 1, "endFrame": 1997, "nz": 4, "ext": "jpg", "anno_path": "Rubik/groundtruth_rect.txt", "object_class": "other"}, {"name": "Shaking", "path": "Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Shaking/groundtruth_rect.txt", "object_class": "face"}, {"name": "Singer1", "path": "Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "Singer1/groundtruth_rect.txt", "object_class": "person"}, {"name": "Singer2", "path": "Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "Singer2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skater", "path": "Skater/img", "startFrame": 1, "endFrame": 160, "nz": 4, "ext": "jpg", "anno_path": "Skater/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skater2", "path": "Skater2/img", "startFrame": 1, "endFrame": 435, "nz": 4, "ext": "jpg", "anno_path": "Skater2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skating1", "path": "Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "Skating1/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skating2_1", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.1.txt", "object_class": "person"}, {"name": "Skating2_2", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Skiing", "path": "Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "Skiing/groundtruth_rect.txt", "object_class": "person"}, {"name": 
"Soccer", "path": "Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "Soccer/groundtruth_rect.txt", "object_class": "face"}, {"name": "Subway", "path": "Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "Subway/groundtruth_rect.txt", "object_class": "person"}, {"name": "Surfer", "path": "Surfer/img", "startFrame": 1, "endFrame": 376, "nz": 4, "ext": "jpg", "anno_path": "Surfer/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Suv", "path": "Suv/img", "startFrame": 1, "endFrame": 945, "nz": 4, "ext": "jpg", "anno_path": "Suv/groundtruth_rect.txt", "object_class": "car"}, {"name": "Sylvester", "path": "Sylvester/img", "startFrame": 1, "endFrame": 1345, "nz": 4, "ext": "jpg", "anno_path": "Sylvester/groundtruth_rect.txt", "object_class": "other"}, {"name": "Tiger1", "path": "Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "Tiger1/groundtruth_rect.txt", "initOmit": 5, "object_class": "other"}, {"name": "Tiger2", "path": "Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Tiger2/groundtruth_rect.txt", "object_class": "other"}, {"name": "Toy", "path": "Toy/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Toy/groundtruth_rect.txt", "object_class": "other"}, {"name": "Trans", "path": "Trans/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "Trans/groundtruth_rect.txt", "object_class": "other"}, {"name": "Trellis", "path": "Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "Trellis/groundtruth_rect.txt", "object_class": "face"}, {"name": "Twinnings", "path": "Twinnings/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "Twinnings/groundtruth_rect.txt", "object_class": "other"}, {"name": "Vase", "path": "Vase/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Vase/groundtruth_rect.txt", 
"object_class": "other"}, {"name": "Walking", "path": "Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "Walking/groundtruth_rect.txt", "object_class": "person"}, {"name": "Walking2", "path": "Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Walking2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Woman", "path": "Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "Woman/groundtruth_rect.txt", "object_class": "person"} ] return sequence_info_list ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/running.py ================================================ import numpy as np import multiprocessing import os import sys from itertools import product from collections import OrderedDict from pytracking.evaluation import Sequence, Tracker from ltr.data.image_loader import imwrite_indexed def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict): """Saves the output of the tracker.""" if not os.path.exists(tracker.results_dir): os.makedirs(tracker.results_dir) base_results_path = os.path.join(tracker.results_dir, seq.name) segmentation_path = os.path.join(tracker.segmentation_dir, seq.name) frame_names = [os.path.splitext(os.path.basename(f))[0] for f in seq.frames] def save_bb(file, data): tracked_bb = np.array(data).astype(int) np.savetxt(file, tracked_bb, delimiter='\t', fmt='%d') def save_time(file, data): exec_times = np.array(data).astype(float) np.savetxt(file, exec_times, delimiter='\t', fmt='%f') def _convert_dict(input_dict): data_dict = {} for elem in input_dict: for k, v in elem.items(): if k in data_dict.keys(): data_dict[k].append(v) else: data_dict[k] = [v, ] return data_dict for key, data in output.items(): # If data is empty if not data: continue if key == 'target_bbox': if isinstance(data[0], (dict, OrderedDict)): data_dict = _convert_dict(data) for obj_id, d in 
data_dict.items(): bbox_file = '{}_{}.txt'.format(base_results_path, obj_id) save_bb(bbox_file, d) else: # Single-object mode bbox_file = '{}.txt'.format(base_results_path) save_bb(bbox_file, data) elif key == 'time': if isinstance(data[0], dict): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id) save_time(timings_file, d) else: timings_file = '{}_time.txt'.format(base_results_path) save_time(timings_file, data) elif key == 'segmentation': assert len(frame_names) == len(data) if not os.path.exists(segmentation_path): os.makedirs(segmentation_path) for frame_name, frame_seg in zip(frame_names, data): imwrite_indexed(os.path.join(segmentation_path, '{}.png'.format(frame_name)), frame_seg) def run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None): """Runs a tracker on a sequence.""" def _results_exist(): if seq.object_ids is None: bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name) return os.path.isfile(bbox_file) else: bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids] missing = [not os.path.isfile(f) for f in bbox_files] return sum(missing) == 0 visdom_info = {} if visdom_info is None else visdom_info if _results_exist() and not debug: return print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name)) if debug: output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info) else: try: output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info) except Exception as e: print(e) return sys.stdout.flush() if isinstance(output['time'][0], (dict, OrderedDict)): exec_time = sum([sum(times.values()) for times in output['time']]) num_frames = len(output['time']) else: exec_time = sum(output['time']) num_frames = len(output['time']) print('FPS: {}'.format(num_frames / exec_time)) if not debug: _save_tracker_output(seq, tracker, output) 
def run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None): """Runs a list of trackers on a dataset. args: dataset: List of Sequence instances, forming a dataset. trackers: List of Tracker instances. debug: Debug level. threads: Number of threads to use (default 0). visdom_info: Dict containing information about the server for visdom """ multiprocessing.set_start_method('spawn', force=True) print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset))) multiprocessing.set_start_method('spawn', force=True) visdom_info = {} if visdom_info is None else visdom_info if threads == 0: mode = 'sequential' else: mode = 'parallel' if mode == 'sequential': for seq in dataset: for tracker_info in trackers: run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info) elif mode == 'parallel': param_list = [(seq, tracker_info, debug, visdom_info) for seq, tracker_info in product(dataset, trackers)] with multiprocessing.Pool(processes=threads) as pool: pool.starmap(run_sequence, param_list) print('Done') ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/tpldataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text class TPLDataset(BaseDataset): """ Temple Color 128 dataset Publication: Encoding Color Information for Visual Tracking: Algorithms and Benchmark P. Liang, E. Blasch, and H. 
Ling TIP, 2015 http://www.dabi.temple.edu/~hbling/publication/TColor-128.pdf Download the dataset from http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html """ def __init__(self, exclude_otb=False): """ args: exclude_otb (bool) - If True, sequences overlapping with the OTB dataset are excluded """ super().__init__() self.base_path = self.env_settings.tpl_path self.sequence_info_list = self._get_sequence_info_list(exclude_otb) def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'tpl', ground_truth_rect[init_omit:,:]) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self, exclude_otb=False): sequence_info_list = [ {"name": "tpl_Skating2", "path": "tpl_Skating2/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating2/Skating2_gt.txt"}, {"name": "tpl_Pool_ce3", "path": "tpl_Pool_ce3/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce3/Pool_ce3_gt.txt"}, {"name": "tpl_Microphone_ce1", "path": "tpl_Microphone_ce1/img", "startFrame": 1, "endFrame": 204, "nz": 4, "ext": "jpg", "anno_path": "tpl_Microphone_ce1/Microphone_ce1_gt.txt"}, {"name": "tpl_Torus", "path": 
"tpl_Torus/img", "startFrame": 1, "endFrame": 264, "nz": 4, "ext": "jpg", "anno_path": "tpl_Torus/Torus_gt.txt"}, {"name": "tpl_Lemming", "path": "tpl_Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "tpl_Lemming/Lemming_gt.txt"}, {"name": "tpl_Eagle_ce", "path": "tpl_Eagle_ce/img", "startFrame": 1, "endFrame": 112, "nz": 4, "ext": "jpg", "anno_path": "tpl_Eagle_ce/Eagle_ce_gt.txt"}, {"name": "tpl_Skating_ce2", "path": "tpl_Skating_ce2/img", "startFrame": 1, "endFrame": 497, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating_ce2/Skating_ce2_gt.txt"}, {"name": "tpl_Yo_yos_ce3", "path": "tpl_Yo_yos_ce3/img", "startFrame": 1, "endFrame": 201, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce3/Yo-yos_ce3_gt.txt"}, {"name": "tpl_Board", "path": "tpl_Board/img", "startFrame": 1, "endFrame": 598, "nz": 4, "ext": "jpg", "anno_path": "tpl_Board/Board_gt.txt"}, {"name": "tpl_Tennis_ce3", "path": "tpl_Tennis_ce3/img", "startFrame": 1, "endFrame": 204, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tennis_ce3/Tennis_ce3_gt.txt"}, {"name": "tpl_SuperMario_ce", "path": "tpl_SuperMario_ce/img", "startFrame": 1, "endFrame": 146, "nz": 4, "ext": "jpg", "anno_path": "tpl_SuperMario_ce/SuperMario_ce_gt.txt"}, {"name": "tpl_Yo_yos_ce1", "path": "tpl_Yo_yos_ce1/img", "startFrame": 1, "endFrame": 235, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce1/Yo-yos_ce1_gt.txt"}, {"name": "tpl_Soccer", "path": "tpl_Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "tpl_Soccer/Soccer_gt.txt"}, {"name": "tpl_Fish_ce2", "path": "tpl_Fish_ce2/img", "startFrame": 1, "endFrame": 573, "nz": 4, "ext": "jpg", "anno_path": "tpl_Fish_ce2/Fish_ce2_gt.txt"}, {"name": "tpl_Liquor", "path": "tpl_Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "tpl_Liquor/Liquor_gt.txt"}, {"name": "tpl_Plane_ce2", "path": "tpl_Plane_ce2/img", "startFrame": 1, "endFrame": 653, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Plane_ce2/Plane_ce2_gt.txt"}, {"name": "tpl_Couple", "path": "tpl_Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "tpl_Couple/Couple_gt.txt"}, {"name": "tpl_Logo_ce", "path": "tpl_Logo_ce/img", "startFrame": 1, "endFrame": 610, "nz": 4, "ext": "jpg", "anno_path": "tpl_Logo_ce/Logo_ce_gt.txt"}, {"name": "tpl_Hand_ce2", "path": "tpl_Hand_ce2/img", "startFrame": 1, "endFrame": 251, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand_ce2/Hand_ce2_gt.txt"}, {"name": "tpl_Kite_ce2", "path": "tpl_Kite_ce2/img", "startFrame": 1, "endFrame": 658, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce2/Kite_ce2_gt.txt"}, {"name": "tpl_Walking", "path": "tpl_Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "tpl_Walking/Walking_gt.txt"}, {"name": "tpl_David", "path": "tpl_David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "tpl_David/David_gt.txt"}, {"name": "tpl_Boat_ce1", "path": "tpl_Boat_ce1/img", "startFrame": 1, "endFrame": 377, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boat_ce1/Boat_ce1_gt.txt"}, {"name": "tpl_Airport_ce", "path": "tpl_Airport_ce/img", "startFrame": 1, "endFrame": 148, "nz": 4, "ext": "jpg", "anno_path": "tpl_Airport_ce/Airport_ce_gt.txt"}, {"name": "tpl_Tiger2", "path": "tpl_Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tiger2/Tiger2_gt.txt"}, {"name": "tpl_Suitcase_ce", "path": "tpl_Suitcase_ce/img", "startFrame": 1, "endFrame": 184, "nz": 4, "ext": "jpg", "anno_path": "tpl_Suitcase_ce/Suitcase_ce_gt.txt"}, {"name": "tpl_TennisBall_ce", "path": "tpl_TennisBall_ce/img", "startFrame": 1, "endFrame": 288, "nz": 4, "ext": "jpg", "anno_path": "tpl_TennisBall_ce/TennisBall_ce_gt.txt"}, {"name": "tpl_Singer_ce1", "path": "tpl_Singer_ce1/img", "startFrame": 1, "endFrame": 214, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer_ce1/Singer_ce1_gt.txt"}, {"name": "tpl_Pool_ce2", "path": "tpl_Pool_ce2/img", "startFrame": 1, 
"endFrame": 133, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce2/Pool_ce2_gt.txt"}, {"name": "tpl_Surf_ce3", "path": "tpl_Surf_ce3/img", "startFrame": 1, "endFrame": 279, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce3/Surf_ce3_gt.txt"}, {"name": "tpl_Bird", "path": "tpl_Bird/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bird/Bird_gt.txt"}, {"name": "tpl_Crossing", "path": "tpl_Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, "ext": "jpg", "anno_path": "tpl_Crossing/Crossing_gt.txt"}, {"name": "tpl_Plate_ce1", "path": "tpl_Plate_ce1/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "tpl_Plate_ce1/Plate_ce1_gt.txt"}, {"name": "tpl_Cup", "path": "tpl_Cup/img", "startFrame": 1, "endFrame": 303, "nz": 4, "ext": "jpg", "anno_path": "tpl_Cup/Cup_gt.txt"}, {"name": "tpl_Surf_ce2", "path": "tpl_Surf_ce2/img", "startFrame": 1, "endFrame": 391, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce2/Surf_ce2_gt.txt"}, {"name": "tpl_Busstation_ce2", "path": "tpl_Busstation_ce2/img", "startFrame": 6, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "tpl_Busstation_ce2/Busstation_ce2_gt.txt"}, {"name": "tpl_Charger_ce", "path": "tpl_Charger_ce/img", "startFrame": 1, "endFrame": 298, "nz": 4, "ext": "jpg", "anno_path": "tpl_Charger_ce/Charger_ce_gt.txt"}, {"name": "tpl_Pool_ce1", "path": "tpl_Pool_ce1/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce1/Pool_ce1_gt.txt"}, {"name": "tpl_MountainBike", "path": "tpl_MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "tpl_MountainBike/MountainBike_gt.txt"}, {"name": "tpl_Guitar_ce1", "path": "tpl_Guitar_ce1/img", "startFrame": 1, "endFrame": 268, "nz": 4, "ext": "jpg", "anno_path": "tpl_Guitar_ce1/Guitar_ce1_gt.txt"}, {"name": "tpl_Busstation_ce1", "path": "tpl_Busstation_ce1/img", "startFrame": 1, "endFrame": 363, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Busstation_ce1/Busstation_ce1_gt.txt"}, {"name": "tpl_Diving", "path": "tpl_Diving/img", "startFrame": 1, "endFrame": 231, "nz": 4, "ext": "jpg", "anno_path": "tpl_Diving/Diving_gt.txt"}, {"name": "tpl_Skating_ce1", "path": "tpl_Skating_ce1/img", "startFrame": 1, "endFrame": 409, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating_ce1/Skating_ce1_gt.txt"}, {"name": "tpl_Hurdle_ce2", "path": "tpl_Hurdle_ce2/img", "startFrame": 27, "endFrame": 330, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hurdle_ce2/Hurdle_ce2_gt.txt"}, {"name": "tpl_Plate_ce2", "path": "tpl_Plate_ce2/img", "startFrame": 1, "endFrame": 181, "nz": 4, "ext": "jpg", "anno_path": "tpl_Plate_ce2/Plate_ce2_gt.txt"}, {"name": "tpl_CarDark", "path": "tpl_CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "tpl_CarDark/CarDark_gt.txt"}, {"name": "tpl_Singer_ce2", "path": "tpl_Singer_ce2/img", "startFrame": 1, "endFrame": 999, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer_ce2/Singer_ce2_gt.txt"}, {"name": "tpl_Shaking", "path": "tpl_Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "tpl_Shaking/Shaking_gt.txt"}, {"name": "tpl_Iceskater", "path": "tpl_Iceskater/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Iceskater/Iceskater_gt.txt"}, {"name": "tpl_Badminton_ce2", "path": "tpl_Badminton_ce2/img", "startFrame": 1, "endFrame": 705, "nz": 4, "ext": "jpg", "anno_path": "tpl_Badminton_ce2/Badminton_ce2_gt.txt"}, {"name": "tpl_Spiderman_ce", "path": "tpl_Spiderman_ce/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "tpl_Spiderman_ce/Spiderman_ce_gt.txt"}, {"name": "tpl_Kite_ce1", "path": "tpl_Kite_ce1/img", "startFrame": 1, "endFrame": 484, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce1/Kite_ce1_gt.txt"}, {"name": "tpl_Skyjumping_ce", "path": "tpl_Skyjumping_ce/img", "startFrame": 1, "endFrame": 938, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skyjumping_ce/Skyjumping_ce_gt.txt"}, 
{"name": "tpl_Ball_ce1", "path": "tpl_Ball_ce1/img", "startFrame": 1, "endFrame": 391, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce1/Ball_ce1_gt.txt"}, {"name": "tpl_Yo_yos_ce2", "path": "tpl_Yo_yos_ce2/img", "startFrame": 1, "endFrame": 454, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce2/Yo-yos_ce2_gt.txt"}, {"name": "tpl_Ironman", "path": "tpl_Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ironman/Ironman_gt.txt"}, {"name": "tpl_FaceOcc1", "path": "tpl_FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "tpl_FaceOcc1/FaceOcc1_gt.txt"}, {"name": "tpl_Surf_ce1", "path": "tpl_Surf_ce1/img", "startFrame": 1, "endFrame": 404, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce1/Surf_ce1_gt.txt"}, {"name": "tpl_Ring_ce", "path": "tpl_Ring_ce/img", "startFrame": 1, "endFrame": 201, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ring_ce/Ring_ce_gt.txt"}, {"name": "tpl_Surf_ce4", "path": "tpl_Surf_ce4/img", "startFrame": 1, "endFrame": 135, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce4/Surf_ce4_gt.txt"}, {"name": "tpl_Ball_ce4", "path": "tpl_Ball_ce4/img", "startFrame": 1, "endFrame": 538, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce4/Ball_ce4_gt.txt"}, {"name": "tpl_Bikeshow_ce", "path": "tpl_Bikeshow_ce/img", "startFrame": 1, "endFrame": 361, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bikeshow_ce/Bikeshow_ce_gt.txt"}, {"name": "tpl_Kobe_ce", "path": "tpl_Kobe_ce/img", "startFrame": 1, "endFrame": 582, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kobe_ce/Kobe_ce_gt.txt"}, {"name": "tpl_Tiger1", "path": "tpl_Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tiger1/Tiger1_gt.txt"}, {"name": "tpl_Skiing", "path": "tpl_Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skiing/Skiing_gt.txt"}, {"name": "tpl_Tennis_ce1", "path": "tpl_Tennis_ce1/img", "startFrame": 1, "endFrame": 454, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Tennis_ce1/Tennis_ce1_gt.txt"}, {"name": "tpl_Carchasing_ce4", "path": "tpl_Carchasing_ce4/img", "startFrame": 1, "endFrame": 442, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce4/Carchasing_ce4_gt.txt"}, {"name": "tpl_Walking2", "path": "tpl_Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Walking2/Walking2_gt.txt"}, {"name": "tpl_Sailor_ce", "path": "tpl_Sailor_ce/img", "startFrame": 1, "endFrame": 402, "nz": 4, "ext": "jpg", "anno_path": "tpl_Sailor_ce/Sailor_ce_gt.txt"}, {"name": "tpl_Railwaystation_ce", "path": "tpl_Railwaystation_ce/img", "startFrame": 1, "endFrame": 413, "nz": 4, "ext": "jpg", "anno_path": "tpl_Railwaystation_ce/Railwaystation_ce_gt.txt"}, {"name": "tpl_Bee_ce", "path": "tpl_Bee_ce/img", "startFrame": 1, "endFrame": 90, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bee_ce/Bee_ce_gt.txt"}, {"name": "tpl_Girl", "path": "tpl_Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Girl/Girl_gt.txt"}, {"name": "tpl_Subway", "path": "tpl_Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "tpl_Subway/Subway_gt.txt"}, {"name": "tpl_David3", "path": "tpl_David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "tpl_David3/David3_gt.txt"}, {"name": "tpl_Electricalbike_ce", "path": "tpl_Electricalbike_ce/img", "startFrame": 1, "endFrame": 818, "nz": 4, "ext": "jpg", "anno_path": "tpl_Electricalbike_ce/Electricalbike_ce_gt.txt"}, {"name": "tpl_Michaeljackson_ce", "path": "tpl_Michaeljackson_ce/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "tpl_Michaeljackson_ce/Michaeljackson_ce_gt.txt"}, {"name": "tpl_Woman", "path": "tpl_Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "tpl_Woman/Woman_gt.txt"}, {"name": "tpl_TableTennis_ce", "path": "tpl_TableTennis_ce/img", "startFrame": 1, "endFrame": 198, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_TableTennis_ce/TableTennis_ce_gt.txt"}, {"name": "tpl_Motorbike_ce", "path": "tpl_Motorbike_ce/img", "startFrame": 1, "endFrame": 563, "nz": 4, "ext": "jpg", "anno_path": "tpl_Motorbike_ce/Motorbike_ce_gt.txt"}, {"name": "tpl_Baby_ce", "path": "tpl_Baby_ce/img", "startFrame": 1, "endFrame": 296, "nz": 4, "ext": "jpg", "anno_path": "tpl_Baby_ce/Baby_ce_gt.txt"}, {"name": "tpl_Gym", "path": "tpl_Gym/img", "startFrame": 1, "endFrame": 766, "nz": 4, "ext": "jpg", "anno_path": "tpl_Gym/Gym_gt.txt"}, {"name": "tpl_Matrix", "path": "tpl_Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "tpl_Matrix/Matrix_gt.txt"}, {"name": "tpl_Kite_ce3", "path": "tpl_Kite_ce3/img", "startFrame": 1, "endFrame": 528, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce3/Kite_ce3_gt.txt"}, {"name": "tpl_Fish_ce1", "path": "tpl_Fish_ce1/img", "startFrame": 1, "endFrame": 401, "nz": 4, "ext": "jpg", "anno_path": "tpl_Fish_ce1/Fish_ce1_gt.txt"}, {"name": "tpl_Hand_ce1", "path": "tpl_Hand_ce1/img", "startFrame": 1, "endFrame": 401, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand_ce1/Hand_ce1_gt.txt"}, {"name": "tpl_Doll", "path": "tpl_Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "tpl_Doll/Doll_gt.txt"}, {"name": "tpl_Carchasing_ce3", "path": "tpl_Carchasing_ce3/img", "startFrame": 1, "endFrame": 572, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce3/Carchasing_ce3_gt.txt"}, {"name": "tpl_Thunder_ce", "path": "tpl_Thunder_ce/img", "startFrame": 1, "endFrame": 375, "nz": 4, "ext": "jpg", "anno_path": "tpl_Thunder_ce/Thunder_ce_gt.txt"}, {"name": "tpl_Singer2", "path": "tpl_Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer2/Singer2_gt.txt"}, {"name": "tpl_Basketball", "path": "tpl_Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball/Basketball_gt.txt"}, {"name": "tpl_Hand", "path": "tpl_Hand/img", "startFrame": 1, "endFrame": 
244, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand/Hand_gt.txt"}, {"name": "tpl_Cup_ce", "path": "tpl_Cup_ce/img", "startFrame": 1, "endFrame": 338, "nz": 4, "ext": "jpg", "anno_path": "tpl_Cup_ce/Cup_ce_gt.txt"}, {"name": "tpl_MotorRolling", "path": "tpl_MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "tpl_MotorRolling/MotorRolling_gt.txt"}, {"name": "tpl_Boat_ce2", "path": "tpl_Boat_ce2/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boat_ce2/Boat_ce2_gt.txt"}, {"name": "tpl_CarScale", "path": "tpl_CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "tpl_CarScale/CarScale_gt.txt"}, {"name": "tpl_Sunshade", "path": "tpl_Sunshade/img", "startFrame": 1, "endFrame": 172, "nz": 4, "ext": "jpg", "anno_path": "tpl_Sunshade/Sunshade_gt.txt"}, {"name": "tpl_Football1", "path": "tpl_Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "tpl_Football1/Football1_gt.txt"}, {"name": "tpl_Singer1", "path": "tpl_Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer1/Singer1_gt.txt"}, {"name": "tpl_Hurdle_ce1", "path": "tpl_Hurdle_ce1/img", "startFrame": 1, "endFrame": 300, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hurdle_ce1/Hurdle_ce1_gt.txt"}, {"name": "tpl_Basketball_ce3", "path": "tpl_Basketball_ce3/img", "startFrame": 1, "endFrame": 441, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce3/Basketball_ce3_gt.txt"}, {"name": "tpl_Toyplane_ce", "path": "tpl_Toyplane_ce/img", "startFrame": 1, "endFrame": 405, "nz": 4, "ext": "jpg", "anno_path": "tpl_Toyplane_ce/Toyplane_ce_gt.txt"}, {"name": "tpl_Skating1", "path": "tpl_Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating1/Skating1_gt.txt"}, {"name": "tpl_Juice", "path": "tpl_Juice/img", "startFrame": 1, "endFrame": 404, "nz": 4, "ext": "jpg", "anno_path": "tpl_Juice/Juice_gt.txt"}, {"name": "tpl_Biker", 
"path": "tpl_Biker/img", "startFrame": 1, "endFrame": 180, "nz": 4, "ext": "jpg", "anno_path": "tpl_Biker/Biker_gt.txt"}, {"name": "tpl_Boy", "path": "tpl_Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boy/Boy_gt.txt"}, {"name": "tpl_Jogging1", "path": "tpl_Jogging1/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "tpl_Jogging1/Jogging1_gt.txt"}, {"name": "tpl_Deer", "path": "tpl_Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "tpl_Deer/Deer_gt.txt"}, {"name": "tpl_Panda", "path": "tpl_Panda/img", "startFrame": 1, "endFrame": 241, "nz": 4, "ext": "jpg", "anno_path": "tpl_Panda/Panda_gt.txt"}, {"name": "tpl_Coke", "path": "tpl_Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "tpl_Coke/Coke_gt.txt"}, {"name": "tpl_Carchasing_ce1", "path": "tpl_Carchasing_ce1/img", "startFrame": 1, "endFrame": 501, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce1/Carchasing_ce1_gt.txt"}, {"name": "tpl_Badminton_ce1", "path": "tpl_Badminton_ce1/img", "startFrame": 1, "endFrame": 579, "nz": 4, "ext": "jpg", "anno_path": "tpl_Badminton_ce1/Badminton_ce1_gt.txt"}, {"name": "tpl_Trellis", "path": "tpl_Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "tpl_Trellis/Trellis_gt.txt"}, {"name": "tpl_Face_ce2", "path": "tpl_Face_ce2/img", "startFrame": 1, "endFrame": 148, "nz": 4, "ext": "jpg", "anno_path": "tpl_Face_ce2/Face_ce2_gt.txt"}, {"name": "tpl_Ball_ce2", "path": "tpl_Ball_ce2/img", "startFrame": 1, "endFrame": 603, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce2/Ball_ce2_gt.txt"}, {"name": "tpl_Skiing_ce", "path": "tpl_Skiing_ce/img", "startFrame": 1, "endFrame": 511, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skiing_ce/Skiing_ce_gt.txt"}, {"name": "tpl_Jogging2", "path": "tpl_Jogging2/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "tpl_Jogging2/Jogging2_gt.txt"}, {"name": "tpl_Bike_ce1", 
"path": "tpl_Bike_ce1/img", "startFrame": 1, "endFrame": 801, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bike_ce1/Bike_ce1_gt.txt"}, {"name": "tpl_Bike_ce2", "path": "tpl_Bike_ce2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bike_ce2/Bike_ce2_gt.txt"}, {"name": "tpl_Ball_ce3", "path": "tpl_Ball_ce3/img", "startFrame": 1, "endFrame": 273, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce3/Ball_ce3_gt.txt"}, {"name": "tpl_Girlmov", "path": "tpl_Girlmov/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Girlmov/Girlmov_gt.txt"}, {"name": "tpl_Bolt", "path": "tpl_Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bolt/Bolt_gt.txt"}, {"name": "tpl_Basketball_ce2", "path": "tpl_Basketball_ce2/img", "startFrame": 1, "endFrame": 455, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce2/Basketball_ce2_gt.txt"}, {"name": "tpl_Bicycle", "path": "tpl_Bicycle/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bicycle/Bicycle_gt.txt"}, {"name": "tpl_Face_ce", "path": "tpl_Face_ce/img", "startFrame": 1, "endFrame": 620, "nz": 4, "ext": "jpg", "anno_path": "tpl_Face_ce/Face_ce_gt.txt"}, {"name": "tpl_Basketball_ce1", "path": "tpl_Basketball_ce1/img", "startFrame": 1, "endFrame": 496, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce1/Basketball_ce1_gt.txt"}, {"name": "tpl_Messi_ce", "path": "tpl_Messi_ce/img", "startFrame": 1, "endFrame": 272, "nz": 4, "ext": "jpg", "anno_path": "tpl_Messi_ce/Messi_ce_gt.txt"}, {"name": "tpl_Tennis_ce2", "path": "tpl_Tennis_ce2/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tennis_ce2/Tennis_ce2_gt.txt"}, {"name": "tpl_Microphone_ce2", "path": "tpl_Microphone_ce2/img", "startFrame": 1, "endFrame": 103, "nz": 4, "ext": "jpg", "anno_path": "tpl_Microphone_ce2/Microphone_ce2_gt.txt"}, {"name": "tpl_Guitar_ce2", "path": "tpl_Guitar_ce2/img", "startFrame": 1, "endFrame": 313, "nz": 4, 
"ext": "jpg", "anno_path": "tpl_Guitar_ce2/Guitar_ce2_gt.txt"} ] otb_sequences = ['tpl_Skating2', 'tpl_Lemming', 'tpl_Board', 'tpl_Soccer', 'tpl_Liquor', 'tpl_Couple', 'tpl_Walking', 'tpl_David', 'tpl_Tiger2', 'tpl_Bird', 'tpl_Crossing', 'tpl_MountainBike', 'tpl_Diving', 'tpl_CarDark', 'tpl_Shaking', 'tpl_Ironman', 'tpl_FaceOcc1', 'tpl_Tiger1', 'tpl_Skiing', 'tpl_Walking2', 'tpl_Girl', 'tpl_Girlmov', 'tpl_Subway', 'tpl_David3', 'tpl_Woman', 'tpl_Gym', 'tpl_Matrix', 'tpl_Doll', 'tpl_Singer2', 'tpl_Basketball', 'tpl_MotorRolling', 'tpl_CarScale', 'tpl_Football1', 'tpl_Singer1', 'tpl_Skating1', 'tpl_Biker', 'tpl_Boy', 'tpl_Jogging1', 'tpl_Deer', 'tpl_Panda', 'tpl_Coke', 'tpl_Trellis', 'tpl_Jogging2', 'tpl_Bolt', ] if exclude_otb: sequence_info_list_nootb = [] for seq in sequence_info_list: if seq['name'] not in otb_sequences: sequence_info_list_nootb.append(seq) sequence_info_list = sequence_info_list_nootb return sequence_info_list ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/tracker.py ================================================ import importlib import os import numpy as np from collections import OrderedDict from pytracking.evaluation.environment import env_settings import time import cv2 as cv from pytracking.utils.visdom import Visdom import matplotlib.pyplot as plt import matplotlib.patches as patches from pytracking.utils.plotting import draw_figure, overlay_mask from pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect from ltr.data.bounding_box_utils import masks_to_bboxes from pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper import torch _tracker_disp_colors = {1: (0, 255, 0), 2: (0, 0, 255), 3: (255, 0, 0), 4: (255, 255, 255), 5: (0, 0, 0), 6: (0, 255, 128), 7: (123, 123, 123), 8: (255, 128, 0), 9: (128, 0, 255)} def trackerlist(name: str, parameter_name: str, run_ids = None, display_name: str = None): """Generate list of trackers. 
args: name: Name of tracking method. parameter_name: Name of parameter file. run_ids: A single or list of run_ids. display_name: Name to be displayed in the result plots. """ if run_ids is None or isinstance(run_ids, int): run_ids = [run_ids] return [Tracker(name, parameter_name, run_id, display_name) for run_id in run_ids] class Tracker: """Wraps the tracker for evaluation and running purposes. args: name: Name of tracking method. parameter_name: Name of parameter file. run_id: The run id. display_name: Name to be displayed in the result plots. """ def __init__(self, name: str, parameter_name: str, run_id: int = None, display_name: str = None): assert run_id is None or isinstance(run_id, int) self.name = name self.parameter_name = parameter_name self.run_id = run_id self.display_name = display_name env = env_settings() if self.run_id is None: self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name) self.segmentation_dir = '{}/{}/{}'.format(env.segmentation_path, self.name, self.parameter_name) else: self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id) self.segmentation_dir = '{}/{}/{}_{:03d}'.format(env.segmentation_path, self.name, self.parameter_name, self.run_id) tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', self.name)) if os.path.isdir(tracker_module_abspath): tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name)) self.tracker_class = tracker_module.get_tracker_class() else: self.tracker_class = None self.visdom = None def _init_visdom(self, visdom_info, debug): visdom_info = {} if visdom_info is None else visdom_info self.pause_mode = False self.step = False if debug > 0 and visdom_info.get('use_visdom', True): try: self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'}, visdom_info=visdom_info) # Show help help_text = 'You can pause/unpause the tracker by pressing 
''space'' with the ''Tracking'' window ' \ 'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \ 'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \ 'block list.' self.visdom.register(help_text, 'text', 1, 'Help') except: time.sleep(0.5) print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n' '!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!') def _visdom_ui_handler(self, data): if data['event_type'] == 'KeyPress': if data['key'] == ' ': self.pause_mode = not self.pause_mode elif data['key'] == 'ArrowRight' and self.pause_mode: self.step = True def create_tracker(self, params): tracker = self.tracker_class(params) tracker.visdom = self.visdom return tracker def run_sequence(self, seq, visualization=None, debug=None, visdom_info=None, multiobj_mode=None): """Run tracker on sequence. args: seq: Sequence to run the tracker on. visualization: Set visualization flag (None means default value specified in the parameters). debug: Set debug level (None means default value specified in the parameters). visdom_info: Visdom info. multiobj_mode: Which mode to use for multiple objects. 
""" params = self.get_parameters() visualization_ = visualization debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) if visualization is None: if debug is None: visualization_ = getattr(params, 'visualization', False) else: visualization_ = True if debug else False params.visualization = visualization_ params.debug = debug_ self._init_visdom(visdom_info, debug_) if visualization_ and self.visdom is None: self.init_visualization() # Get init information init_info = seq.init_info() is_single_object = not seq.multiobj_mode if multiobj_mode is None: multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default' or is_single_object: tracker = self.create_tracker(params) elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) output = self._track_sequence(tracker, seq, init_info) return output def _track_sequence(self, tracker, seq, init_info): # Define outputs # Each field in output is a list containing tracker prediction for each frame. 
# In case of single object tracking mode: # target_bbox[i] is the predicted bounding box for frame i # time[i] is the processing time for frame i # segmentation[i] is the segmentation mask for frame i (numpy array) # In case of multi object tracking mode: # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in # frame i # time[i] is either the processing time for frame i, or an OrderedDict containing processing times for each # object in frame i # segmentation[i] is the multi-label segmentation mask for frame i (numpy array) output = {'target_bbox': [], 'time': [], 'segmentation': []} def _store_outputs(tracker_out: dict, defaults=None): defaults = {} if defaults is None else defaults for key in output.keys(): val = tracker_out.get(key, defaults.get(key, None)) if key in tracker_out or val is not None: output[key].append(val) # Initialize image = self._read_image(seq.frames[0]) if tracker.params.visualization and self.visdom is None: self.visualize(image, init_info.get('init_bbox')) start_time = time.time() out = tracker.initialize(image, init_info) if out is None: out = {} prev_output = OrderedDict(out) init_default = {'target_bbox': init_info.get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info.get('init_mask')} _store_outputs(out, init_default) for frame_num, frame_path in enumerate(seq.frames[1:], start=1): while True: if not self.pause_mode: break elif self.step: self.step = False break else: time.sleep(0.1) image = self._read_image(frame_path) start_time = time.time() info = seq.frame_info(frame_num) info['previous_output'] = prev_output out = tracker.track(image, info) prev_output = OrderedDict(out) _store_outputs(out, {'time': time.time() - start_time}) segmentation = out['segmentation'] if 'segmentation' in out else None if self.visdom is not None: tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation) elif tracker.params.visualization: self.visualize(image, 
out['target_bbox'], segmentation) for key in ['target_bbox', 'segmentation']: if key in output and len(output[key]) <= 1: output.pop(key) return output def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None): """Run the tracker with the vieofile. args: debug: Debug level. """ params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ params.tracker_name = self.name params.param_name = self.parameter_name self._init_visdom(visdom_info, debug_) multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default': tracker = self.create_tracker(params) if hasattr(tracker, 'initialize_features'): tracker.initialize_features() elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) assert os.path.isfile(videofilepath), "Invalid param {}".format(videofilepath) ", videofilepath must be a valid videofile" cap = cv.VideoCapture(videofilepath) display_name = 'Display: ' + tracker.params.tracker_name cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) cv.resizeWindow(display_name, 960, 720) success, frame = cap.read() cv.imshow(display_name, frame) def _build_init_info(box): return {'init_bbox': OrderedDict({1: box}), 'init_object_ids': [1, ], 'object_ids': [1, ], 'sequence_object_ids': [1, ]} if success is not True: print("Read frame from {} failed.".format(videofilepath)) exit(-1) if optional_box is not None: assert isinstance(optional_box, list, tuple) assert len(optional_box) == 4, "valid box's foramt is [x,y,w,h]" tracker.initialize(frame, _build_init_info(optional_box)) else: while True: # cv.waitKey() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) x, y, w, h = 
cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) break while True: ret, frame = cap.read() if frame is None: return frame_disp = frame.copy() # Draw box out = tracker.track(frame) state = [int(s) for s in out['target_bbox'][1]] cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]), (0, 255, 0), 5) font_color = (0, 0, 0) cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) # Display the resulting frame cv.imshow(display_name, frame_disp) key = cv.waitKey(1) if key == ord('q'): break elif key == ord('r'): ret, frame = cap.read() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) cv.imshow(display_name, frame_disp) x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) # When everything done, release the capture cap.release() cv.destroyAllWindows() def run_webcam(self, debug=None, visdom_info=None): """Run the tracker with the webcam. args: debug: Debug level. 
""" params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ params.tracker_name = self.name params.param_name = self.parameter_name self._init_visdom(visdom_info, debug_) multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default': tracker = self.create_tracker(params) elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) class UIControl: def __init__(self): self.mode = 'init' # init, select, track self.target_tl = (-1, -1) self.target_br = (-1, -1) self.new_init = False def mouse_callback(self, event, x, y, flags, param): if event == cv.EVENT_LBUTTONDOWN and self.mode == 'init': self.target_tl = (x, y) self.target_br = (x, y) self.mode = 'select' elif event == cv.EVENT_MOUSEMOVE and self.mode == 'select': self.target_br = (x, y) elif event == cv.EVENT_LBUTTONDOWN and self.mode == 'select': self.target_br = (x, y) self.mode = 'init' self.new_init = True def get_tl(self): return self.target_tl if self.target_tl[0] < self.target_br[0] else self.target_br def get_br(self): return self.target_br if self.target_tl[0] < self.target_br[0] else self.target_tl def get_bb(self): tl = self.get_tl() br = self.get_br() bb = [min(tl[0], br[0]), min(tl[1], br[1]), abs(br[0] - tl[0]), abs(br[1] - tl[1])] return bb ui_control = UIControl() cap = cv.VideoCapture(0) display_name = 'Display: ' + self.name cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) cv.resizeWindow(display_name, 960, 720) cv.setMouseCallback(display_name, ui_control.mouse_callback) next_object_id = 1 sequence_object_ids = [] prev_output = OrderedDict() while True: # Capture frame-by-frame ret, frame = cap.read() frame_disp = frame.copy() info = OrderedDict() info['previous_output'] = prev_output if 
ui_control.new_init: ui_control.new_init = False init_state = ui_control.get_bb() info['init_object_ids'] = [next_object_id, ] info['init_bbox'] = OrderedDict({next_object_id: init_state}) sequence_object_ids.append(next_object_id) next_object_id += 1 # Draw box if ui_control.mode == 'select': cv.rectangle(frame_disp, ui_control.get_tl(), ui_control.get_br(), (255, 0, 0), 2) if len(sequence_object_ids) > 0: info['sequence_object_ids'] = sequence_object_ids out = tracker.track(frame, info) prev_output = OrderedDict(out) if 'segmentation' in out: frame_disp = overlay_mask(frame_disp, out['segmentation']) if 'target_bbox' in out: for obj_id, state in out['target_bbox'].items(): state = [int(s) for s in state] cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]), _tracker_disp_colors[obj_id], 5) # Put text font_color = (0, 0, 0) cv.putText(frame_disp, 'Select target', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press q to quit', (20, 85), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) # Display the resulting frame cv.imshow(display_name, frame_disp) key = cv.waitKey(1) if key == ord('q'): break elif key == ord('r'): next_object_id = 1 sequence_object_ids = [] prev_output = OrderedDict() info = OrderedDict() info['object_ids'] = [] info['init_object_ids'] = [] info['init_bbox'] = OrderedDict() tracker.initialize(frame, info) ui_control.mode = 'init' # When everything done, release the capture cap.release() cv.destroyAllWindows() def run_vot2020(self, debug=None, visdom_info=None): params = self.get_parameters() params.tracker_name = self.name params.param_name = self.parameter_name params.run_id = self.run_id debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) if debug is None: visualization_ = getattr(params, 'visualization', False) else: visualization_ = True if debug else 
False params.visualization = visualization_ params.debug = debug_ self._init_visdom(visdom_info, debug_) tracker = self.create_tracker(params) tracker.initialize_features() output_segmentation = tracker.predicts_segmentation_mask() import pytracking.evaluation.vot2020 as vot def _convert_anno_to_list(vot_anno): vot_anno = [vot_anno[0], vot_anno[1], vot_anno[2], vot_anno[3]] return vot_anno def _convert_image_path(image_path): return image_path """Run tracker on VOT.""" if output_segmentation: handle = vot.VOT("mask") else: handle = vot.VOT("rectangle") vot_anno = handle.region() image_path = handle.frame() if not image_path: return image_path = _convert_image_path(image_path) image = self._read_image(image_path) if output_segmentation: vot_anno_mask = vot.make_full_size(vot_anno, (image.shape[1], image.shape[0])) bbox = masks_to_bboxes(torch.from_numpy(vot_anno_mask), fmt='t').squeeze().tolist() else: bbox = _convert_anno_to_list(vot_anno) vot_anno_mask = None out = tracker.initialize(image, {'init_mask': vot_anno_mask, 'init_bbox': bbox}) if out is None: out = {} prev_output = OrderedDict(out) # Track while True: image_path = handle.frame() if not image_path: break image_path = _convert_image_path(image_path) image = self._read_image(image_path) info = OrderedDict() info['previous_output'] = prev_output out = tracker.track(image, info) prev_output = OrderedDict(out) if output_segmentation: pred = out['segmentation'].astype(np.uint8) else: state = out['target_bbox'] pred = vot.Rectangle(*state) handle.report(pred, 1.0) segmentation = out['segmentation'] if 'segmentation' in out else None if self.visdom is not None: tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation) elif tracker.params.visualization: self.visualize(image, out['target_bbox'], segmentation) def run_vot(self, debug=None, visdom_info=None): params = self.get_parameters() params.tracker_name = self.name params.param_name = self.parameter_name params.run_id = self.run_id debug_ = debug 
if debug is None: debug_ = getattr(params, 'debug', 0) if debug is None: visualization_ = getattr(params, 'visualization', False) else: visualization_ = True if debug else False params.visualization = visualization_ params.debug = debug_ self._init_visdom(visdom_info, debug_) tracker = self.create_tracker(params) tracker.initialize_features() import pytracking.evaluation.vot as vot def _convert_anno_to_list(vot_anno): vot_anno = [vot_anno[0][0][0], vot_anno[0][0][1], vot_anno[0][1][0], vot_anno[0][1][1], vot_anno[0][2][0], vot_anno[0][2][1], vot_anno[0][3][0], vot_anno[0][3][1]] return vot_anno def _convert_image_path(image_path): image_path_new = image_path[20:- 2] return "".join(image_path_new) """Run tracker on VOT.""" handle = vot.VOT("polygon") vot_anno_polygon = handle.region() vot_anno_polygon = _convert_anno_to_list(vot_anno_polygon) init_state = convert_vot_anno_to_rect(vot_anno_polygon, tracker.params.vot_anno_conversion_type) image_path = handle.frame() if not image_path: return image_path = _convert_image_path(image_path) image = self._read_image(image_path) tracker.initialize(image, {'init_bbox': init_state}) # Track while True: image_path = handle.frame() if not image_path: break image_path = _convert_image_path(image_path) image = self._read_image(image_path) out = tracker.track(image) state = out['target_bbox'] handle.report(vot.Rectangle(state[0], state[1], state[2], state[3])) segmentation = out['segmentation'] if 'segmentation' in out else None if self.visdom is not None: tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation) elif tracker.params.visualization: self.visualize(image, out['target_bbox'], segmentation) def get_parameters(self): """Get parameters.""" param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, self.parameter_name)) params = param_module.parameters() return params def init_visualization(self): self.pause_mode = False self.fig, self.ax = plt.subplots(1) 
self.fig.canvas.mpl_connect('key_press_event', self.press) plt.tight_layout() def visualize(self, image, state, segmentation=None): self.ax.cla() self.ax.imshow(image) if segmentation is not None: self.ax.imshow(segmentation, alpha=0.5) if isinstance(state, (OrderedDict, dict)): boxes = [v for k, v in state.items()] else: boxes = (state,) for i, box in enumerate(boxes, start=1): col = _tracker_disp_colors[i] col = [float(c) / 255.0 for c in col] rect = patches.Rectangle((box[0], box[1]), box[2], box[3], linewidth=1, edgecolor=col, facecolor='none') self.ax.add_patch(rect) if getattr(self, 'gt_state', None) is not None: gt_state = self.gt_state rect = patches.Rectangle((gt_state[0], gt_state[1]), gt_state[2], gt_state[3], linewidth=1, edgecolor='g', facecolor='none') self.ax.add_patch(rect) self.ax.set_axis_off() self.ax.axis('equal') draw_figure(self.fig) if self.pause_mode: keypress = False while not keypress: keypress = plt.waitforbuttonpress() def reset_tracker(self): pass def press(self, event): if event.key == 'p': self.pause_mode = not self.pause_mode print("Switching pause mode!") elif event.key == 'r': self.reset_tracker() print("Resetting target pos to gt!") def _read_image(self, image_file: str): im = cv.imread(image_file) return cv.cvtColor(im, cv.COLOR_BGR2RGB) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/trackingnetdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList import os from pytracking.utils.load_text import load_text class TrackingNetDataset(BaseDataset): """ TrackingNet test set. Publication: TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild. 
        Matthias Mueller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
        ECCV, 2018
        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf

    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
    """
    def __init__(self):
        super().__init__()
        # Root directory of the TrackingNet data, taken from local environment settings.
        self.base_path = self.env_settings.trackingnet_path

        # Only the TEST split is used here; the TRAIN branch below would expand to
        # the five training chunks (TRAIN_0 .. TRAIN_4) if 'sets' were changed.
        sets = 'TEST'
        if not isinstance(sets, (list, tuple)):
            if sets == 'TEST':
                sets = ['TEST']
            elif sets == 'TRAIN':
                sets = ['TRAIN_{}'.format(i) for i in range(5)]

        self.sequence_list = self._list_sequences(self.base_path, sets)

    def get_sequence_list(self):
        # Build a Sequence for every (set, name) pair discovered on disk.
        return SequenceList([self._construct_sequence(set, seq_name) for set, seq_name in self.sequence_list])

    def _construct_sequence(self, set, sequence_name):
        # Ground-truth boxes: one comma-separated x,y,w,h row per frame.
        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name)

        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')

        # Frames are named <index>.jpg; sort numerically, not lexicographically.
        frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name)
        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")]
        frame_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]

        return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4))

    def __len__(self):
        return len(self.sequence_list)

    def _list_sequences(self, root, set_ids):
        # Enumerate (set, sequence_name) pairs by scanning each set's anno directory.
        sequence_list = []

        for s in set_ids:
            anno_dir = os.path.join(root, s, "anno")
            sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]

            sequence_list += sequences_cur_set

        return sequence_list


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/uavdataset.py
================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from
pytracking.utils.load_text import load_text class UAVDataset(BaseDataset): """ UAV123 dataset. Publication: A Benchmark and Simulator for UAV Tracking. Matthias Mueller, Neil Smith and Bernard Ghanem ECCV, 2016 https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx """ def __init__(self): super().__init__() self.base_path = self.env_settings.uav_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "uav_bike1", "path": "data_seq/UAV123/bike1", "startFrame": 1, "endFrame": 3085, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike1.txt", "object_class": "vehicle"}, {"name": "uav_bike2", "path": "data_seq/UAV123/bike2", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike2.txt", "object_class": "vehicle"}, {"name": "uav_bike3", "path": 
"data_seq/UAV123/bike3", "startFrame": 1, "endFrame": 433, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike3.txt", "object_class": "vehicle"}, {"name": "uav_bird1_1", "path": "data_seq/UAV123/bird1", "startFrame": 1, "endFrame": 253, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_1.txt", "object_class": "bird"}, {"name": "uav_bird1_2", "path": "data_seq/UAV123/bird1", "startFrame": 775, "endFrame": 1477, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_2.txt", "object_class": "bird"}, {"name": "uav_bird1_3", "path": "data_seq/UAV123/bird1", "startFrame": 1573, "endFrame": 2437, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_3.txt", "object_class": "bird"}, {"name": "uav_boat1", "path": "data_seq/UAV123/boat1", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat1.txt", "object_class": "vessel"}, {"name": "uav_boat2", "path": "data_seq/UAV123/boat2", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat2.txt", "object_class": "vessel"}, {"name": "uav_boat3", "path": "data_seq/UAV123/boat3", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat3.txt", "object_class": "vessel"}, {"name": "uav_boat4", "path": "data_seq/UAV123/boat4", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat4.txt", "object_class": "vessel"}, {"name": "uav_boat5", "path": "data_seq/UAV123/boat5", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat5.txt", "object_class": "vessel"}, {"name": "uav_boat6", "path": "data_seq/UAV123/boat6", "startFrame": 1, "endFrame": 805, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat6.txt", "object_class": "vessel"}, {"name": "uav_boat7", "path": "data_seq/UAV123/boat7", "startFrame": 1, "endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat7.txt", "object_class": "vessel"}, {"name": "uav_boat8", "path": "data_seq/UAV123/boat8", "startFrame": 
1, "endFrame": 685, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat8.txt", "object_class": "vessel"}, {"name": "uav_boat9", "path": "data_seq/UAV123/boat9", "startFrame": 1, "endFrame": 1399, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat9.txt", "object_class": "vessel"}, {"name": "uav_building1", "path": "data_seq/UAV123/building1", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building1.txt", "object_class": "other"}, {"name": "uav_building2", "path": "data_seq/UAV123/building2", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building2.txt", "object_class": "other"}, {"name": "uav_building3", "path": "data_seq/UAV123/building3", "startFrame": 1, "endFrame": 829, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building3.txt", "object_class": "other"}, {"name": "uav_building4", "path": "data_seq/UAV123/building4", "startFrame": 1, "endFrame": 787, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building4.txt", "object_class": "other"}, {"name": "uav_building5", "path": "data_seq/UAV123/building5", "startFrame": 1, "endFrame": 481, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building5.txt", "object_class": "other"}, {"name": "uav_car1_1", "path": "data_seq/UAV123/car1", "startFrame": 1, "endFrame": 751, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_1.txt", "object_class": "car"}, {"name": "uav_car1_2", "path": "data_seq/UAV123/car1", "startFrame": 751, "endFrame": 1627, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_2.txt", "object_class": "car"}, {"name": "uav_car1_3", "path": "data_seq/UAV123/car1", "startFrame": 1627, "endFrame": 2629, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_3.txt", "object_class": "car"}, {"name": "uav_car10", "path": "data_seq/UAV123/car10", "startFrame": 1, "endFrame": 1405, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car10.txt", "object_class": "car"}, {"name": "uav_car11", "path": "data_seq/UAV123/car11", 
"startFrame": 1, "endFrame": 337, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car11.txt", "object_class": "car"}, {"name": "uav_car12", "path": "data_seq/UAV123/car12", "startFrame": 1, "endFrame": 499, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car12.txt", "object_class": "car"}, {"name": "uav_car13", "path": "data_seq/UAV123/car13", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car13.txt", "object_class": "car"}, {"name": "uav_car14", "path": "data_seq/UAV123/car14", "startFrame": 1, "endFrame": 1327, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car14.txt", "object_class": "car"}, {"name": "uav_car15", "path": "data_seq/UAV123/car15", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car15.txt", "object_class": "car"}, {"name": "uav_car16_1", "path": "data_seq/UAV123/car16", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_1.txt", "object_class": "car"}, {"name": "uav_car16_2", "path": "data_seq/UAV123/car16", "startFrame": 415, "endFrame": 1993, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_2.txt", "object_class": "car"}, {"name": "uav_car17", "path": "data_seq/UAV123/car17", "startFrame": 1, "endFrame": 1057, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car17.txt", "object_class": "car"}, {"name": "uav_car18", "path": "data_seq/UAV123/car18", "startFrame": 1, "endFrame": 1207, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car18.txt", "object_class": "car"}, {"name": "uav_car1_s", "path": "data_seq/UAV123/car1_s", "startFrame": 1, "endFrame": 1475, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_s.txt", "object_class": "car"}, {"name": "uav_car2", "path": "data_seq/UAV123/car2", "startFrame": 1, "endFrame": 1321, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car2.txt", "object_class": "car"}, {"name": "uav_car2_s", "path": "data_seq/UAV123/car2_s", "startFrame": 1, "endFrame": 320, "nz": 6, "ext": "jpg", 
"anno_path": "anno/UAV123/car2_s.txt", "object_class": "car"}, {"name": "uav_car3", "path": "data_seq/UAV123/car3", "startFrame": 1, "endFrame": 1717, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car3.txt", "object_class": "car"}, {"name": "uav_car3_s", "path": "data_seq/UAV123/car3_s", "startFrame": 1, "endFrame": 1300, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car3_s.txt", "object_class": "car"}, {"name": "uav_car4", "path": "data_seq/UAV123/car4", "startFrame": 1, "endFrame": 1345, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4.txt", "object_class": "car"}, {"name": "uav_car4_s", "path": "data_seq/UAV123/car4_s", "startFrame": 1, "endFrame": 830, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4_s.txt", "object_class": "car"}, {"name": "uav_car5", "path": "data_seq/UAV123/car5", "startFrame": 1, "endFrame": 745, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car5.txt", "object_class": "car"}, {"name": "uav_car6_1", "path": "data_seq/UAV123/car6", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_1.txt", "object_class": "car"}, {"name": "uav_car6_2", "path": "data_seq/UAV123/car6", "startFrame": 487, "endFrame": 1807, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_2.txt", "object_class": "car"}, {"name": "uav_car6_3", "path": "data_seq/UAV123/car6", "startFrame": 1807, "endFrame": 2953, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_3.txt", "object_class": "car"}, {"name": "uav_car6_4", "path": "data_seq/UAV123/car6", "startFrame": 2953, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_4.txt", "object_class": "car"}, {"name": "uav_car6_5", "path": "data_seq/UAV123/car6", "startFrame": 3925, "endFrame": 4861, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_5.txt", "object_class": "car"}, {"name": "uav_car7", "path": "data_seq/UAV123/car7", "startFrame": 1, "endFrame": 1033, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car7.txt", "object_class": "car"}, 
{"name": "uav_car8_1", "path": "data_seq/UAV123/car8", "startFrame": 1, "endFrame": 1357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_1.txt", "object_class": "car"}, {"name": "uav_car8_2", "path": "data_seq/UAV123/car8", "startFrame": 1357, "endFrame": 2575, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_2.txt", "object_class": "car"}, {"name": "uav_car9", "path": "data_seq/UAV123/car9", "startFrame": 1, "endFrame": 1879, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car9.txt", "object_class": "car"}, {"name": "uav_group1_1", "path": "data_seq/UAV123/group1", "startFrame": 1, "endFrame": 1333, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_1.txt", "object_class": "person"}, {"name": "uav_group1_2", "path": "data_seq/UAV123/group1", "startFrame": 1333, "endFrame": 2515, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_2.txt", "object_class": "person"}, {"name": "uav_group1_3", "path": "data_seq/UAV123/group1", "startFrame": 2515, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_3.txt", "object_class": "person"}, {"name": "uav_group1_4", "path": "data_seq/UAV123/group1", "startFrame": 3925, "endFrame": 4873, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_4.txt", "object_class": "person"}, {"name": "uav_group2_1", "path": "data_seq/UAV123/group2", "startFrame": 1, "endFrame": 907, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_1.txt", "object_class": "person"}, {"name": "uav_group2_2", "path": "data_seq/UAV123/group2", "startFrame": 907, "endFrame": 1771, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_2.txt", "object_class": "person"}, {"name": "uav_group2_3", "path": "data_seq/UAV123/group2", "startFrame": 1771, "endFrame": 2683, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_3.txt", "object_class": "person"}, {"name": "uav_group3_1", "path": "data_seq/UAV123/group3", "startFrame": 1, "endFrame": 1567, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_1.txt", 
"object_class": "person"}, {"name": "uav_group3_2", "path": "data_seq/UAV123/group3", "startFrame": 1567, "endFrame": 2827, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_2.txt", "object_class": "person"}, {"name": "uav_group3_3", "path": "data_seq/UAV123/group3", "startFrame": 2827, "endFrame": 4369, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_3.txt", "object_class": "person"}, {"name": "uav_group3_4", "path": "data_seq/UAV123/group3", "startFrame": 4369, "endFrame": 5527, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_4.txt", "object_class": "person"}, {"name": "uav_person1", "path": "data_seq/UAV123/person1", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1.txt", "object_class": "person"}, {"name": "uav_person10", "path": "data_seq/UAV123/person10", "startFrame": 1, "endFrame": 1021, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person10.txt", "object_class": "person"}, {"name": "uav_person11", "path": "data_seq/UAV123/person11", "startFrame": 1, "endFrame": 721, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person11.txt", "object_class": "person"}, {"name": "uav_person12_1", "path": "data_seq/UAV123/person12", "startFrame": 1, "endFrame": 601, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_1.txt", "object_class": "person"}, {"name": "uav_person12_2", "path": "data_seq/UAV123/person12", "startFrame": 601, "endFrame": 1621, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_2.txt", "object_class": "person"}, {"name": "uav_person13", "path": "data_seq/UAV123/person13", "startFrame": 1, "endFrame": 883, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person13.txt", "object_class": "person"}, {"name": "uav_person14_1", "path": "data_seq/UAV123/person14", "startFrame": 1, "endFrame": 847, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_1.txt", "object_class": "person"}, {"name": "uav_person14_2", "path": "data_seq/UAV123/person14", "startFrame": 847, "endFrame": 
1813, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_2.txt", "object_class": "person"}, {"name": "uav_person14_3", "path": "data_seq/UAV123/person14", "startFrame": 1813, "endFrame": 2923, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_3.txt", "object_class": "person"}, {"name": "uav_person15", "path": "data_seq/UAV123/person15", "startFrame": 1, "endFrame": 1339, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person15.txt", "object_class": "person"}, {"name": "uav_person16", "path": "data_seq/UAV123/person16", "startFrame": 1, "endFrame": 1147, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person16.txt", "object_class": "person"}, {"name": "uav_person17_1", "path": "data_seq/UAV123/person17", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_1.txt", "object_class": "person"}, {"name": "uav_person17_2", "path": "data_seq/UAV123/person17", "startFrame": 1501, "endFrame": 2347, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_2.txt", "object_class": "person"}, {"name": "uav_person18", "path": "data_seq/UAV123/person18", "startFrame": 1, "endFrame": 1393, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person18.txt", "object_class": "person"}, {"name": "uav_person19_1", "path": "data_seq/UAV123/person19", "startFrame": 1, "endFrame": 1243, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_1.txt", "object_class": "person"}, {"name": "uav_person19_2", "path": "data_seq/UAV123/person19", "startFrame": 1243, "endFrame": 2791, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_2.txt", "object_class": "person"}, {"name": "uav_person19_3", "path": "data_seq/UAV123/person19", "startFrame": 2791, "endFrame": 4357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_3.txt", "object_class": "person"}, {"name": "uav_person1_s", "path": "data_seq/UAV123/person1_s", "startFrame": 1, "endFrame": 1600, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1_s.txt", "object_class": 
"person"}, {"name": "uav_person2_1", "path": "data_seq/UAV123/person2", "startFrame": 1, "endFrame": 1189, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_1.txt", "object_class": "person"}, {"name": "uav_person2_2", "path": "data_seq/UAV123/person2", "startFrame": 1189, "endFrame": 2623, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_2.txt", "object_class": "person"}, {"name": "uav_person20", "path": "data_seq/UAV123/person20", "startFrame": 1, "endFrame": 1783, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person20.txt", "object_class": "person"}, {"name": "uav_person21", "path": "data_seq/UAV123/person21", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person21.txt", "object_class": "person"}, {"name": "uav_person22", "path": "data_seq/UAV123/person22", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person22.txt", "object_class": "person"}, {"name": "uav_person23", "path": "data_seq/UAV123/person23", "startFrame": 1, "endFrame": 397, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person23.txt", "object_class": "person"}, {"name": "uav_person2_s", "path": "data_seq/UAV123/person2_s", "startFrame": 1, "endFrame": 250, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_s.txt", "object_class": "person"}, {"name": "uav_person3", "path": "data_seq/UAV123/person3", "startFrame": 1, "endFrame": 643, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3.txt", "object_class": "person"}, {"name": "uav_person3_s", "path": "data_seq/UAV123/person3_s", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3_s.txt", "object_class": "person"}, {"name": "uav_person4_1", "path": "data_seq/UAV123/person4", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person4_1.txt", "object_class": "person"}, {"name": "uav_person4_2", "path": "data_seq/UAV123/person4", "startFrame": 1501, "endFrame": 2743, "nz": 6, "ext": 
"jpg", "anno_path": "anno/UAV123/person4_2.txt", "object_class": "person"}, {"name": "uav_person5_1", "path": "data_seq/UAV123/person5", "startFrame": 1, "endFrame": 877, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_1.txt", "object_class": "person"}, {"name": "uav_person5_2", "path": "data_seq/UAV123/person5", "startFrame": 877, "endFrame": 2101, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_2.txt", "object_class": "person"}, {"name": "uav_person6", "path": "data_seq/UAV123/person6", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person6.txt", "object_class": "person"}, {"name": "uav_person7_1", "path": "data_seq/UAV123/person7", "startFrame": 1, "endFrame": 1249, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_1.txt", "object_class": "person"}, {"name": "uav_person7_2", "path": "data_seq/UAV123/person7", "startFrame": 1249, "endFrame": 2065, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_2.txt", "object_class": "person"}, {"name": "uav_person8_1", "path": "data_seq/UAV123/person8", "startFrame": 1, "endFrame": 1075, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_1.txt", "object_class": "person"}, {"name": "uav_person8_2", "path": "data_seq/UAV123/person8", "startFrame": 1075, "endFrame": 1525, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_2.txt", "object_class": "person"}, {"name": "uav_person9", "path": "data_seq/UAV123/person9", "startFrame": 1, "endFrame": 661, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person9.txt", "object_class": "person"}, {"name": "uav_truck1", "path": "data_seq/UAV123/truck1", "startFrame": 1, "endFrame": 463, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck1.txt", "object_class": "truck"}, {"name": "uav_truck2", "path": "data_seq/UAV123/truck2", "startFrame": 1, "endFrame": 385, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck2.txt", "object_class": "truck"}, {"name": "uav_truck3", "path": "data_seq/UAV123/truck3", 
"startFrame": 1, "endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck3.txt", "object_class": "truck"}, {"name": "uav_truck4_1", "path": "data_seq/UAV123/truck4", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_1.txt", "object_class": "truck"}, {"name": "uav_truck4_2", "path": "data_seq/UAV123/truck4", "startFrame": 577, "endFrame": 1261, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_2.txt", "object_class": "truck"}, {"name": "uav_uav1_1", "path": "data_seq/UAV123/uav1", "startFrame": 1, "endFrame": 1555, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_1.txt", "object_class": "aircraft"}, {"name": "uav_uav1_2", "path": "data_seq/UAV123/uav1", "startFrame": 1555, "endFrame": 2377, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_2.txt", "object_class": "aircraft"}, {"name": "uav_uav1_3", "path": "data_seq/UAV123/uav1", "startFrame": 2473, "endFrame": 3469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_3.txt", "object_class": "aircraft"}, {"name": "uav_uav2", "path": "data_seq/UAV123/uav2", "startFrame": 1, "endFrame": 133, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav2.txt", "object_class": "aircraft"}, {"name": "uav_uav3", "path": "data_seq/UAV123/uav3", "startFrame": 1, "endFrame": 265, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav3.txt", "object_class": "aircraft"}, {"name": "uav_uav4", "path": "data_seq/UAV123/uav4", "startFrame": 1, "endFrame": 157, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav4.txt", "object_class": "aircraft"}, {"name": "uav_uav5", "path": "data_seq/UAV123/uav5", "startFrame": 1, "endFrame": 139, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav5.txt", "object_class": "aircraft"}, {"name": "uav_uav6", "path": "data_seq/UAV123/uav6", "startFrame": 1, "endFrame": 109, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav6.txt", "object_class": "aircraft"}, {"name": "uav_uav7", "path": "data_seq/UAV123/uav7", "startFrame": 1, 
"endFrame": 373, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav7.txt", "object_class": "aircraft"}, {"name": "uav_uav8", "path": "data_seq/UAV123/uav8", "startFrame": 1, "endFrame": 301, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav8.txt", "object_class": "aircraft"}, {"name": "uav_wakeboard1", "path": "data_seq/UAV123/wakeboard1", "startFrame": 1, "endFrame": 421, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard1.txt", "object_class": "person"}, {"name": "uav_wakeboard10", "path": "data_seq/UAV123/wakeboard10", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard10.txt", "object_class": "person"}, {"name": "uav_wakeboard2", "path": "data_seq/UAV123/wakeboard2", "startFrame": 1, "endFrame": 733, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard2.txt", "object_class": "person"}, {"name": "uav_wakeboard3", "path": "data_seq/UAV123/wakeboard3", "startFrame": 1, "endFrame": 823, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard3.txt", "object_class": "person"}, {"name": "uav_wakeboard4", "path": "data_seq/UAV123/wakeboard4", "startFrame": 1, "endFrame": 697, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard4.txt", "object_class": "person"}, {"name": "uav_wakeboard5", "path": "data_seq/UAV123/wakeboard5", "startFrame": 1, "endFrame": 1675, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard5.txt", "object_class": "person"}, {"name": "uav_wakeboard6", "path": "data_seq/UAV123/wakeboard6", "startFrame": 1, "endFrame": 1165, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard6.txt", "object_class": "person"}, {"name": "uav_wakeboard7", "path": "data_seq/UAV123/wakeboard7", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard7.txt", "object_class": "person"}, {"name": "uav_wakeboard8", "path": "data_seq/UAV123/wakeboard8", "startFrame": 1, "endFrame": 1543, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard8.txt", 
"object_class": "person"}, {"name": "uav_wakeboard9", "path": "data_seq/UAV123/wakeboard9", "startFrame": 1, "endFrame": 355, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard9.txt", "object_class": "person"} ] return sequence_info_list ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/vot.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016 """ import sys import copy import collections try: import trax import trax.server TRAX = True except ImportError: TRAX = False Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) def parse_region(string): tokens = map(float, string.split(',')) if len(tokens) == 4: return Rectangle(tokens[0], tokens[1], tokens[2], tokens[3]) elif len(tokens) % 2 == 0 and len(tokens) > 4: return Polygon([Point(tokens[i],tokens[i+1]) for i in xrange(0,len(tokens),2)]) return None def encode_region(region): if isinstance(region, Polygon): return ','.join(['{},{}'.format(p.x,p.y) for p in region.points]) elif isinstance(region, Rectangle): return '{},{},{},{}'.format(region.x, region.y, region.width, region.height) else: return "" def convert_region(region, to): if to == 'rectangle': if isinstance(region, Rectangle): return copy.copy(region) elif isinstance(region, Polygon): top = sys.float_info.max bottom = sys.float_info.min left = sys.float_info.max right = sys.float_info.min for point in region.points: top = min(top, point.y) bottom = max(bottom, point.y) left = min(left, point.x) right = max(right, point.x) return Rectangle(left, top, right - left, bottom - top) else: return None if to == 'polygon': if isinstance(region, Rectangle): points = [] points.append((region.x, region.y)) points.append((region.x + region.width, region.y)) 
points.append((region.x + region.width, region.y + region.height)) points.append((region.x, region.y + region.height)) return Polygon(points) elif isinstance(region, Polygon): return copy.copy(region) else: return None return None class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format): """ Constructor Args: region_format: Region format options """ assert(region_format in ['rectangle', 'polygon']) if TRAX: options = trax.server.ServerOptions(region_format, trax.image.PATH) self._trax = trax.server.Server(options) request = self._trax.wait() assert(request.type == 'initialize') if request.region.type == 'polygon': self._region = Polygon([Point(x[0], x[1]) for x in request.region.points]) else: self._region = Rectangle(request.region.x, request.region.y, request.region.width, request.region.height) self._image = str(request.image) self._trax.status(request.region) else: self._files = [x.strip('\n') for x in open('images.txt', 'r').readlines()] self._frame = 0 self._region = convert_region(parse_region(open('region.txt', 'r').readline()), region_format) self._result = [] def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = 0): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, Rectangle) or isinstance(region, Polygon)) if TRAX: if isinstance(region, Polygon): tregion = trax.region.Polygon([(x.x, x.y) for x in region.points]) else: tregion = trax.region.Rectangle(region.x, region.y, region.width, region.height) self._trax.status(tregion, {"confidence" : confidence}) else: self._result.append(region) self._frame += 1 def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if TRAX: if hasattr(self, "_image"): image = str(self._image) del self._image 
return image request = self._trax.wait() if request.type == 'frame': return str(request.image) else: return None else: if self._frame >= len(self._files): return None return self._files[self._frame] def quit(self): if TRAX: self._trax.quit() elif hasattr(self, '_result'): with open('output.txt', 'w') as f: for r in self._result: f.write(encode_region(r)) f.write('\n') def __del__(self): self.quit() ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/vot2020.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016 """ import sys import copy import collections import numpy as np try: import trax except ImportError: raise Exception('TraX support not found. Please add trax module to Python path.') def make_full_size(x, output_sz): ''' zero-pad input x (right and down) to match output_sz x: numpy array e.g., binary mask output_sz: size of the output [width, height] ''' if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]: return x pad_x = output_sz[0] - x.shape[1] if pad_x < 0: x = x[:, :x.shape[1] + pad_x] # padding has to be set to zero, otherwise pad function fails pad_x = 0 pad_y = output_sz[1] - x.shape[0] if pad_y < 0: x = x[:x.shape[0] + pad_y, :] # padding has to be set to zero, otherwise pad function fails pad_y = 0 return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0) Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format, channels=None): """ Constructor Args: region_format: Region format options """ assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK]) if channels is None: channels = ['color'] elif channels 
== 'rgbd': channels = ['color', 'depth'] elif channels == 'rgbt': channels = ['color', 'ir'] elif channels == 'ir': channels = ['ir'] else: raise Exception('Illegal configuration {}.'.format(channels)) self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot="python")) request = self._trax.wait() assert(request.type == 'initialize') if isinstance(request.region, trax.Polygon): self._region = Polygon([Point(x[0], x[1]) for x in request.region]) if isinstance(request.region, trax.Mask): self._region = request.region.array(True) else: self._region = Rectangle(*request.region.bounds()) self._image = [x.path() for k, x in request.image.items()] if len(self._image) == 1: self._image = self._image[0] self._trax.status(request.region) def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = None): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, (Rectangle, Polygon, np.ndarray))) if isinstance(region, Polygon): tregion = trax.Polygon.create([(x.x, x.y) for x in region.points]) if isinstance(region, np.ndarray): tregion = trax.Mask.create(region) else: tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height) properties = {} if not confidence is None: properties['confidence'] = confidence self._trax.status(tregion, properties) def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if hasattr(self, "_image"): image = self._image del self._image return image request = self._trax.wait() if request.type == 'frame': image = [x.path() for k, x in request.image.items()] if len(image) == 1: return image[0] return image else: return None def quit(self): if hasattr(self, '_trax'): self._trax.quit() def __del__(self): self.quit() 
# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/evaluation/votdataset.py
# ================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList


class VOTDataset(BaseDataset):
    """
    VOT2018 dataset

    Publication:
        The sixth Visual Object Tracking VOT2018 challenge results.
        Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder,
        Luka Cehovin Zajc, Tomas Vojir, Goutam Bhat, Alan Lukezic et al.
        ECCV, 2018
        https://prints.vicos.si/publications/365

    Download the dataset from http://www.votchallenge.net/vot2018/dataset.html
    """
    def __init__(self):
        super().__init__()
        self.base_path = self.env_settings.vot_path
        self.sequence_list = self._get_sequence_list()

    def get_sequence_list(self):
        """Return a SequenceList of all VOT2018 sequences."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        """Build a Sequence (frame paths + axis-aligned gt boxes) for one name.

        Polygon ground truth (8 values per row) is converted to its
        axis-aligned bounding box (x, y, w, h).
        """
        sequence_path = sequence_name
        nz = 8
        ext = 'jpg'
        start_frame = 1

        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)
        try:
            ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
        # FIX: was a bare 'except:', which also swallowed KeyboardInterrupt /
        # SystemExit. Only the delimiter fallback is intended here.
        except ValueError:
            ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)

        end_frame = ground_truth_rect.shape[0]

        frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(
            base_path=self.base_path, sequence_path=sequence_path,
            frame=frame_num, nz=nz, ext=ext)
            for frame_num in range(start_frame, end_frame + 1)]

        # Convert gt: polygon (x1,y1,...,x4,y4) -> axis-aligned (x, y, w, h)
        if ground_truth_rect.shape[1] > 4:
            gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]
            gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]

            x1 = np.amin(gt_x_all, 1).reshape(-1, 1)
            y1 = np.amin(gt_y_all, 1).reshape(-1, 1)
            x2 = np.amax(gt_x_all, 1).reshape(-1, 1)
            y2 = np.amax(gt_y_all, 1).reshape(-1, 1)

            ground_truth_rect = np.concatenate((x1, y1, x2 - x1, y2 - y1), 1)
        return Sequence(sequence_name, frames, 'vot', ground_truth_rect)

    def __len__(self):
        return len(self.sequence_list)

    def _get_sequence_list(self):
        # The 60 sequence names of VOT2018.
        sequence_list = ['ants1', 'ants3', 'bag', 'ball1', 'ball2', 'basketball',
                         'birds1', 'blanket', 'bmx', 'bolt1', 'bolt2', 'book',
                         'butterfly', 'car1', 'conduction1', 'crabs1', 'crossing',
                         'dinosaur', 'drone_across', 'drone_flip', 'drone1',
                         'fernando', 'fish1', 'fish2', 'fish3', 'flamingo1',
                         'frisbee', 'girl', 'glove', 'godfather', 'graduate',
                         'gymnastics1', 'gymnastics2', 'gymnastics3', 'hand',
                         'handball1', 'handball2', 'helicopter', 'iceskater1',
                         'iceskater2', 'leaves', 'matrix', 'motocross1',
                         'motocross2', 'nature', 'pedestrian1', 'rabbit', 'racing',
                         'road', 'shaking', 'sheep', 'singer2', 'singer3',
                         'soccer1', 'soccer2', 'soldier', 'tiger', 'traffic',
                         'wiper', 'zebrafish1']
        return sequence_list


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/experiments/__init__.py
# ================================================

# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/experiments/myexperiments.py
# ================================================
from pytracking.evaluation import Tracker, get_dataset, trackerlist


def atom_nfs_uav():
    # Run three runs of ATOM on NFS and UAV datasets
    trackers = trackerlist('atom', 'default', range(3))

    dataset = get_dataset('nfs', 'uav')
    return trackers, dataset


def uav_test():
    # Run DiMP18, ATOM and ECO on the UAV dataset
    trackers = trackerlist('dimp', 'dimp18', range(1)) + \
               trackerlist('atom', 'default', range(1)) + \
               trackerlist('eco', 'default', range(1))

    dataset = get_dataset('uav')
    return trackers, dataset


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/features/__init__.py
# ================================================

# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/features/augmentation.py
# ================================================
import numpy as np
import math
import torch
import torch.nn.functional as F
import cv2 as cv import random from pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy class Transform: """Base data augmentation transform class.""" def __init__(self, output_sz = None, shift = None): self.output_sz = output_sz self.shift = (0,0) if shift is None else shift def __call__(self, image, is_mask=False): raise NotImplementedError def crop_to_output(self, image): if isinstance(image, torch.Tensor): imsz = image.shape[2:] if self.output_sz is None: pad_h = 0 pad_w = 0 else: pad_h = (self.output_sz[0] - imsz[0]) / 2 pad_w = (self.output_sz[1] - imsz[1]) / 2 pad_left = math.floor(pad_w) + self.shift[1] pad_right = math.ceil(pad_w) - self.shift[1] pad_top = math.floor(pad_h) + self.shift[0] pad_bottom = math.ceil(pad_h) - self.shift[0] return F.pad(image, (pad_left, pad_right, pad_top, pad_bottom), 'replicate') else: raise NotImplementedError class Identity(Transform): """Identity transformation.""" def __call__(self, image, is_mask=False): return self.crop_to_output(image) class FlipHorizontal(Transform): """Flip along horizontal axis.""" def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): return self.crop_to_output(image.flip((3,))) else: return np.fliplr(image) class FlipVertical(Transform): """Flip along vertical axis.""" def __call__(self, image: torch.Tensor, is_mask=False): if isinstance(image, torch.Tensor): return self.crop_to_output(image.flip((2,))) else: return np.flipud(image) class Translation(Transform): """Translate.""" def __init__(self, translation, output_sz = None, shift = None): super().__init__(output_sz, shift) self.shift = (self.shift[0] + translation[0], self.shift[1] + translation[1]) def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): return self.crop_to_output(image) else: raise NotImplementedError class Scale(Transform): """Scale.""" def __init__(self, scale_factor, output_sz = None, shift = None): super().__init__(output_sz, shift) self.scale_factor = 
scale_factor def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): # Calculate new size. Ensure that it is even so that crop/pad becomes easier h_orig, w_orig = image.shape[2:] if h_orig != w_orig: raise NotImplementedError h_new = round(h_orig /self.scale_factor) h_new += (h_new - h_orig) % 2 w_new = round(w_orig /self.scale_factor) w_new += (w_new - w_orig) % 2 image_resized = F.interpolate(image, [h_new, w_new], mode='bilinear') return self.crop_to_output(image_resized) else: raise NotImplementedError class Affine(Transform): """Affine transformation.""" def __init__(self, transform_matrix, output_sz = None, shift = None): super().__init__(output_sz, shift) self.transform_matrix = transform_matrix def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image)))) else: return cv.warpAffine(image, self.transform_matrix, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE) class Rotate(Transform): """Rotate with given angle.""" def __init__(self, angle, output_sz = None, shift = None): super().__init__(output_sz, shift) self.angle = math.pi * angle/180 def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image)))) else: c = (np.expand_dims(np.array(image.shape[:2]),1)-1)/2 R = np.array([[math.cos(self.angle), math.sin(self.angle)], [-math.sin(self.angle), math.cos(self.angle)]]) H =np.concatenate([R, c - R @ c], 1) return cv.warpAffine(image, H, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE) class Blur(Transform): """Blur with given sigma (can be axis dependent).""" def __init__(self, sigma, output_sz = None, shift = None): super().__init__(output_sz, shift) if isinstance(sigma, (float, int)): sigma = (sigma, sigma) self.sigma = sigma self.filter_size = [math.ceil(2*s) for s in self.sigma] x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in 
self.filter_size] self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)] self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum() self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum() def __call__(self, image, is_mask=False): if isinstance(image, torch.Tensor): sz = image.shape[2:] im1 = F.conv2d(image.view(-1,1,sz[0],sz[1]), self.filter[0], padding=(self.filter_size[0],0)) return self.crop_to_output(F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(1,-1,sz[0],sz[1])) else: raise NotImplementedError class RandomAffine(Transform): """Affine transformation.""" def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0, border_mode='constant', output_sz = None, shift = None): super().__init__(output_sz, shift) self.p_flip = p_flip self.max_rotation = max_rotation self.max_shear = max_shear self.max_scale = max_scale self.max_ar_factor = max_ar_factor self.pad_amount = 0 if border_mode == 'constant': self.border_flag = cv.BORDER_CONSTANT elif border_mode == 'replicate': self.border_flag == cv.BORDER_REPLICATE else: raise Exception self.roll_values = self.roll() def roll(self): do_flip = random.random() < self.p_flip theta = random.uniform(-self.max_rotation, self.max_rotation) shear_x = random.uniform(-self.max_shear, self.max_shear) shear_y = random.uniform(-self.max_shear, self.max_shear) ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor)) scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale)) return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor) def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors): im_h, im_w = image_shape t_mat = np.identity(3) if do_flip: if do_flip: t_mat[0, 0] = -1.0 t_mat[0, 2] = im_w t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0) t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3))) t_shear = 
np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w], [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h], [0.0, 0.0, 1.0]]) t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w], [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h], [0.0, 0.0, 1.0]]) t_mat = t_scale @ t_rot @ t_shear @ t_mat t_mat[0, 2] += self.pad_amount t_mat[1, 2] += self.pad_amount t_mat = t_mat[:2, :] return t_mat def __call__(self, image, is_mask=False): input_tensor = torch.is_tensor(image) if input_tensor: image = torch_to_numpy(image) do_flip, theta, shear_values, scale_factors = self.roll_values t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors) output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount) if not is_mask: image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR, borderMode=self.border_flag) else: image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_NEAREST, borderMode=self.border_flag) image_t = image_t.reshape(image.shape) if input_tensor: image_t = numpy_to_torch(image_t) return self.crop_to_output(image_t) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/color.py ================================================ import torch from pytracking.features.featurebase import FeatureBase class RGB(FeatureBase): """RGB feature normalized to [-0.5, 0.5].""" def dim(self): return 3 def stride(self): return self.pool_stride def extract(self, im: torch.Tensor): return im/255 - 0.5 class Grayscale(FeatureBase): """Grayscale feature normalized to [-0.5, 0.5].""" def dim(self): return 1 def stride(self): return self.pool_stride def extract(self, im: torch.Tensor): return torch.mean(im/255 - 0.5, 1, keepdim=True) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/deep.py ================================================ from 
pytracking.features.featurebase import FeatureBase, MultiFeatureBase
import torch
import torchvision
from pytracking import TensorList
from pytracking.evaluation.environment import env_settings
import os
from pytracking.utils.loading import load_network
from ltr.models.backbone.resnet18_vggm import resnet18_vggmconv1

normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])


class ResNet18m1(MultiFeatureBase):
    """ResNet18 feature together with the VGG-m conv1 layer.
    args:
        output_layers: List of layers to output.
        net_path: Relative or absolute net path (default should be fine).
        use_gpu: Use GPU or CPU.
    """
    def __init__(self, output_layers, net_path=None, use_gpu=True, *args, **kwargs):
        super(ResNet18m1, self).__init__(*args, **kwargs)

        # Validate requested layers up front so a typo fails fast.
        for l in output_layers:
            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer')

        self.output_layers = list(output_layers)
        self.use_gpu = use_gpu
        self.net_path = 'resnet18_vggmconv1/resnet18_vggmconv1.pth' if net_path is None else net_path

    def initialize(self):
        # Expand a scalar pool stride into one stride per output layer.
        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        # Downsampling factor / channel count of each backbone layer.
        self.layer_stride = {'vggconv1': 2, 'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'fc': None}
        self.layer_dim = {'vggconv1': 96, 'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'fc': None}

        # ImageNet normalization statistics, broadcastable over (B, C, H, W).
        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)

        # Resolve the checkpoint path: absolute path as-is, otherwise search every
        # configured network root.
        if os.path.isabs(self.net_path):
            net_path_full = [self.net_path]
        else:
            root_paths = env_settings().network_path
            if isinstance(root_paths, str):
                root_paths = [root_paths]
            net_path_full = [os.path.join(root, self.net_path) for root in root_paths]

        self.net = None
        for net_path in net_path_full:
            try:
                self.net = resnet18_vggmconv1(self.output_layers, path=net_path)
                break
            except:
                # Try the next candidate root; a final failure is reported below.
                pass
        if self.net is None:
            raise Exception('Did not find network file {}'.format(self.net_path))

        if self.use_gpu:
            self.net.cuda()
        self.net.eval()

    def dim(self):
        return TensorList([self.layer_dim[l] for l in self.output_layers])

    def stride(self):
        # Effective stride = backbone layer stride times the post-pooling stride.
        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])

    def extract(self, im: torch.Tensor):
        # Expects pixel values in [0, 255]; normalize in place with ImageNet stats.
        im = im / 255
        im -= self.mean
        im /= self.std

        if self.use_gpu:
            im = im.cuda()

        with torch.no_grad():
            return TensorList(self.net(im).values())


class ATOMResNet18(MultiFeatureBase):
    """ResNet18 feature with the ATOM IoUNet.
    args:
        output_layers: List of layers to output.
        net_path: Relative or absolute net path (default should be fine).
        use_gpu: Use GPU or CPU.
    """
    def __init__(self, output_layers=('layer3',), net_path='atom_iou', use_gpu=True, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.output_layers = list(output_layers)
        self.use_gpu = use_gpu
        self.net_path = net_path

    def initialize(self):
        self.net = load_network(self.net_path)

        if self.use_gpu:
            self.net.cuda()
        self.net.eval()

        # The IoU predictor (bounding-box regressor) is reused directly by the tracker.
        self.iou_predictor = self.net.bb_regressor

        self.layer_stride = {'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'classification': 16, 'fc': None}
        self.layer_dim = {'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'classification': 256, 'fc': None}

        self.iounet_feature_layers = self.net.bb_regressor_layer

        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        # One backbone pass must produce both the requested outputs and the IoUNet inputs.
        self.feature_layers = sorted(list(set(self.output_layers + self.iounet_feature_layers)))

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)

    def dim(self):
        return TensorList([self.layer_dim[l] for l in self.output_layers])

    def stride(self):
        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])

    def extract(self, im: torch.Tensor):
        # Expects pixel values in [0, 255]; normalize in place with ImageNet stats.
        im = im / 255
        im -= self.mean
        im /=
self.std

        if self.use_gpu:
            im = im.cuda()

        with torch.no_grad():
            output_features = self.net.extract_features(im, self.feature_layers)

        # Store the raw resnet features which are input to iounet
        self.iounet_backbone_features = TensorList([output_features[layer].clone() for layer in self.iounet_feature_layers])

        # Store the processed features from iounet, just before pooling
        with torch.no_grad():
            self.iounet_features = TensorList(self.iou_predictor.get_iou_feat(self.iounet_backbone_features))

        return TensorList([output_features[layer] for layer in self.output_layers])

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/extractor.py ================================================
import torch
from pytracking.features.preprocessing import sample_patch
from pytracking import TensorList


class ExtractorBase:
    """Base feature extractor class.
    args:
        features: List of features.
    """
    def __init__(self, features):
        self.features = features

    def initialize(self):
        for f in self.features:
            f.initialize()


class SingleResolutionExtractor(ExtractorBase):
    """Single resolution feature extractor.
    args:
        features: List of features.
    """
    def __init__(self, features):
        super().__init__(features)

        # All features share one stride; collapse a list/TensorList to its first entry.
        self.feature_stride = self.features[0].stride()
        if isinstance(self.feature_stride, (list, TensorList)):
            self.feature_stride = self.feature_stride[0]

    def stride(self):
        return self.feature_stride

    def size(self, input_sz):
        return input_sz // self.stride()

    def extract(self, im, pos, scales, image_sz):
        if isinstance(scales, (int, float)):
            scales = [scales]

        # Get image patches
        im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])

        # Compute features
        feature_map = torch.cat(TensorList([f.get_feature(im_patches) for f in self.features]).unroll(), dim=1)

        return feature_map


class MultiResolutionExtractor(ExtractorBase):
    """Multi-resolution feature extractor.
    args:
        features: List of features.
    """
    def __init__(self, features, patch_mode='replicate', max_scale_change=None):
        super().__init__(features)
        self.patch_mode = patch_mode
        self.max_scale_change = max_scale_change
        # None means color/gray is not yet known; all features are then active.
        self.is_color = None

    def stride(self):
        return torch.Tensor(TensorList([f.stride() for f in self.features if self._return_feature(f)]).unroll().list())

    def size(self, input_sz):
        return TensorList([f.size(input_sz) for f in self.features if self._return_feature(f)]).unroll()

    def dim(self):
        return TensorList([f.dim() for f in self.features if self._return_feature(f)]).unroll()

    def get_fparams(self, name: str = None):
        if name is None:
            return [f.fparams for f in self.features if self._return_feature(f)]
        return TensorList([getattr(f.fparams, name) for f in self.features if self._return_feature(f)]).unroll()

    def get_attribute(self, name: str, ignore_missing: bool = False):
        # With ignore_missing, features lacking the attribute are skipped;
        # otherwise a None placeholder is returned for them.
        if ignore_missing:
            return TensorList([getattr(f, name) for f in self.features if self._return_feature(f) and hasattr(f, name)])
        else:
            return TensorList([getattr(f, name, None) for f in self.features if self._return_feature(f)])

    def get_unique_attribute(self, name: str):
        # Exactly one active feature may own the attribute; raise otherwise.
        feat = None
        for f in self.features:
            if self._return_feature(f) and hasattr(f, name):
                if feat is not None:
                    raise RuntimeError('The attribute was not unique.')
                feat = f
        if feat is None:
            raise RuntimeError('The attribute did not exist')
        return getattr(feat, name)

    def _return_feature(self, f):
        # Active when color mode is unknown, or when the feature supports the current mode.
        return self.is_color is None or self.is_color and f.use_for_color or not self.is_color and f.use_for_gray

    def set_is_color(self, is_color: bool):
        self.is_color = is_color

    def extract(self, im, pos, scales, image_sz, return_patches=False):
        """Extract features.
        args:
            im: Image.
            pos: Center position for extraction.
            scales: Image scales to extract features from.
            image_sz: Size to resize the image samples to before extraction.
        """
        if isinstance(scales, (int, float)):
            scales = [scales]

        # Get image patches
        patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=self.patch_mode,
                                                    max_scale_change=self.max_scale_change) for s in scales))
        im_patches = torch.cat(list(patch_iter))
        patch_coords = torch.cat(list(coord_iter))

        # im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])

        # Compute features
        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()

        if return_patches:
            return feature_map, patch_coords, im_patches
        else:
            return feature_map, patch_coords

    def extract_transformed(self, im, pos, scale, image_sz, transforms):
        """Extract features from a set of transformed image samples.
        args:
            im: Image.
            pos: Center position for extraction.
            scale: Image scale to extract features from.
            image_sz: Size to resize the image samples to before extraction.
            transforms: A set of image transforms to apply.
        """
        # Get image patche
        im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz)

        # Apply transforms
        im_patches = torch.cat([T(im_patch) for T in transforms])

        # Compute features
        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()

        return feature_map

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/featurebase.py ================================================
import torch
import torch.nn.functional as F
from pytracking import TensorList


class FeatureBase:
    """Base feature class.
    args:
        fparams: Feature specific parameters.
        pool_stride: Amount of average pooling to apply do downsample the feature map.
        output_size: Alternatively, specify the output size of the feature map. Adaptive average pooling will be applied.
        normalize_power: The power exponent for the normalization. None means no normalization (default).
        use_for_color: Use this feature for color images.
        use_for_gray: Use this feature for grayscale images.
"""
    def __init__(self, fparams = None, pool_stride = None, output_size = None, normalize_power = None, use_for_color = True, use_for_gray = True):
        self.fparams = fparams
        self.pool_stride = 1 if pool_stride is None else pool_stride
        self.output_size = output_size
        self.normalize_power = normalize_power
        self.use_for_color = use_for_color
        self.use_for_gray = use_for_gray

    def initialize(self):
        pass

    def dim(self):
        raise NotImplementedError

    def stride(self):
        raise NotImplementedError

    def size(self, im_sz):
        # Derived size: either the downsampled input size or the fixed output size.
        if self.output_size is None:
            return im_sz // self.stride()
        if isinstance(im_sz, torch.Tensor):
            return torch.Tensor([self.output_size[0], self.output_size[1]])
        return self.output_size

    def extract(self, im):
        """Performs feature extraction."""
        raise NotImplementedError

    def get_feature(self, im: torch.Tensor):
        """Get the feature. Generally, call this function.
        args:
            im: image patch as a torch.Tensor.
        """
        # Return empty tensor if it should not be used
        is_color = im.shape[1] == 3
        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:
            return torch.Tensor([])

        # Extract feature
        feat = self.extract(im)

        # Pool/downsample
        if self.output_size is not None:
            feat = F.adaptive_avg_pool2d(feat, self.output_size)
        elif self.pool_stride != 1:
            feat = F.avg_pool2d(feat, self.pool_stride, self.pool_stride)

        # Normalize
        if self.normalize_power is not None:
            # p-norm style normalization over all elements of each sample in the batch.
            feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /
                     (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)

        return feat


class MultiFeatureBase(FeatureBase):
    """Base class for features potentially having multiple feature blocks as output (like CNNs).
    See FeatureBase for more info.
    """
    def size(self, im_sz):
        if self.output_size is None:
            return TensorList([im_sz // s for s in self.stride()])
        if isinstance(im_sz, torch.Tensor):
            return TensorList([im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]]) for sz, s in zip(self.output_size, self.stride())])
        # NOTE(review): when output_size is set and im_sz is not a Tensor, this falls
        # through and returns None — looks unintended; confirm against callers.

    def get_feature(self, im: torch.Tensor):
        """Get the feature. Generally, call this function.
        args:
            im: image patch as a torch.Tensor.
        """
        # Return empty tensor if it should not be used
        is_color = im.shape[1] == 3
        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:
            return torch.Tensor([])

        feat_list = self.extract(im)

        output_sz = [None]*len(feat_list) if self.output_size is None else self.output_size

        # Pool/downsample
        for i, (sz, s) in enumerate(zip(output_sz, self.pool_stride)):
            if sz is not None:
                feat_list[i] = F.adaptive_avg_pool2d(feat_list[i], sz)
            elif s != 1:
                feat_list[i] = F.avg_pool2d(feat_list[i], s, s)

        # Normalize
        if self.normalize_power is not None:
            for feat in feat_list:
                # In-place division so the entries of feat_list are updated.
                feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /
                         (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)

        return feat_list

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/net_wrappers.py ================================================
import torch
from pytracking.utils.loading import load_network


class NetWrapper:
    """Used for wrapping networks in pytracking.
Network modules and functions can be accessed directly as if they were members of this class."""
    # Recursion guard for __getattr__: self.net lookups can re-enter __getattr__
    # before self.net is set; the counter breaks the infinite recursion.
    _rec_iter=0

    def __init__(self, net_path, use_gpu=True, initialize=False, **kwargs):
        self.net_path = net_path
        self.use_gpu = use_gpu
        self.net = None
        self.net_kwargs = kwargs
        if initialize:
            self.initialize()

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped network. The _rec_iter
        # counter returns None instead of recursing forever when 'net' itself
        # is missing (e.g. before load_network has run).
        if self._rec_iter > 0:
            self._rec_iter = 0
            return None
        self._rec_iter += 1
        try:
            ret_val = getattr(self.net, name)
        except Exception as e:
            self._rec_iter = 0
            raise e
        self._rec_iter = 0
        return ret_val

    def load_network(self):
        self.net = load_network(self.net_path, **self.net_kwargs)
        if self.use_gpu:
            self.cuda()
        self.eval()

    def initialize(self):
        self.load_network()


class NetWithBackbone(NetWrapper):
    """Wraps a network with a common backbone.
    Assumes the network have a 'extract_backbone_features(image)' function."""

    def __init__(self, net_path, use_gpu=True, initialize=False, image_format='rgb',
                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), **kwargs):
        super().__init__(net_path, use_gpu, initialize, **kwargs)

        self.image_format = image_format
        self._mean = torch.Tensor(mean).view(1, -1, 1, 1)
        self._std = torch.Tensor(std).view(1, -1, 1, 1)

    def initialize(self, image_format='rgb', mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        # NOTE(review): the parameters here are ignored; normalization settings come
        # from __init__. Kept for signature compatibility with callers.
        super().initialize()

    def preprocess_image(self, im: torch.Tensor):
        """Normalize the image with the mean and standard deviation used by the network."""

        if self.image_format in ['rgb', 'bgr']:
            im = im/255

        if self.image_format in ['bgr', 'bgr255']:
            # Swap channel order from RGB to BGR.
            im = im[:, [2, 1, 0], :, :]
        im -= self._mean
        im /= self._std

        if self.use_gpu:
            im = im.cuda()

        return im

    def extract_backbone(self, im: torch.Tensor):
        """Extract backbone features from the network.
        Expects a float tensor image with pixel range [0, 255]."""
        im = self.preprocess_image(im)
        return self.net.extract_backbone_features(im)

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/preprocessing.py ================================================
import torch
import torch.nn.functional as F
import numpy as np


def numpy_to_torch(a: np.ndarray):
    # HxWxC numpy image -> (1, C, H, W) float tensor.
    return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)


def torch_to_numpy(a: torch.Tensor):
    # (1, C, H, W) tensor -> HxWxC numpy array.
    return a.squeeze(0).permute(1,2,0).numpy()


def sample_patch_transformed(im, pos, scale, image_sz, transforms, is_mask=False):
    """Extract transformed image samples.
    args:
        im: Image.
        pos: Center position for extraction.
        scale: Image scale to extract features from.
        image_sz: Size to resize the image samples to before extraction.
        transforms: A set of image transforms to apply.
    """

    # Get image patche
    im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz, is_mask=is_mask)

    # Apply transforms
    im_patches = torch.cat([T(im_patch, is_mask=is_mask) for T in transforms])

    return im_patches


def sample_patch_multiscale(im, pos, scales, image_sz, mode: str='replicate', max_scale_change=None):
    """Extract image patches at multiple scales.
    args:
        im: Image.
        pos: Center position for extraction.
        scales: Image scales to extract image patches from.
image_sz: Size to resize the image samples to mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major' max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode """ if isinstance(scales, (int, float)): scales = [scales] # Get image patches patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=mode, max_scale_change=max_scale_change) for s in scales)) im_patches = torch.cat(list(patch_iter)) patch_coords = torch.cat(list(coord_iter)) return im_patches, patch_coords def sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None, mode: str = 'replicate', max_scale_change=None, is_mask=False): """Sample an image patch. args: im: Image pos: center position of crop sample_sz: size to crop output_sz: size to resize to mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major' max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode """ # if mode not in ['replicate', 'inside']: # raise ValueError('Unknown border mode \'{}\'.'.format(mode)) # copy and convert posl = pos.long().clone() pad_mode = mode # Get new sample size if forced inside the image if mode == 'inside' or mode == 'inside_major': pad_mode = 'replicate' im_sz = torch.Tensor([im.shape[2], im.shape[3]]) shrink_factor = (sample_sz.float() / im_sz) if mode == 'inside': shrink_factor = shrink_factor.max() elif mode == 'inside_major': shrink_factor = shrink_factor.min() shrink_factor.clamp_(min=1, max=max_scale_change) sample_sz = (sample_sz.float() / shrink_factor).long() # Compute pre-downsampling factor if output_sz is not None: resize_factor = torch.min(sample_sz.float() / output_sz.float()).item() df = int(max(int(resize_factor - 0.1), 1)) else: df = int(1) sz = sample_sz.float() / df # new size # Do downsampling if df > 1: os = posl % df # offset posl = (posl - os) / df # new position im2 = im[..., os[0].item()::df, 
os[1].item()::df] # downsample else: im2 = im # compute size to crop szl = torch.max(sz.round(), torch.Tensor([2])).long() # Extract top and bottom coordinates tl = posl - (szl - 1)/2 br = posl + szl/2 + 1 # Shift the crop to inside if mode == 'inside' or mode == 'inside_major': im2_sz = torch.LongTensor([im2.shape[2], im2.shape[3]]) shift = (-tl).clamp(0) - (br - im2_sz).clamp(0) tl += shift br += shift outside = ((-tl).clamp(0) + (br - im2_sz).clamp(0)) // 2 shift = (-tl - outside) * (outside > 0).long() tl += shift br += shift # Get image patch # im_patch = im2[...,tl[0].item():br[0].item(),tl[1].item():br[1].item()] # Get image patch if not is_mask: im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]), pad_mode) else: im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2])) # Get image coordinates patch_coord = df * torch.cat((tl, br)).view(1,4) if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]): return im_patch.clone(), patch_coord # Resample if not is_mask: im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear') else: im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='nearest') return im_patch, patch_coord ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/features/util.py ================================================ import torch from pytracking.features.featurebase import FeatureBase class Concatenate(FeatureBase): """A feature that concatenates other features. args: features: List of features to concatenate. 
""" def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True): super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray) self.features = features self.input_stride = self.features[0].stride() for feat in self.features: if self.input_stride != feat.stride(): raise ValueError('Strides for the features must be the same for a bultiresolution feature.') def dim(self): return sum([f.dim() for f in self.features]) def stride(self): return self.pool_stride * self.input_stride def extract(self, im: torch.Tensor): return torch.cat([f.get_feature(im) for f in self.features], 1) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/libs/__init__.py ================================================ from .tensorlist import TensorList from .tensordict import TensorDict ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/libs/complex.py ================================================ import torch from pytracking.libs.tensorlist import tensor_operation def is_complex(a: torch.Tensor) -> bool: return a.dim() >= 4 and a.shape[-1] == 2 def is_real(a: torch.Tensor) -> bool: return not is_complex(a) @tensor_operation def mult(a: torch.Tensor, b: torch.Tensor): """Pointwise complex multiplication of complex tensors.""" if is_real(a): if a.dim() >= b.dim(): raise ValueError('Incorrect dimensions.') # a is real return mult_real_cplx(a, b) if is_real(b): if b.dim() >= a.dim(): raise ValueError('Incorrect dimensions.') # b is real return mult_real_cplx(b, a) # Both complex c = mult_real_cplx(a[..., 0], b) c[..., 0] -= a[..., 1] * b[..., 1] c[..., 1] += a[..., 1] * b[..., 0] return c @tensor_operation def mult_conj(a: torch.Tensor, b: torch.Tensor): """Pointwise complex multiplication of complex tensors, with conjugate on b: a*conj(b).""" if is_real(a): if a.dim() >= b.dim(): raise ValueError('Incorrect 
dimensions.')

        # a is real
        return mult_real_cplx(a, conj(b))
    if is_real(b):
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')

        # b is real
        return mult_real_cplx(b, a)

    # Both complex: a*conj(b) = (ar*br + ai*bi) + i*(ai*br - ar*bi).
    c = mult_real_cplx(b[...,0], a)
    c[..., 0] += a[..., 1] * b[..., 1]
    c[..., 1] -= a[..., 0] * b[..., 1]
    return c


@tensor_operation
def mult_real_cplx(a: torch.Tensor, b: torch.Tensor):
    """Pointwise complex multiplication of real tensor a with complex tensor b."""

    if is_real(b):
        raise ValueError('Last dimension must have length 2.')

    return a.unsqueeze(-1) * b


@tensor_operation
def div(a: torch.Tensor, b: torch.Tensor):
    """Pointwise complex division of complex tensors."""

    if is_real(b):
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')

        # b is real
        return div_cplx_real(a, b)

    # a/b = a*conj(b) / |b|^2
    return div_cplx_real(mult_conj(a, b), abs_sqr(b))


@tensor_operation
def div_cplx_real(a: torch.Tensor, b: torch.Tensor):
    """Pointwise complex division of complex tensor a with real tensor b."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    return a / b.unsqueeze(-1)


@tensor_operation
def abs_sqr(a: torch.Tensor):
    """Squared absolute value."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    return torch.sum(a*a, -1)


@tensor_operation
def abs(a: torch.Tensor):
    """Absolute value."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    return torch.sqrt(abs_sqr(a))


@tensor_operation
def conj(a: torch.Tensor):
    """Complex conjugate."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    # return a * torch.Tensor([1, -1], device=a.device)
    return complex(a[...,0], -a[...,1])


@tensor_operation
def real(a: torch.Tensor):
    """Real part."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    return a[..., 0]


@tensor_operation
def imag(a: torch.Tensor):
    """Imaginary part."""

    if is_real(a):
        raise ValueError('Last dimension must have length 2.')

    return a[..., 1]


@tensor_operation
def complex(a: torch.Tensor, b: torch.Tensor = None):
    """Create complex tensor from real and imaginary part."""

    if b is None:
        b = a.new_zeros(a.shape)
    elif a is None:
        a = b.new_zeros(b.shape)

    return torch.cat((a.unsqueeze(-1), b.unsqueeze(-1)), -1)


@tensor_operation
def mtimes(a: torch.Tensor, b: torch.Tensor, conj_a=False, conj_b=False):
    """Complex matrix multiplication of complex tensors.
    The dimensions (-3, -2) are matrix multiplied. -1 is the complex dimension."""

    if is_real(a):
        if a.dim() >= b.dim():
            raise ValueError('Incorrect dimensions.')
        return mtimes_real_complex(a, b, conj_b=conj_b)
    if is_real(b):
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')
        return mtimes_complex_real(a, b, conj_a=conj_a)

    # All four conjugation variants, expanded explicitly.
    if not conj_a and not conj_b:
        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),
                       torch.matmul(a[..., 0], b[..., 1]) + torch.matmul(a[..., 1], b[..., 0]))
    if conj_a and not conj_b:
        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),
                       torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))
    if not conj_a and conj_b:
        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),
                       torch.matmul(a[..., 1], b[..., 0]) - torch.matmul(a[..., 0], b[..., 1]))
    if conj_a and conj_b:
        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),
                       -torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))


@tensor_operation
def mtimes_real_complex(a: torch.Tensor, b: torch.Tensor, conj_b=False):
    if is_real(b):
        raise ValueError('Incorrect dimensions.')

    if not conj_b:
        return complex(torch.matmul(a, b[..., 0]), torch.matmul(a, b[..., 1]))
    if conj_b:
        return complex(torch.matmul(a, b[..., 0]), -torch.matmul(a, b[..., 1]))


@tensor_operation
def mtimes_complex_real(a: torch.Tensor, b: torch.Tensor, conj_a=False):
    if is_real(a):
        raise ValueError('Incorrect dimensions.')

    if not conj_a:
        return complex(torch.matmul(a[..., 0], b), torch.matmul(a[..., 1], b))
    if conj_a:
        return complex(torch.matmul(a[..., 0], b), -torch.matmul(a[..., 1], b))


@tensor_operation
def exp_imag(a: torch.Tensor):
    """Complex exponential with imaginary input: e^(i*a)"""

    a = a.unsqueeze(-1)
    return torch.cat((torch.cos(a), torch.sin(a)), -1)

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/libs/dcf.py ================================================
import torch
import math
from pytracking import fourier
from pytracking import complex
import torch.nn.functional as F


def hann1d(sz: int, centered = True) -> torch.Tensor:
    """1D cosine window."""
    if centered:
        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))
    # Uncentered: peak at index 0, wrapped around the end.
    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz//2 + 1).float()))
    return torch.cat([w, w[1:sz-sz//2].flip((0,))])


def hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:
    """2D cosine window."""
    # Outer product of two 1D windows, shaped for (B, C, H, W) broadcasting.
    return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)


def hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor:
    """1D clipped cosine window."""

    # Ensure that the difference is even
    effective_sz += (effective_sz - sz) % 2
    effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1)

    pad = (sz - effective_sz) / 2
    window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate')

    if centered:
        return window
    else:
        # Circularly shift so the peak ends up at index (0, 0).
        mid = (sz / 2).int()
        window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3)
        return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2)


def gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor:
    # Fourier coefficients of a Gaussian; 'half' returns only non-negative frequencies.
    if half:
        k = torch.arange(0, int(sz/2+1))
    else:
        k = torch.arange(-int((sz-1)/2), int(sz/2+1))
    return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 *
(math.pi * sigma * k.float() / sz)**2)


def gauss_spatial(sz, sigma, center=0, end_pad=0):
    # Spatial Gaussian sampled on a centered grid, optionally padded at the end.
    k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad)
    return torch.exp(-1.0/(2*sigma**2) * (k - center)**2)


def label_function(sz: torch.Tensor, sigma: torch.Tensor):
    # Separable 2D Gaussian label in the Fourier domain.
    return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1)


def label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)):
    """The origin is in the middle of the image."""
    return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \
           gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1)


def cubic_spline_fourier(f, a):
    """The continuous Fourier transform of a cubic spline kernel."""

    bf = (6*(1 - torch.cos(2 * math.pi * f)) + 3*a*(1 - torch.cos(4 * math.pi * f))
          - (6 + 8*a)*math.pi*f*torch.sin(2 * math.pi * f) - 2*a*math.pi*f*torch.sin(4 * math.pi * f)) \
         / (4 * math.pi**4 * f**4)

    # Remove the 0/0 singularity at f = 0 (limit is 1).
    bf[f == 0] = 1

    return bf


def get_interp_fourier(sz: torch.Tensor, method='ideal', bicubic_param=0.5, centering=True, windowing=False, device='cpu'):
    # Fourier-domain interpolation kernels for each dimension.
    ky, kx = fourier.get_frequency_coord(sz)

    if method=='ideal':
        interp_y = torch.ones(ky.shape) / sz[0]
        interp_x = torch.ones(kx.shape) / sz[1]
    elif method=='bicubic':
        interp_y = cubic_spline_fourier(ky / sz[0], bicubic_param) / sz[0]
        interp_x = cubic_spline_fourier(kx / sz[1], bicubic_param) / sz[1]
    else:
        raise ValueError('Unknown method.')

    if centering:
        # Half-pixel phase shift to center the interpolation.
        interp_y = complex.mult(interp_y, complex.exp_imag((-math.pi/sz[0]) * ky))
        interp_x = complex.mult(interp_x, complex.exp_imag((-math.pi/sz[1]) * kx))

    if windowing:
        raise NotImplementedError

    return interp_y.to(device), interp_x.to(device)


def interpolate_dft(a: torch.Tensor, interp_fs) -> torch.Tensor:
    if isinstance(interp_fs, torch.Tensor):
        return complex.mult(a, interp_fs)
    if isinstance(interp_fs, (tuple, list)):
        # Separable interpolation: apply the y- then the x-kernel.
        return complex.mult(complex.mult(a, interp_fs[0]), interp_fs[1])
    raise ValueError('"interp_fs" must be tensor or tuple of tensors.')


def get_reg_filter(sz: torch.Tensor, target_sz: torch.Tensor, params):
    """Computes regularization filter in CCOT and ECO."""

    if not params.use_reg_window:
        return params.reg_window_min * torch.ones(1,1,1,1)

    if getattr(params, 'reg_window_square', False):
        target_sz = target_sz.prod().sqrt() * torch.ones(2)

    # Normalization factor
    reg_scale = 0.5 * target_sz

    # Construct grid
    if getattr(params, 'reg_window_centered', True):
        wrg = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32).view(1,1,-1,1)
        wcg = torch.arange(-int((sz[1]-1)/2), int(sz[1]/2+1), dtype=torch.float32).view(1,1,1,-1)
    else:
        wrg = torch.cat([torch.arange(0, int(sz[0]/2+1), dtype=torch.float32),
                         torch.arange(-int((sz[0] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,-1,1)
        wcg = torch.cat([torch.arange(0, int(sz[1]/2+1), dtype=torch.float32),
                         torch.arange(-int((sz[1] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,1,-1)

    # Construct regularization window
    reg_window = (params.reg_window_edge - params.reg_window_min) * \
                 (torch.abs(wrg/reg_scale[0])**params.reg_window_power + torch.abs(wcg/reg_scale[1])**params.reg_window_power) + params.reg_window_min

    # Compute DFT and enforce sparsity
    # NOTE(review): torch.rfft/torch.irfft were removed in torch >= 1.8; this file
    # targets legacy PyTorch versions.
    reg_window_dft = torch.rfft(reg_window, 2) / sz.prod()
    reg_window_dft_abs = complex.abs(reg_window_dft)
    reg_window_dft[reg_window_dft_abs < params.reg_sparsity_threshold * reg_window_dft_abs.max(), :] = 0

    # Do the inverse transform to correct for the window minimum
    reg_window_sparse = torch.irfft(reg_window_dft, 2, signal_sizes=sz.long().tolist())
    reg_window_dft[0,0,0,0,0] += params.reg_window_min - sz.prod() * reg_window_sparse.min()
    reg_window_dft = complex.real(fourier.rfftshift2(reg_window_dft))

    # Remove zeros
    max_inds,_ = reg_window_dft.nonzero().max(dim=0)
    mid_ind = int((reg_window_dft.shape[2]-1)/2)
    top = max_inds[-2].item() + 1
    bottom = 2*mid_ind - max_inds[-2].item()
    right = max_inds[-1].item() + 1
    reg_window_dft = reg_window_dft[..., bottom:top, :right]
    if reg_window_dft.shape[-1] > 1:
        # Rebuild the symmetric negative-frequency half.
        reg_window_dft = torch.cat([reg_window_dft[..., 1:].flip((2, 3)), reg_window_dft], -1)
    return reg_window_dft


def max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):
    """Computes maximum and argmax in the last two dimensions."""

    max_val_row, argmax_row = torch.max(a, dim=-2)
    max_val, argmax_col = torch.max(max_val_row, dim=-1)
    # Pick the row index belonging to the winning column for every batch entry.
    argmax_row = argmax_row.view(argmax_col.numel(),-1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]
    argmax_row = argmax_row.reshape(argmax_col.shape)
    argmax = torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)
    return max_val, argmax

================================================ FILE: artrackv2_mindspore/external/AR/pytracking/libs/fourier.py ================================================
import torch
import torch.nn.functional as F
from pytracking import complex, TensorList
from pytracking.libs.tensorlist import tensor_operation


@tensor_operation
def rfftshift2(a: torch.Tensor):
    # Shift the zero-frequency component to the center along dim 2 (rfft layout).
    h = a.shape[2] + 2
    return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2)


@tensor_operation
def irfftshift2(a: torch.Tensor):
    # Inverse of rfftshift2.
    mid = int((a.shape[2]-1)/2)
    return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2)


@tensor_operation
def cfft2(a):
    """Do FFT and center the low frequency component.
Always produces odd (full) output sizes.""" return rfftshift2(torch.rfft(a, 2)) @tensor_operation def cifft2(a, signal_sizes=None): """Do inverse FFT corresponding to cfft2.""" return torch.irfft(irfftshift2(a), 2, signal_sizes=signal_sizes) @tensor_operation def sample_fs(a: torch.Tensor, grid_sz: torch.Tensor = None, rescale = True): """Samples the Fourier series.""" # Size of the fourier series sz = torch.Tensor([a.shape[2], 2*a.shape[3]-1]).float() # Default grid if grid_sz is None or sz[0] == grid_sz[0] and sz[1] == grid_sz[1]: if rescale: return sz.prod().item() * cifft2(a) return cifft2(a) if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]: raise ValueError("Only grid sizes that are smaller than the Fourier series size are supported.") tot_pad = (grid_sz - sz).tolist() is_even = [s.item() % 2 == 0 for s in sz] # Compute paddings pad_top = int((tot_pad[0]+1)/2) if is_even[0] else int(tot_pad[0]/2) pad_bottom = int(tot_pad[0] - pad_top) pad_right = int((tot_pad[1]+1)/2) if rescale: return grid_sz.prod().item() * cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist()) else: return cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist()) def get_frequency_coord(sz, add_complex_dim = False, device='cpu'): """Frequency coordinates.""" ky = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32, device=device).view(1,1,-1,1) kx = torch.arange(0, int(sz[1]/2+1), dtype=torch.float32, device=device).view(1,1,1,-1) if add_complex_dim: ky = ky.unsqueeze(-1) kx = kx.unsqueeze(-1) return ky, kx @tensor_operation def shift_fs(a: torch.Tensor, shift: torch.Tensor): """Shift a sample a in the Fourier domain. Params: a : The fourier coefficiens of the sample. 
def sum_fs(a: TensorList) -> torch.Tensor:
    """Sum a list of Fourier series expansions.

    Series of different sizes are aligned at the spectrum center: smaller
    spectra are added into the central region of the largest one.
    """
    s = None
    mid = None
    # Start from the largest expansion so the accumulator can hold all others.
    for e in sorted(a, key=lambda elem: elem.shape[-3], reverse=True):
        if s is None:
            s = e.clone()
            mid = int((s.shape[-3] - 1) / 2)
        else:
            # Compute coordinates of the centered sub-block.
            top = mid - int((e.shape[-3] - 1) / 2)
            bottom = mid + int(e.shape[-3] / 2) + 1
            right = e.shape[-2]

            # Add the data
            s[..., top:bottom, :right, :] += e
    return s


def sum_fs12(a: TensorList) -> torch.Tensor:
    """Sum a list of Fourier series expansions.

    Same as sum_fs, but the spatial dims are the *leading* dims (0, 1).
    """
    s = None
    mid = None
    for e in sorted(a, key=lambda elem: elem.shape[0], reverse=True):
        if s is None:
            s = e.clone()
            mid = int((s.shape[0] - 1) / 2)
        else:
            # Compute coordinates
            top = mid - int((e.shape[0] - 1) / 2)
            bottom = mid + int(e.shape[0] / 2) + 1
            right = e.shape[1]

            # Add the data
            s[top:bottom, :right, ...] += e
    return s
@tensor_operation
def inner_prod_fs(a: torch.Tensor, b: torch.Tensor):
    """Inner product in the Fourier-series domain.

    Frequencies are counted twice (conjugate symmetry of the rfft layout),
    except the first column, which is only stored once — hence the
    ``2 * <a, b> - <a0, b0>`` correction.
    """
    if complex.is_complex(a) and complex.is_complex(b):
        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0, :].reshape(-1) @ b[:, :, :, 0, :].reshape(-1)
    elif complex.is_real(a) and complex.is_real(b):
        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0].reshape(-1) @ b[:, :, :, 0].reshape(-1)
    else:
        raise NotImplementedError('Not implemented for mixed real and complex.')


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/libs/operation.py
# ================================================
import torch
import torch.nn.functional as F
from pytracking.libs.tensorlist import tensor_operation, TensorList


@tensor_operation
def conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None):
    """Standard conv2d. Returns the input if weight=None.

    args:
        mode: None (use ``padding`` as given), 'same', 'valid' or 'full';
              mutually exclusive with a nonzero ``padding``.
    """
    if weight is None:
        return input

    ind = None
    if mode is not None:
        if padding != 0:
            raise ValueError('Cannot input both padding and mode.')
        if mode == 'same':
            padding = (weight.shape[2] // 2, weight.shape[3] // 2)
            # Even kernel sizes over-pad by one output row/col; remember which
            # trailing slice to drop after the convolution.
            if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:
                ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),
                       slice(-1) if weight.shape[3] % 2 == 0 else slice(None))
        elif mode == 'valid':
            padding = (0, 0)
        elif mode == 'full':
            padding = (weight.shape[2] - 1, weight.shape[3] - 1)
        else:
            raise ValueError('Unknown mode for padding.')

    out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    if ind is None:
        return out
    return out[:, :, ind[0], ind[1]]


@tensor_operation
def conv1x1(input: torch.Tensor, weight: torch.Tensor):
    """Do a convolution with a 1x1 kernel weights.
    Implemented with matmul, which can be faster than using conv.
    """
    if weight is None:
        return input

    return torch.conv2d(input, weight)
# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/libs/optimization.py
# ================================================
import torch
import torch.autograd
import math
from pytracking.libs import TensorList
from pytracking.utils.plotting import plot_graph
from ltr.models.layers.activation import softmax_reg


class L2Problem:
    """Base class for representing an L2 optimization problem."""

    def __call__(self, x: TensorList) -> TensorList:
        """Shall compute the residuals of the problem."""
        raise NotImplementedError

    def ip_input(self, a, b):
        """Inner product of the input space."""
        return sum(a.view(-1) @ b.view(-1))

    def ip_output(self, a, b):
        """Inner product of the output space."""
        return sum(a.view(-1) @ b.view(-1))

    def M1(self, x):
        """M1 preconditioner."""
        return x

    def M2(self, x):
        """M2 preconditioner."""
        return x


class MinimizationProblem:
    """General minimization problem."""

    def __call__(self, x: TensorList) -> TensorList:
        """Shall compute the loss."""
        raise NotImplementedError

    def ip_input(self, a, b):
        """Inner product of the input space."""
        return sum(a.view(-1) @ b.view(-1))

    def M1(self, x):
        # M1 preconditioner (identity by default).
        return x

    def M2(self, x):
        # M2 preconditioner (identity by default).
        return x


class ConjugateGradientBase:
    """Conjugate Gradient optimizer base class. Implements the CG loop.

    Subclasses implement A (the left-hand-side operator), and may override
    ip / M1 / M2. State (search direction p, rho, previous residual) is kept
    across calls so CG can be warm-started between frames.
    """

    def __init__(self, fletcher_reeves=True, standard_alpha=True, direction_forget_factor=0, debug=False):
        self.fletcher_reeves = fletcher_reeves
        self.standard_alpha = standard_alpha
        self.direction_forget_factor = direction_forget_factor
        self.debug = debug

        # State
        self.p = None
        self.rho = torch.ones(1)
        self.r_prev = None

        # Right hand side
        self.b = None

    def reset_state(self):
        # Forget the previous search direction and residual.
        self.p = None
        self.rho = torch.ones(1)
        self.r_prev = None

    def run_CG(self, num_iter, x=None, eps=0.0):
        """Main conjugate gradient method.

        args:
            num_iter: Number of iterations.
            x: Initial guess. Assumed zero if None.
            eps: Stop if the residual norm gets smaller than this.
        """

        # Apply forgetting factor
        if self.direction_forget_factor == 0:
            self.reset_state()
        elif self.p is not None:
            # Inflate rho so the old direction is gradually down-weighted.
            self.rho /= self.direction_forget_factor

        if x is None:
            r = self.b.clone()
        else:
            r = self.b - self.A(x)

        # Norms of residuals etc for debugging
        resvec = None
        if self.debug:
            normr = self.residual_norm(r)
            resvec = torch.zeros(num_iter + 1)
            resvec[0] = normr

        # Loop over iterations
        for ii in range(num_iter):
            # Preconditioners
            y = self.M1(r)
            z = self.M2(y)

            rho1 = self.rho
            self.rho = self.ip(r, z)

            if self.check_zero(self.rho):
                if self.debug:
                    print('Stopped CG since rho = 0')
                    if resvec is not None:
                        resvec = resvec[:ii + 1]
                return x, resvec

            if self.p is None:
                self.p = z.clone()
            else:
                if self.fletcher_reeves:
                    beta = self.rho / rho1
                else:
                    # Polak-Ribiere formula, clamped to keep beta >= 0.
                    rho2 = self.ip(self.r_prev, z)
                    beta = (self.rho - rho2) / rho1
                    beta = beta.clamp(0)
                self.p = z + self.p * beta

            q = self.A(self.p)
            pq = self.ip(self.p, q)

            if self.standard_alpha:
                alpha = self.rho / pq
            else:
                alpha = self.ip(self.p, r) / pq

            # Save old r for PR formula
            if not self.fletcher_reeves:
                self.r_prev = r.clone()

            # Form new iterate
            if x is None:
                x = self.p * alpha
            else:
                x += self.p * alpha

            # Skip the residual update on the last iteration unless needed.
            if ii < num_iter - 1 or self.debug:
                r -= q * alpha

            if eps > 0.0 or self.debug:
                normr = self.residual_norm(r)

            if self.debug:
                self.evaluate_CG_iteration(x)
                resvec[ii + 1] = normr

            if eps > 0 and normr <= eps:
                if self.debug:
                    print('Stopped CG since norm smaller than eps')
                break

        if resvec is not None:
            resvec = resvec[:ii + 2]

        return x, resvec

    def A(self, x):
        # Implements the left hand operation
        raise NotImplementedError

    def ip(self, a, b):
        # Implements the inner product
        return a.view(-1) @ b.view(-1)

    def residual_norm(self, r):
        res = self.ip(r, r).sum()
        if isinstance(res, (TensorList, list, tuple)):
            res = sum(res)
        return res.sqrt()

    def check_zero(self, s, eps=0.0):
        ss = s.abs() <= eps
        if isinstance(ss, (TensorList, list, tuple)):
            ss = sum(ss)
        return ss.item() > 0

    def M1(self, x):
        # M1 preconditioner
        return x

    def M2(self, x):
        # M2 preconditioner
        return x

    def evaluate_CG_iteration(self, x):
        # Debug hook: subclasses may record per-iteration loss/gradient stats.
        pass


class ConjugateGradient(ConjugateGradientBase):
    """Conjugate Gradient optimizer, performing single linearization of the residuals in the start."""

    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps=0.0, fletcher_reeves=True,
                 standard_alpha=True, direction_forget_factor=0, debug=False, plotting=False, visdom=None):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or plotting)

        self.problem = problem
        self.x = variable

        self.plotting = plotting
        self.fig_num = (10, 11)
        self.visdom = visdom

        self.cg_eps = cg_eps
        self.f0 = None          # residuals at the linearization point (with graph)
        self.g = None           # detached residuals, re-attached for double backward
        self.dfdxt_g = None     # J^T f0, kept with graph so A() can do J^T J p

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)

    def clear_temp(self):
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

    def run(self, num_cg_iter):
        """Run the optimizer with the provided number of iterations."""

        if num_cg_iter == 0:
            return

        lossvec = None
        if self.debug:
            lossvec = torch.zeros(2)

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        # Create copy with graph detached
        self.g = self.f0.detach()

        if self.debug:
            lossvec[0] = self.problem.ip_output(self.g, self.g)

        self.g.requires_grad_(True)

        # Get df/dx^t @ f0
        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))

        # Get the right hand side
        self.b = - self.dfdxt_g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.f0 = self.problem(self.x)
            lossvec[-1] = self.problem.ip_output(self.f0, self.f0)
            self.residuals = torch.cat((self.residuals, res))
            self.losses = torch.cat((self.losses, lossvec))
            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')

        self.x.detach_()
        self.clear_temp()

    def A(self, x):
        # Gauss-Newton operator J^T J x via two vector-Jacobian products:
        # first J x (grad of dfdxt_g wrt g), then J^T (J x).
        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)
        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))

    def ip(self, a, b):
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)
class GaussNewtonCG(ConjugateGradientBase):
    """Gauss-Newton with Conjugate Gradient optimizer.

    Re-linearizes the residuals every GN iteration and solves the normal
    equations J^T J dx = -J^T f with CG (see A()).
    """

    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps=0.0, fletcher_reeves=True,
                 standard_alpha=True, direction_forget_factor=0, debug=False, analyze=False, plotting=False,
                 visdom=None):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)

        self.problem = problem
        self.x = variable

        self.analyze_convergence = analyze
        self.plotting = plotting
        self.fig_num = (10, 11, 12)
        self.visdom = visdom

        self.cg_eps = cg_eps
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)

    def clear_temp(self):
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

    def run_GN(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, num_cg_iter, num_gn_iter=None):
        """Run the optimizer.
        args:
            num_cg_iter: Number of CG iterations per GN iter. If list, then each entry specifies number of CG iterations
                         and number of GN iterations is given by the length of the list.
            num_gn_iter: Number of GN iterations. Shall only be given if num_cg_iter is an integer.
        """

        if isinstance(num_cg_iter, int):
            if num_gn_iter is None:
                raise ValueError('Must specify number of GN iter if CG iter is constant')
            num_cg_iter = [num_cg_iter] * num_gn_iter

        num_gn_iter = len(num_cg_iter)
        if num_gn_iter == 0:
            return

        if self.analyze_convergence:
            self.evaluate_CG_iteration(0)

        # Outer loop for running the GN iterations.
        for cg_iter in num_cg_iter:
            self.run_GN_iter(cg_iter)

        if self.debug:
            if not self.analyze_convergence:
                self.f0 = self.problem(self.x)
                loss = self.problem.ip_output(self.f0, self.f0)
                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')
                if self.analyze_convergence:
                    self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')
                if self.analyze_convergence:
                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')

        self.x.detach_()
        self.clear_temp()

        return self.losses, self.residuals

    def run_GN_iter(self, num_cg_iter):
        """Runs a single GN iteration."""

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        # Create copy with graph detached
        self.g = self.f0.detach()

        if self.debug and not self.analyze_convergence:
            loss = self.problem.ip_output(self.g, self.g)
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

        self.g.requires_grad_(True)

        # Get df/dx^t @ f0
        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))

        # Get the right hand side
        self.b = - self.dfdxt_g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.residuals = torch.cat((self.residuals, res))

    def A(self, x):
        # Gauss-Newton operator J^T J x (two vector-Jacobian products).
        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)
        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))

    def ip(self, a, b):
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)

    def evaluate_CG_iteration(self, delta_x):
        # Debug hook: loss and gradient magnitude at the candidate iterate.
        if self.analyze_convergence:
            x = (self.x + delta_x).detach()
            x.requires_grad_(True)

            # compute loss and gradient
            f = self.problem(x)
            loss = self.problem.ip_output(f, f)
            grad = TensorList(torch.autograd.grad(loss, x))

            # store in the vectors
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))
            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))
class GradientDescentL2:
    """Gradient descent with momentum for L2 problems."""

    def __init__(self, problem: L2Problem, variable: TensorList, step_length: float, momentum: float = 0.0,
                 debug=False, plotting=False, visdom=None):

        self.problem = problem
        self.x = variable

        # NOTE(review): attribute name keeps the historical 'legnth' spelling;
        # external code may read it, so it is not renamed here.
        self.step_legnth = step_length
        self.momentum = momentum

        self.debug = debug or plotting
        self.plotting = plotting
        self.fig_num = (10, 11)
        self.visdom = visdom

        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)
        self.residuals = None

        self.clear_temp()

    def clear_temp(self):
        self.f0 = None
        self.dir = None

    def run(self, num_iter, dummy=None):

        if num_iter == 0:
            return

        lossvec = None
        if self.debug:
            lossvec = torch.zeros(num_iter + 1)
            grad_mags = torch.zeros(num_iter + 1)

        for i in range(num_iter):
            self.x.requires_grad_(True)

            # Evaluate function at current estimate
            self.f0 = self.problem(self.x)

            # Compute loss
            loss = self.problem.ip_output(self.f0, self.f0)

            # Compute grad
            grad = TensorList(torch.autograd.grad(loss, self.x))

            # Update direction (heavy-ball momentum)
            if self.dir is None:
                self.dir = grad
            else:
                self.dir = grad + self.momentum * self.dir

            self.x.detach_()
            self.x -= self.step_legnth * self.dir

            if self.debug:
                lossvec[i] = loss.item()
                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()

        if self.debug:
            # One extra evaluation to record the final loss/gradient.
            self.x.requires_grad_(True)
            self.f0 = self.problem(self.x)
            loss = self.problem.ip_output(self.f0, self.f0)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            lossvec[-1] = self.problem.ip_output(self.f0, self.f0).item()
            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()
            self.losses = torch.cat((self.losses, lossvec))
            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))
            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')

        self.x.detach_()
        self.clear_temp()


class NewtonCG(ConjugateGradientBase):
    """Newton with Conjugate Gradient. Handels general minimization problems.

    Solves (H + reg*I) dx = -g with CG, where H-vector products are computed
    by differentiating the gradient (see A()). The regularizer is multiplied
    by hessian_reg_factor after every Newton iteration.
    """

    def __init__(self, problem: MinimizationProblem, variable: TensorList, init_hessian_reg=0.0, hessian_reg_factor=1.0,
                 cg_eps=0.0, fletcher_reeves=True, standard_alpha=True, direction_forget_factor=0,
                 debug=False, analyze=False, plotting=False, fig_num=(10, 11, 12)):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)

        self.problem = problem
        self.x = variable

        self.analyze_convergence = analyze
        self.plotting = plotting
        self.fig_num = fig_num

        self.hessian_reg = init_hessian_reg
        self.hessian_reg_factor = hessian_reg_factor
        self.cg_eps = cg_eps
        self.f0 = None
        self.g = None

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)

    def clear_temp(self):
        self.f0 = None
        self.g = None

    def run(self, num_cg_iter, num_newton_iter=None):

        if isinstance(num_cg_iter, int):
            if num_cg_iter == 0:
                return
            if num_newton_iter is None:
                num_newton_iter = 1
            num_cg_iter = [num_cg_iter] * num_newton_iter

        num_newton_iter = len(num_cg_iter)
        if num_newton_iter == 0:
            return

        if self.analyze_convergence:
            self.evaluate_CG_iteration(0)

        for cg_iter in num_cg_iter:
            self.run_newton_iter(cg_iter)
            self.hessian_reg *= self.hessian_reg_factor

        if self.debug:
            if not self.analyze_convergence:
                loss = self.problem(self.x)
                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

            if self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')
                if self.analyze_convergence:
                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')

        self.x.detach_()
        self.clear_temp()

        return self.losses, self.residuals
    def run_newton_iter(self, num_cg_iter):

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        if self.debug and not self.analyze_convergence:
            self.losses = torch.cat((self.losses, self.f0.detach().cpu().view(-1)))

        # Gradient of loss (kept with graph so A() can differentiate it again)
        self.g = TensorList(torch.autograd.grad(self.f0, self.x, create_graph=True))

        # Get the right hand side
        self.b = - self.g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.residuals = torch.cat((self.residuals, res))

    def A(self, x):
        # Hessian-vector product plus Tikhonov regularization.
        return TensorList(torch.autograd.grad(self.g, self.x, x, retain_graph=True)) + self.hessian_reg * x

    def ip(self, a, b):
        # Implements the inner product
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)

    def evaluate_CG_iteration(self, delta_x):
        # Debug hook: loss and gradient magnitude at the candidate iterate.
        if self.analyze_convergence:
            x = (self.x + delta_x).detach()
            x.requires_grad_(True)

            # compute loss and gradient
            loss = self.problem(x)
            grad = TensorList(torch.autograd.grad(loss, x))

            # store in the vectors
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))
            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))


class GradientDescent:
    """Gradient descent for general minimization problems."""

    def __init__(self, problem: MinimizationProblem, variable: TensorList, step_length: float, momentum: float = 0.0,
                 debug=False, plotting=False, fig_num=(10, 11)):

        self.problem = problem
        self.x = variable

        # NOTE(review): historical 'legnth' spelling kept (see GradientDescentL2).
        self.step_legnth = step_length
        self.momentum = momentum

        self.debug = debug or plotting
        self.plotting = plotting
        self.fig_num = fig_num

        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)
        self.residuals = None

        self.clear_temp()

    def clear_temp(self):
        self.dir = None

    def run(self, num_iter, dummy=None):

        if num_iter == 0:
            return

        lossvec = None
        if self.debug:
            lossvec = torch.zeros(num_iter + 1)
            grad_mags = torch.zeros(num_iter + 1)

        for i in range(num_iter):
            self.x.requires_grad_(True)

            # Evaluate function at current estimate
            loss = self.problem(self.x)

            # Compute grad
            grad = TensorList(torch.autograd.grad(loss, self.x))

            # Update direction (heavy-ball momentum)
            if self.dir is None:
                self.dir = grad
            else:
                self.dir = grad + self.momentum * self.dir

            self.x.detach_()
            self.x -= self.step_legnth * self.dir

            if self.debug:
                lossvec[i] = loss.item()
                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()

        if self.debug:
            # One extra evaluation to record the final loss/gradient.
            self.x.requires_grad_(True)
            loss = self.problem(self.x)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            lossvec[-1] = loss.item()
            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()
            self.losses = torch.cat((self.losses, lossvec))
            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))
            if self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')

        self.x.detach_()
        self.clear_temp()


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/libs/tensordict.py
# ================================================
from collections import OrderedDict
import torch
import copy
class TensorDict(OrderedDict):
    """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality."""

    def concat(self, other):
        """Concatenates two dicts without copying internal data."""
        return TensorDict(self, **other)

    def copy(self):
        """Shallow copy that preserves the TensorDict type."""
        return TensorDict(super(TensorDict, self).copy())

    def __deepcopy__(self, memodict=None):
        # FIX: the original deep-copied list(self), which for a dict is only
        # the *keys*, so reconstruction lost every value (and crashed for
        # keys that are not 2-element iterables). Deep-copy the (key, value)
        # pairs instead. Also avoid a mutable default: copy.deepcopy mutates
        # the memo dict it is given.
        if memodict is None:
            memodict = {}
        return TensorDict(copy.deepcopy(list(self.items()), memodict))

    def __getattr__(self, name):
        """Broadcast a torch.Tensor method call over all values.

        E.g. ``d.cuda()`` returns a new TensorDict with every value that has
        a ``cuda`` method converted; values without it are kept unchanged.
        """
        if not hasattr(torch.Tensor, name):
            raise AttributeError('\'TensorDict\' object has no attribute \'{}\''.format(name))

        def apply_attr(*args, **kwargs):
            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})
        return apply_attr

    def attribute(self, attr: str, *args):
        """Return a TensorDict of the given attribute of every value."""
        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})

    def apply(self, fn, *args, **kwargs):
        """Apply fn to every value, returning a new TensorDict."""
        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})

    @staticmethod
    def _iterable(a):
        return isinstance(a, (TensorDict, list))


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/libs/tensorlist.py
# ================================================
import functools
import torch
import copy


class TensorList(list):
    """Container mainly used for lists of torch tensors. Extends lists with pytorch functionality.

    Arithmetic/comparison operators are applied elementwise; when the other
    operand is also a list/TensorList, the operation is taken pairwise.
    """

    def __init__(self, list_of_tensors=None):
        if list_of_tensors is None:
            list_of_tensors = list()
        super(TensorList, self).__init__(list_of_tensors)

    def __deepcopy__(self, memodict=None):
        # Avoid a mutable default: copy.deepcopy mutates the memo dict.
        if memodict is None:
            memodict = {}
        return TensorList(copy.deepcopy(list(self), memodict))

    def __getitem__(self, item):
        # int -> element; tuple/list of indices -> gathered TensorList;
        # slice -> TensorList.
        if isinstance(item, int):
            return super(TensorList, self).__getitem__(item)
        elif isinstance(item, (tuple, list)):
            return TensorList([super(TensorList, self).__getitem__(i) for i in item])
        else:
            return TensorList(super(TensorList, self).__getitem__(item))

    # ----- elementwise arithmetic operators -----

    def __add__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])
        return TensorList([e + other for e in self])

    def __radd__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])
        return TensorList([other + e for e in self])

    def __iadd__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] += e2
        else:
            for i in range(len(self)):
                self[i] += other
        return self

    def __sub__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])
        return TensorList([e - other for e in self])

    def __rsub__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])
        return TensorList([other - e for e in self])

    def __isub__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] -= e2
        else:
            for i in range(len(self)):
                self[i] -= other
        return self

    def __mul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 * e2 for e1, e2 in zip(self, other)])
        return TensorList([e * other for e in self])

    def __rmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])
        return TensorList([other * e for e in self])

    def __imul__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] *= e2
        else:
            for i in range(len(self)):
                self[i] *= other
        return self

    def __truediv__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])
        return TensorList([e / other for e in self])

    def __rtruediv__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])
        return TensorList([other / e for e in self])

    def __itruediv__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] /= e2
        else:
            for i in range(len(self)):
                self[i] /= other
        return self

    def __matmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])
        return TensorList([e @ other for e in self])

    def __rmatmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])
        return TensorList([other @ e for e in self])

    def __imatmul__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] @= e2
        else:
            for i in range(len(self)):
                self[i] @= other
        return self

    def __mod__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])
        return TensorList([e % other for e in self])

    def __rmod__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])
        return TensorList([other % e for e in self])

    def __pos__(self):
        return TensorList([+e for e in self])

    def __neg__(self):
        return TensorList([-e for e in self])

    def __le__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])
        return TensorList([e <= other for e in self])

    def __ge__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])
        return TensorList([e >= other for e in self])

    # ----- list-like utilities -----

    def concat(self, other):
        """List concatenation (not elementwise addition)."""
        return TensorList(super(TensorList, self).__add__(other))

    def copy(self):
        return TensorList(super(TensorList, self).copy())

    def unroll(self):
        """Recursively flatten nested TensorLists into a single level."""
        if not any(isinstance(t, TensorList) for t in self):
            return self

        new_list = TensorList()
        for t in self:
            if isinstance(t, TensorList):
                new_list.extend(t.unroll())
            else:
                new_list.append(t)
        return new_list

    def list(self):
        return list(self)

    def attribute(self, attr: str, *args):
        """Return a TensorList of the given attribute of every element."""
        return TensorList([getattr(e, attr, *args) for e in self])

    def apply(self, fn):
        """Apply fn to every element, returning a new TensorList."""
        return TensorList([fn(e) for e in self])

    def __getattr__(self, name):
        """Broadcast a torch.Tensor method call over all elements (e.g. ``tl.clone()``)."""
        if not hasattr(torch.Tensor, name):
            raise AttributeError('\'TensorList\' object has no attribute \'{}\''.format(name))

        def apply_attr(*args, **kwargs):
            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])
        return apply_attr

    @staticmethod
    def _iterable(a):
        return isinstance(a, (TensorList, list))


def tensor_operation(op):
    """Decorator that lifts an operation on tensors to TensorList operands.

    If any of the first two positional arguments is a TensorList, the wrapped
    op is applied elementwise (pairwise when both are lists); otherwise the
    op is called unchanged.
    """
    def islist(a):
        return isinstance(a, TensorList)

    @functools.wraps(op)
    def oplist(*args, **kwargs):
        if len(args) == 0:
            raise ValueError('Must be at least one argument without keyword (i.e. operand).')

        if len(args) == 1:
            if islist(args[0]):
                return TensorList([op(a, **kwargs) for a in args[0]])
        else:
            # Multiple operands, assume max two
            if islist(args[0]) and islist(args[1]):
                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])
            if islist(args[0]):
                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])
            if islist(args[1]):
                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])

        # None of the operands are lists
        return op(*args, **kwargs)

    return oplist


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/parameter/__init__.py
# ================================================

# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/__init__.py
# ================================================

# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/atom_gmm_sampl.py
# ================================================
from pytracking.utils import TrackerParams, FeatureParams, Choice
from pytracking.features.extractor import MultiResolutionExtractor
from pytracking.features import deep
import torch


def parameters():
    """Build the ATOM tracker parameter set for the 'atom_gmm_sampl' model."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                          # Debug level
    params.visualization = False              # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2     # Maximum image sample size
    params.min_image_sample_size = (18*16)**2     # Minimum image sample size
    params.search_area_scale = 5                  # Scale relative to target size
    params.feature_size_odd = False               # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                    # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60              # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6               # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0          # CG iterations to run after GN
    params.fletcher_reeves = False        # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True          # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None      # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                  # Learning rate
    deep_params.init_samples_minimum_weight = 0.25    # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1/4             # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250       # Memory size
    params.train_skipping = 10            # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4,4)       # Kernel size of filter
    deep_params.compressed_dim = 64       # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1         # Filter regularization factor
    deep_params.projection_reg = 1e-4     # Projection regularization factor

    # Windowing
    params.feature_window = False         # Perform windowing of features
    params.window_output = False          # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)  # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1      # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2     # How much to expand sample when doing augmentation
    params.random_shift_factor = 1/3             # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True          # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True        # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True       # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'            # Method for initializing the projection matrix
    params.filter_init_method = 'randn'          # Method for initializing the spatial filter
    params.projection_activation = 'none'        # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)   # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True          # Use this or not
    params.target_not_found_threshold = 0.25     # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8            # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5         # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2       # Target neighborhood to remove
    # NOTE(review): 'dispalcement' is a historical misspelling read elsewhere
    # in pytracking — do not rename without updating all readers.
    params.dispalcement_scale = 0.8              # Dispacement to consider for distractors
    params.hard_negative_learning_rate = 0.02    # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5             # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True    # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True                    # Use IoU net or not
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False           # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                          # Top-k average to estimate final box
    params.num_init_random_boxes = 9             # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                  # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                   # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6              # Limit on the aspect ratio
    params.box_refinement_iter = 10              # Number of iterations for refining the boxes
    params.box_refinement_step_length = (1e-2, 5e-2)  # 1   # Gradient step length in the bounding box refinement 5e-3 2e-2
    params.box_refinement_step_decay = 1         # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_gmm_sampl', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/atom_prob_ml.py
# ================================================
from pytracking.utils import TrackerParams, FeatureParams, Choice
from pytracking.features.extractor import MultiResolutionExtractor
from pytracking.features import deep
import torch


def parameters():
    """Build the ATOM tracker parameter set for the probabilistic ML model.

    NOTE: this function continues past the end of this chunk; only the
    visible prefix is documented here.
    """
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                          # Debug level
    params.visualization = False              # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2     # Maximum image sample size
    params.min_image_sample_size = (18*16)**2     # Minimum image sample size
    params.search_area_scale = 5                  # Scale relative to target size
    params.feature_size_odd = False               # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                    # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60              # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6               # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0          # CG iterations to run after GN
    params.fletcher_reeves = False        # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
params.standard_alpha = True # Use the standard formula for computing the step length in Conjugate Gradient params.CG_forgetting_rate = None # Forgetting rate of the last conjugate direction # Learning parameters for each feature type deep_params.learning_rate = 0.01 # Learning rate deep_params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples in memory deep_params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size # Training parameters params.sample_memory_size = 250 # Memory size params.train_skipping = 10 # How often to run training (every n-th frame) # Online model parameters deep_params.kernel_size = (4,4) # Kernel size of filter deep_params.compressed_dim = 64 # Dimension output of projection matrix deep_params.filter_reg = 1e-1 # Filter regularization factor deep_params.projection_reg = 1e-4 # Projection regularization factor # Windowing params.feature_window = False # Perform windowing of features params.window_output = False # Perform windowing of output scores # Detection parameters params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used) params.score_upsample_factor = 1 # How much Fourier upsampling to use # Init data augmentation parameters params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation params.random_shift_factor = 1/3 # How much random shift to do on each augmented sample deep_params.use_augmentation = True # Whether to use augmentation for this feature # Factorized convolution parameters # params.use_projection_matrix = True # Use projection matrix, i.e. 
use the factorized convolution formulation params.update_projection_matrix = True # Whether the projection matrix should be optimized or not params.proj_init_method = 'randn' # Method for initializing the projection matrix params.filter_init_method = 'randn' # Method for initializing the spatial filter params.projection_activation = 'none' # Activation function after projection ('none', 'relu', 'elu' or 'mlu') params.response_activation = ('mlu', 0.05) # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu') # Advanced localization parameters params.advanced_localization = True # Use this or not params.target_not_found_threshold = 0.25 # Absolute score threshold to detect target missing params.distractor_threshold = 0.8 # Relative threshold to find distractors params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples params.target_neighborhood_scale = 2.2 # Target neighborhood to remove params.dispalcement_scale = 0.8 # Dispacement to consider for distractors params.hard_negative_learning_rate = 0.02 # Learning rate if hard negative detected params.hard_negative_CG_iter = 5 # Number of optimization iterations to use if hard negative detected params.update_scale_when_uncertain = True # Update scale or not if distractor is close # IoUNet parameters params.use_iou_net = True # Use IoU net or not params.box_refinement_space = 'relative' params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 10 # Number of iterations for refining the boxes params.box_refinement_step_length = (2e-4, 
10e-4) # 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) # Setup the feature extractor (which includes the IoUNet) deep_fparams = FeatureParams(feature_params=[deep_params]) deep_feat = deep.ATOMResNet18(net_path='atom_prob_ml', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2) params.features = MultiResolutionExtractor([deep_feat]) return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/default.py ================================================ from pytracking.utils import TrackerParams, FeatureParams, Choice from pytracking.features.extractor import MultiResolutionExtractor from pytracking.features import deep import torch def parameters(): params = TrackerParams() # These are usually set from outside params.debug = 0 # Debug level params.visualization = False # Do visualization # Use GPU or not (IoUNet requires this to be True) params.use_gpu = True # Feature specific parameters deep_params = TrackerParams() # Patch sampling parameters params.max_image_sample_size = (18*16)**2 # Maximum image sample size params.min_image_sample_size = (18*16)**2 # Minimum image sample size params.search_area_scale = 5 # Scale relative to target size params.feature_size_odd = False # Good to use False for even-sized kernels and vice versa # Optimization parameters params.CG_iter = 5 # The number of Conjugate Gradient iterations in each update after the first frame params.init_CG_iter = 60 # The total number of Conjugate Gradient iterations used in the first frame params.init_GN_iter = 6 # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated) params.post_init_CG_iter = 0 # CG iterations to run after GN params.fletcher_reeves = False # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient params.standard_alpha = 
True # Use the standard formula for computing the step length in Conjugate Gradient params.CG_forgetting_rate = None # Forgetting rate of the last conjugate direction # Learning parameters for each feature type deep_params.learning_rate = 0.01 # Learning rate deep_params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples in memory deep_params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size # Training parameters params.sample_memory_size = 250 # Memory size params.train_skipping = 10 # How often to run training (every n-th frame) # Online model parameters deep_params.kernel_size = (4,4) # Kernel size of filter deep_params.compressed_dim = 64 # Dimension output of projection matrix deep_params.filter_reg = 1e-1 # Filter regularization factor deep_params.projection_reg = 1e-4 # Projection regularization factor # Windowing params.feature_window = False # Perform windowing of features params.window_output = False # Perform windowing of output scores # Detection parameters params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used) params.score_upsample_factor = 1 # How much Fourier upsampling to use # Init data augmentation parameters params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation params.random_shift_factor = 1/3 # How much random shift to do on each augmented sample deep_params.use_augmentation = True # Whether to use augmentation for this feature # Factorized convolution parameters # params.use_projection_matrix = True # Use projection matrix, i.e. 
use the factorized convolution formulation params.update_projection_matrix = True # Whether the projection matrix should be optimized or not params.proj_init_method = 'randn' # Method for initializing the projection matrix params.filter_init_method = 'randn' # Method for initializing the spatial filter params.projection_activation = 'none' # Activation function after projection ('none', 'relu', 'elu' or 'mlu') params.response_activation = ('mlu', 0.05) # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu') # Advanced localization parameters params.advanced_localization = True # Use this or not params.target_not_found_threshold = 0.25 # Absolute score threshold to detect target missing params.distractor_threshold = 0.8 # Relative threshold to find distractors params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples params.target_neighborhood_scale = 2.2 # Target neighborhood to remove params.dispalcement_scale = 0.8 # Dispacement to consider for distractors params.hard_negative_learning_rate = 0.02 # Learning rate if hard negative detected params.hard_negative_CG_iter = 5 # Number of optimization iterations to use if hard negative detected params.update_scale_when_uncertain = True # Update scale or not if distractor is close # IoUNet parameters params.use_iou_net = True # Use IoU net or not params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 5 # Number of iterations for refining the boxes params.box_refinement_step_length = 1 # Gradient step length in the bounding box 
refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) # Setup the feature extractor (which includes the IoUNet) deep_fparams = FeatureParams(feature_params=[deep_params]) deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2) params.features = MultiResolutionExtractor([deep_feat]) return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/default_vot.py ================================================ from pytracking.utils import TrackerParams, FeatureParams, Choice from pytracking.features.extractor import MultiResolutionExtractor from pytracking.features import deep import torch def parameters(): params = TrackerParams() # These are usually set from outside params.debug = 0 # Debug level params.visualization = False # Do visualization # Use GPU or not (IoUNet requires this to be True) params.use_gpu = True # Feature specific parameters deep_params = TrackerParams() # Patch sampling parameters params.max_image_sample_size = (14 * 16) ** 2 # Maximum image sample size params.min_image_sample_size = (14 * 16) ** 2 # Minimum image sample size params.search_area_scale = 4 # Scale relative to target size params.feature_size_odd = False # Good to use False for even-sized kernels and vice versa # Optimization parameters params.CG_iter = 5 # The number of Conjugate Gradient iterations in each update after the first frame params.init_CG_iter = 60 # The total number of Conjugate Gradient iterations used in the first frame params.init_GN_iter = 6 # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated) params.post_init_CG_iter = 0 # CG iterations to run after GN params.fletcher_reeves = False # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient params.standard_alpha = True # Use the standard formula for 
computing the step length in Conjugate Gradient params.CG_forgetting_rate = None # Forgetting rate of the last conjugate direction # Learning parameters for each feature type deep_params.learning_rate = 0.0075 # Learning rate deep_params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size # Training parameters params.sample_memory_size = 250 # Memory size params.train_skipping = 10 # How often to run training (every n-th frame) # Online model parameters deep_params.kernel_size = (4, 4) # Kernel size of filter deep_params.compressed_dim = 64 # Dimension output of projection matrix deep_params.filter_reg = 1e-1 # Filter regularization factor deep_params.projection_reg = 1e-4 # Projection regularization factor # Windowing params.feature_window = False # Perform windowing of features params.window_output = True # Perform windowing of output scores # Detection parameters params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used) params.score_upsample_factor = 1 # How much Fourier upsampling to use # Init data augmentation parameters params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation params.random_shift_factor = 1 / 3 # How much random shift to do on each augmented sample deep_params.use_augmentation = True # Whether to use augmentation for this feature # Factorized convolution parameters # params.use_projection_matrix = True # Use projection matrix, i.e. 
use the factorized convolution formulation params.update_projection_matrix = True # Whether the projection matrix should be optimized or not params.proj_init_method = 'randn' # Method for initializing the projection matrix params.filter_init_method = 'randn' # Method for initializing the spatial filter params.projection_activation = 'none' # Activation function after projection ('none', 'relu', 'elu' or 'mlu') params.response_activation = ('mlu', 0.05) # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu') # Advanced localization parameters params.advanced_localization = True # Use this or not params.target_not_found_threshold = -1 # Absolute score threshold to detect target missing params.distractor_threshold = 100 # Relative threshold to find distractors params.hard_negative_threshold = 0.3 # Relative threshold to find hard negative samples params.target_neighborhood_scale = 2.2 # Target neighborhood to remove params.dispalcement_scale = 0.7 # Dispacement to consider for distractors params.hard_negative_learning_rate = 0.02 # Learning rate if hard negative detected params.hard_negative_CG_iter = 5 # Number of optimization iterations to use if hard negative detected params.update_scale_when_uncertain = True # Update scale or not if distractor is close # IoUNet parameters params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 5 # Number of iterations for refining the boxes params.box_refinement_step_length = 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # 
Multiplicative step length decay (1 means no decay) # Setup the feature extractor (which includes the IoUNet) deep_fparams = FeatureParams(feature_params=[deep_params]) deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2) params.features = MultiResolutionExtractor([deep_feat]) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/atom/multiscale_no_iounet.py ================================================ from pytracking.utils import TrackerParams, FeatureParams, Choice from pytracking.features.extractor import MultiResolutionExtractor from pytracking.features import deep import torch def parameters(): params = TrackerParams() # These are usually set from outside params.debug = 0 # Debug level params.visualization = False # Do visualization # Use GPU or not (IoUNet requires this to be True) params.use_gpu = True # Feature specific parameters deep_params = TrackerParams() # Patch sampling parameters params.max_image_sample_size = (18*16)**2 # Maximum image sample size params.min_image_sample_size = (18*16)**2 # Minimum image sample size params.search_area_scale = 5 # Scale relative to target size params.feature_size_odd = False # Good to use False for even-sized kernels and vice versa # Optimization parameters params.CG_iter = 5 # The number of Conjugate Gradient iterations in each update after the first frame params.init_CG_iter = 60 # The total number of Conjugate Gradient iterations used in the first frame params.init_GN_iter = 6 # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated) params.post_init_CG_iter = 0 # CG iterations to run after GN params.fletcher_reeves = False # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient params.standard_alpha = True # Use the standard formula for 
computing the step length in Conjugate Gradient params.CG_forgetting_rate = None # Forgetting rate of the last conjugate direction # Learning parameters for each feature type deep_params.learning_rate = 0.01 # Learning rate deep_params.init_samples_minimum_weight = 0.25 # Minimum weight of initial samples in memory deep_params.output_sigma_factor = 1/4 # Standard deviation of Gaussian label relative to target size # Training parameters params.sample_memory_size = 250 # Memory size params.train_skipping = 10 # How often to run training (every n-th frame) # Online model parameters deep_params.kernel_size = (4,4) # Kernel size of filter deep_params.compressed_dim = 64 # Dimension output of projection matrix deep_params.filter_reg = 1e-1 # Filter regularization factor deep_params.projection_reg = 1e-4 # Projection regularization factor # Windowing params.feature_window = False # Perform windowing of features params.window_output = False # Perform windowing of output scores # Detection parameters params.scale_factors = 1.02**torch.arange(-2, 3).float() # What scales to use for localization (only one scale if IoUNet is used) params.score_upsample_factor = 1 # How much Fourier upsampling to use # Init data augmentation parameters params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 # How much to expand sample when doing augmentation params.random_shift_factor = 1/3 # How much random shift to do on each augmented sample deep_params.use_augmentation = True # Whether to use augmentation for this feature # Factorized convolution parameters # params.use_projection_matrix = True # Use projection matrix, i.e. 
use the factorized convolution formulation params.update_projection_matrix = True # Whether the projection matrix should be optimized or not params.proj_init_method = 'randn' # Method for initializing the projection matrix params.filter_init_method = 'randn' # Method for initializing the spatial filter params.projection_activation = 'none' # Activation function after projection ('none', 'relu', 'elu' or 'mlu') params.response_activation = ('mlu', 0.05) # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu') # Advanced localization parameters params.advanced_localization = True # Use this or not params.target_not_found_threshold = 0.25 # Absolute score threshold to detect target missing params.distractor_threshold = 0.8 # Relative threshold to find distractors params.hard_negative_threshold = 0.5 # Relative threshold to find hard negative samples params.target_neighborhood_scale = 2.2 # Target neighborhood to remove params.dispalcement_scale = 0.8 # Dispacement to consider for distractors params.hard_negative_learning_rate = 0.02 # Learning rate if hard negative detected params.hard_negative_CG_iter = 5 # Number of optimization iterations to use if hard negative detected params.update_scale_when_uncertain = True # Update scale or not if distractor is close # IoUNet parameters params.use_iou_net = False # Use IoU net or not params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 5 # Number of iterations for refining the boxes params.box_refinement_step_length = 1 # Gradient step length in the bounding box 
refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) # Setup the feature extractor (which includes the IoUNet) deep_fparams = FeatureParams(feature_params=[deep_params]) deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2) params.features = MultiResolutionExtractor([deep_feat]) return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp18.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 18*16 params.search_area_scale = 5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate 
= 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp18.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp18_vot.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 14 * 16 params.search_area_scale = 4 params.feature_size_odd = False # Learning parameters params.sample_memory_size = 250 params.learning_rate = 0.0075 params.init_samples_minimum_weight = 0.0 params.train_skipping = 10 # Net optimization params params.update_classifier = True params.net_opt_iter = 25 params.net_opt_update_iter = 3 params.net_opt_hn_iter = 3 # Detection parameters params.window_output = True # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.0 params.distractor_threshold = 100 params.hard_negative_threshold = 0.45 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.7 
params.perform_hn_without_windowing = True params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp18.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 18*16 params.search_area_scale = 5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 
params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50_vot.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 14 * 16 params.search_area_scale = 4 # Learning parameters params.sample_memory_size = 250 params.learning_rate = 0.0075 params.init_samples_minimum_weight = 0.0 params.train_skipping = 10 # Net optimization params params.update_classifier = True params.net_opt_iter = 25 params.net_opt_update_iter = 3 params.net_opt_hn_iter = 3 # Detection parameters params.window_output = True # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.0 params.distractor_threshold = 100 params.hard_negative_threshold = 0.45 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.7 
# ---- dimp50_vot.py: tail of parameters() (continued from previous line) ----
    params.perform_hn_without_windowing = True
    params.hard_negative_learning_rate = 0.02
    params.update_scale_when_uncertain = True

    # IoUnet parameters
    params.iounet_augmentation = False
    params.iounet_use_log_scale = True
    params.iounet_k = 3
    params.num_init_random_boxes = 9
    params.box_jitter_pos = 0.1
    params.box_jitter_sz = 0.5
    params.maximal_aspect_ratio = 6
    params.box_refinement_iter = 5
    params.box_refinement_step_length = 1
    params.box_refinement_step_decay = 1

    params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu)

    params.vot_anno_conversion_type = 'preserve_area'

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50_vot19.py
================================================
from pytracking.utils import TrackerParams
from pytracking.features.net_wrappers import NetWithBackbone


def parameters():
    """Build the TrackerParams configuration for DiMP-50 tuned for VOT2019."""
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    params.image_sample_size = 16 * 16
    params.search_area_scale = 4.5

    # Learning parameters
    params.sample_memory_size = 100
    params.learning_rate = 0.0075
    params.init_samples_minimum_weight = 0.0
    params.train_skipping = 10

    # Net optimization params
    params.update_classifier = True
    params.net_opt_iter = 15
    params.net_opt_update_iter = 2
    params.net_opt_hn_iter = 2

    # Detection parameters
    params.window_output = True

    # Init augmentation parameters
    params.use_augmentation = True
    params.augmentation = {'fliplr': True,
                           'rotate': [-5, 10, -30, 60],
                           'blur': [(2, 0.2), (1, 3)],
                           'relativeshift': [(0.6, 0.6), (-0.6, -0.6)],
                           'dropout': (3, 0.2)}

    params.augmentation_expansion_factor = 1.4
    params.random_shift_factor = 1/3

    # Advanced localization parameters
    params.advanced_localization = True
    params.target_not_found_threshold = 0.0
    params.distractor_threshold = 100
    params.hard_negative_threshold = 0.45
    params.target_neighborhood_scale = 2.2
    # NOTE(review): typo 'dispalcement' kept — the tracker reads this exact name.
    params.dispalcement_scale = 0.7
    params.perform_hn_without_windowing = True
    params.hard_negative_learning_rate = 0.02
    params.update_scale_when_uncertain = True

    # IoUnet parameters
    params.iounet_augmentation = False
    params.iounet_use_log_scale = True
    params.iounet_k = 3
    params.num_init_random_boxes = 9
    params.box_jitter_pos = 0.1
    params.box_jitter_sz = 0.5
    params.maximal_aspect_ratio = 6
    params.box_refinement_iter = 3
    params.box_refinement_step_length = 1
    params.box_refinement_step_decay = 1

    params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu)

    params.vot_anno_conversion_type = 'preserve_area'

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/prdimp18.py
================================================
from pytracking.utils import TrackerParams
from pytracking.features.net_wrappers import NetWithBackbone


def parameters():
    """Build the TrackerParams configuration for PrDiMP-18."""
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    params.image_sample_size = 18*16
    params.search_area_scale = 5

    # Learning parameters
    params.sample_memory_size = 50
    params.learning_rate = 0.01
    params.init_samples_minimum_weight = 0.25
    params.train_skipping = 20

    # Net optimization params
    params.update_classifier = True
    params.net_opt_iter = 10
    params.net_opt_update_iter = 2
    params.net_opt_hn_iter = 1

    # Detection parameters
    params.window_output = False

    # Init augmentation parameters
    params.use_augmentation = True
    params.augmentation = {'fliplr': True,
                           'rotate': [10, -10, 45, -45],
                           'blur': [(3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (2, 0.2)}

    params.augmentation_expansion_factor = 2
    params.random_shift_factor = 1/3

    # Advanced localization parameters
    params.advanced_localization = True
    params.score_preprocess = 'softmax'
    params.target_not_found_threshold = 0.04
    params.distractor_threshold = 0.8
    params.hard_negative_threshold = 0.5
    params.target_neighborhood_scale = 2.2
    params.dispalcement_scale = 0.8
# ---- prdimp18.py: tail of parameters() (continued from previous line) ----
    params.hard_negative_learning_rate = 0.02
    params.update_scale_when_uncertain = True

    # IoUnet parameters
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False  # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3  # Top-k average to estimate final box
    params.num_init_random_boxes = 9  # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1  # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6  # Limit on the aspect ratio
    params.box_refinement_iter = 10  # Number of iterations for refining the boxes
    params.box_refinement_step_length = 2.5e-3  # 1  # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1  # Multiplicative step length decay (1 means no decay)

    params.net = NetWithBackbone(net_path='prdimp18.pth.tar', use_gpu=params.use_gpu)

    params.vot_anno_conversion_type = 'preserve_area'

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/prdimp50.py
================================================
from pytracking.utils import TrackerParams
from pytracking.features.net_wrappers import NetWithBackbone


def parameters():
    """Build the TrackerParams configuration for PrDiMP-50."""
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    params.image_sample_size = 22*16
    params.search_area_scale = 6
    params.border_mode = 'inside_major'
    params.patch_max_scale_change = 1.5

    # Learning parameters
    params.sample_memory_size = 50
    params.learning_rate = 0.01
    params.init_samples_minimum_weight = 0.25
    params.train_skipping = 20

    # Net optimization params
    params.update_classifier = True
    params.net_opt_iter = 10
    params.net_opt_update_iter = 2
    params.net_opt_hn_iter = 1

    # Detection parameters
    params.window_output = False

    # Init augmentation parameters
    params.use_augmentation = True
    params.augmentation = {'fliplr': True,
                           'rotate': [10, -10, 45, -45],
                           'blur': [(3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (2, 0.2)}

    params.augmentation_expansion_factor = 2
    params.random_shift_factor = 1/3

    # Advanced localization parameters
    params.advanced_localization = True
    params.score_preprocess = 'softmax'
    params.target_not_found_threshold = 0.04
    params.distractor_threshold = 0.8
    params.hard_negative_threshold = 0.5
    params.target_neighborhood_scale = 2.2
    # NOTE(review): typo 'dispalcement' kept — the tracker reads this exact name.
    params.dispalcement_scale = 0.8
    params.hard_negative_learning_rate = 0.02
    params.update_scale_when_uncertain = True

    # IoUnet parameters
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False  # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3  # Top-k average to estimate final box
    params.num_init_random_boxes = 9  # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1  # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6  # Limit on the aspect ratio
    params.box_refinement_iter = 10  # Number of iterations for refining the boxes
    params.box_refinement_step_length = 2.5e-3  # 1  # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1  # Multiplicative step length decay (1 means no decay)

    params.net = NetWithBackbone(net_path='prdimp50.pth.tar', use_gpu=params.use_gpu)

    params.vot_anno_conversion_type = 'preserve_area'

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/dimp/super_dimp.py
================================================
from pytracking.utils import TrackerParams
from pytracking.features.net_wrappers import NetWithBackbone


def parameters():
    """Build the TrackerParams configuration for SuperDiMP."""
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    params.image_sample_size = 22*16
    params.search_area_scale =
6 params.border_mode = 'inside_major' params.patch_max_scale_change = 1.5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.box_refinement_space = 'relative' params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 10 # Number of iterations for refining the boxes params.box_refinement_step_length = 2.5e-3 # 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) params.net = NetWithBackbone(net_path='super_dimp.pth.tar', use_gpu=params.use_gpu) 
    params.vot_anno_conversion_type = 'preserve_area'

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/eco/__init__.py
================================================


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/parameter/eco/default.py
================================================
from pytracking.utils import TrackerParams, FeatureParams
from pytracking.features.extractor import MultiResolutionExtractor
from pytracking.features import deep
import torch


def parameters():
    """Build the default TrackerParams configuration for ECO.

    Besides the global ``params``, two per-feature parameter sets
    (``shallow_params`` / ``deep_params``) are configured and bundled into a
    MultiResolutionExtractor at the end.
    """
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    # Feature specific parameters
    shallow_params = TrackerParams()
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = 250**2   # Maximum image sample size
    params.min_image_sample_size = 200**2   # Minimum image sample size
    params.search_area_scale = 4.5          # Scale relative to target size

    # Conjugate Gradient parameters
    params.CG_iter = 5                # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 100         # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 10          # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0      # CG iterations to run after GN
    params.fletcher_reeves = False    # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True      # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = 75    # Forgetting rate of the last conjugate direction
    params.precond_data_param = 0.3   # Weight of the data term in the preconditioner
    params.precond_reg_param = 0.15   # Weight of the regularization term in the preconditioner
    params.precond_proj_param = 35    # Weight of the projection matrix part in the preconditioner

    # Learning parameters
    shallow_params.learning_rate = 0.025
    deep_params.learning_rate = 0.0075
    shallow_params.output_sigma_factor = 1/16
    deep_params.output_sigma_factor = 1/4

    # Training parameters
    params.sample_memory_size = 200   # Memory size
    params.train_skipping = 10        # How often to run training (every n-th frame)

    # Detection parameters
    params.scale_factors = 1.02**torch.arange(-2, 3).float()   # What scales to use for localization
    params.score_upsample_factor = 1              # How much Fourier upsampling to use
    params.score_fusion_strategy = 'weightedsum'  # Fusion strategy
    shallow_params.translation_weight = 0.4       # Weight of this feature
    deep_params.translation_weight = 1 - shallow_params.translation_weight

    # Init augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'shift': [(6, 6), (-6, 6), (6, -6), (-6,-6)],
                           'dropout': (7, 0.2)}

    # Whether to use augmentation for this feature
    deep_params.use_augmentation = True
    shallow_params.use_augmentation = True

    # Factorized convolution parameters
    # params.use_projection_matrix = True  # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True  # Whether the projection matrix should be optimized or not
    # params.proj_init_method = 'pca'  # Method for initializing the projection matrix
    params.projection_reg = 5e-8  # Regularization paremeter of the projection matrix
    shallow_params.compressed_dim = 16  # Dimension output of projection matrix for shallow features
    deep_params.compressed_dim = 64     # Dimension output of projection matrix for deep features

    # Interpolation parameters
    params.interpolation_method = 'bicubic'  # The kind of interpolation kernel
    params.interpolation_bicubic_a = -0.75   # The parameter for the bicubic interpolation kernel
    params.interpolation_centering = True    # Center the kernel at the feature sample
    params.interpolation_windowing = False   # Do additional windowing on the Fourier coefficients of the kernel

    # Regularization parameters
    shallow_params.use_reg_window = True          # Use spatial regularization or not
    shallow_params.reg_window_min = 1e-4          # The minimum value of the regularization window
    shallow_params.reg_window_edge = 10e-3        # The impact of the spatial regularization
    shallow_params.reg_window_power = 2           # The degree of the polynomial to use (e.g. 2 is a quadratic window)
    shallow_params.reg_sparsity_threshold = 0.05  # A relative threshold of which DFT coefficients that should be set to zero
    deep_params.use_reg_window = True             # Use spatial regularization or not
    deep_params.reg_window_min = 10e-4            # The minimum value of the regularization window
    deep_params.reg_window_edge = 50e-3           # The impact of the spatial regularization
    deep_params.reg_window_power = 2              # The degree of the polynomial to use (e.g. 2 is a quadratic window)
    deep_params.reg_sparsity_threshold = 0.1      # A relative threshold of which DFT coefficients that should be set to zero

    fparams = FeatureParams(feature_params=[shallow_params, deep_params])
    features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'], use_gpu=params.use_gpu, fparams=fparams,
                               pool_stride=[2, 1], normalize_power=2)

    params.features = MultiResolutionExtractor([features])

    return params


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/tracker/__init__.py
================================================


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/tracker/atom/__init__.py
================================================
from .atom import ATOM

def get_tracker_class():
    return ATOM


================================================
FILE: artrackv2_mindspore/external/AR/pytracking/tracker/atom/atom.py
================================================
from pytracking.tracker.base import BaseTracker
import torch
import torch.nn.functional as F
import torch.nn
import math
import time
from pytracking import dcf, fourier, TensorList, operation
from pytracking.features.preprocessing import numpy_to_torch
from pytracking.utils.plotting import show_tensor
from pytracking.libs.optimization import GaussNewtonCG, ConjugateGradient, GradientDescentL2
from .optim import ConvProblem, FactorizedConvProblem
from pytracking.features import augmentation
import ltr.data.bounding_box_utils as bbutils


class ATOM(BaseTracker):

    multiobj_mode = 'parallel'

    def initialize_features(self):
        """Initialize the feature extractor once per tracker instance."""
        if not getattr(self, 'features_initialized', False):
            self.params.features.initialize()
            self.features_initialized = True

    def initialize(self, image, info: dict) -> dict:
        state = info['init_bbox']

        # Initialize some stuff
        self.frame_num = 1
        if not self.params.has('device'):
            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'

        # Initialize features
        self.initialize_features()
# ---- atom.py: ATOM.initialize() continued from the previous line ----
        # Check if image is color
        self.params.features.set_is_color(image.shape[2] == 3)

        # Get feature specific params
        self.fparams = self.params.features.get_fparams('feature_params')

        tic = time.time()

        # Get position and size (pos is (row, col) center; target_sz is (h, w))
        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])
        self.target_sz = torch.Tensor([state[3], state[2]])

        # Set search area
        self.target_scale = 1.0
        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()
        if search_area > self.params.max_image_sample_size:
            self.target_scale = math.sqrt(search_area / self.params.max_image_sample_size)
        elif search_area < self.params.min_image_sample_size:
            self.target_scale = math.sqrt(search_area / self.params.min_image_sample_size)

        # Check if IoUNet is used
        self.use_iou_net = self.params.get('use_iou_net', True)

        # Target size in base scale
        self.base_target_sz = self.target_sz / self.target_scale

        # Use odd square search area and set sizes
        feat_max_stride = max(self.params.features.stride())
        if self.params.get('search_area_shape', 'square') == 'square':
            self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2)
        elif self.params.search_area_shape == 'initrect':
            self.img_sample_sz = torch.round(self.base_target_sz * self.params.search_area_scale)
        else:
            raise ValueError('Unknown search area shape')
        if self.params.feature_size_odd:
            self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride)
        else:
            self.img_sample_sz += feat_max_stride - (self.img_sample_sz + feat_max_stride) % (2 * feat_max_stride)

        # Set sizes
        self.img_support_sz = self.img_sample_sz
        self.feature_sz = self.params.features.size(self.img_sample_sz)
        self.output_sz = self.params.score_upsample_factor * self.img_support_sz    # Interpolated size of the output
        self.kernel_size = self.fparams.attribute('kernel_size')

        self.iou_img_sample_sz = self.img_sample_sz

        # Optimization options
        self.params.precond_learning_rate = self.fparams.attribute('learning_rate')
        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:
            self.params.direction_forget_factor = 0
        else:
            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate

        self.output_window = None
        if self.params.get('window_output', False):
            if self.params.get('use_clipped_window', False):
                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), self.output_sz.long()*self.params.effective_search_area / self.params.search_area_scale, centered=False).to(self.params.device)
            else:
                self.output_window = dcf.hann2d(self.output_sz.long(), centered=False).to(self.params.device)

        # Initialize some learning things
        self.init_learning()

        # Convert image
        im = numpy_to_torch(image)
        self.im = im    # For debugging only

        # Setup scale bounds
        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])
        self.min_scale_factor = torch.max(10 / self.base_target_sz)
        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)

        # Extract and transform sample
        x = self.generate_init_samples(im)

        # Initialize iounet
        if self.use_iou_net:
            self.init_iou_net()

        # Initialize projection matrix
        self.init_projection_matrix(x)

        # Transform to get the training sample
        train_x = self.preprocess_sample(x)

        # Generate label function
        init_y = self.init_label_function(train_x)

        # Init memory
        self.init_memory(train_x)

        # Init optimizer and do initial optimization
        self.init_optimization(train_x, init_y)

        self.pos_iounet = self.pos.clone()

        out = {'time': time.time() - tic}
        return out

    def init_optimization(self, train_x, init_y):
        """Set up and run the initial filter (and projection matrix) optimization."""
        # Initialize filter
        filter_init_method = self.params.get('filter_init_method', 'zeros')
        self.filter = TensorList(
            [x.new_zeros(1, cdim, sz[0], sz[1]) for x, cdim, sz in zip(train_x, self.compressed_dim, self.kernel_size)])
        if filter_init_method == 'zeros':
            pass
        elif filter_init_method == 'randn':
            for f in self.filter:
                f.normal_(0, 1/f.numel())
        else:
            raise ValueError('Unknown "filter_init_method"')

        # Get parameters
        self.params.update_projection_matrix = self.params.get('update_projection_matrix', True) and self.params.use_projection_matrix
        optimizer = self.params.get('optimizer', 'GaussNewtonCG')

        # Setup factorized joint optimization
        if self.params.update_projection_matrix:
            self.joint_problem = FactorizedConvProblem(self.init_training_samples, init_y, self.filter_reg,
                                                       self.fparams.attribute('projection_reg'), self.params, self.init_sample_weights,
                                                       self.projection_activation, self.response_activation)

            # Variable containing both filter and projection matrix
            joint_var = self.filter.concat(self.projection_matrix)

            # Initialize optimizer
            analyze_convergence = self.params.get('analyze_convergence', False)
            if optimizer == 'GaussNewtonCG':
                self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug >= 1), plotting=(self.params.debug >= 3), analyze=analyze_convergence, visdom=self.visdom)
            elif optimizer == 'GradientDescentL2':
                self.joint_optimizer = GradientDescentL2(self.joint_problem, joint_var, self.params.optimizer_step_length, self.params.optimizer_momentum, plotting=(self.params.debug >= 3), debug=(self.params.debug >= 1), visdom=self.visdom)

            # Do joint optimization
            if isinstance(self.params.init_CG_iter, (list, tuple)):
                self.joint_optimizer.run(self.params.init_CG_iter)
            else:
                self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)

            if analyze_convergence:
                # Dump loss/gradient curves to text files and abort (debug-only path).
                opt_name = 'CG' if self.params.get('CG_optimizer', True) else 'GD'
                for val_name, values in zip(['loss', 'gradient'], [self.joint_optimizer.losses, self.joint_optimizer.gradient_mags]):
                    val_str = ' '.join(['{:.8e}'.format(v.item()) for v in values])
                    file_name = '{}_{}.txt'.format(opt_name, val_name)
                    with open(file_name, 'a') as f:
                        f.write(val_str + '\n')
                raise RuntimeError('Exiting')

            # Re-project samples with the new projection matrix
            compressed_samples = self.project_sample(self.init_training_samples, self.projection_matrix)
            for train_samp, init_samp in zip(self.training_samples, compressed_samples):
                train_samp[:init_samp.shape[0],...] = init_samp

        self.hinge_mask = None

        # Initialize optimizer
        self.conv_problem = ConvProblem(self.training_samples, self.y, self.filter_reg, self.sample_weights, self.response_activation)

        if optimizer == 'GaussNewtonCG':
            self.filter_optimizer = ConjugateGradient(self.conv_problem, self.filter, fletcher_reeves=self.params.fletcher_reeves,
                                                      direction_forget_factor=self.params.direction_forget_factor, debug=(self.params.debug>=1), plotting=(self.params.debug>=3), visdom=self.visdom)
        elif optimizer == 'GradientDescentL2':
            self.filter_optimizer = GradientDescentL2(self.conv_problem, self.filter, self.params.optimizer_step_length, self.params.optimizer_momentum, debug=(self.params.debug >= 1), plotting=(self.params.debug>=3), visdom=self.visdom)

        # Transfer losses from previous optimization
        if self.params.update_projection_matrix:
            self.filter_optimizer.residuals = self.joint_optimizer.residuals
            self.filter_optimizer.losses = self.joint_optimizer.losses

        if not self.params.update_projection_matrix:
            self.filter_optimizer.run(self.params.init_CG_iter)

        # Post optimization
        self.filter_optimizer.run(self.params.post_init_CG_iter)

        # Free memory
        del self.init_training_samples
        if self.params.use_projection_matrix:
            del self.joint_problem, self.joint_optimizer

    def track(self, image, info: dict = None) -> dict:
        """Track the target in a new frame and return {'target_bbox': [x, y, w, h]}."""
        self.debug_info = {}

        self.frame_num += 1
        self.debug_info['frame_num'] = self.frame_num

        # Convert image
        im = numpy_to_torch(image)
        self.im = im    # For debugging only

        # ------- LOCALIZATION ------- #

        # Get sample
        sample_pos = self.pos.round()
        sample_scales = self.target_scale * self.params.scale_factors
        test_x = self.extract_processed_sample(im, self.pos, sample_scales, self.img_sample_sz)

        # Compute scores
        scores_raw = self.apply_filter(test_x)
        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)

        # Update position and scale
        if flag != 'not_found':
            if self.use_iou_net:
                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'
                if self.params.get('use_classifier', True):
                    self.update_state(sample_pos + translation_vec)
                self.refine_target_box(sample_pos, sample_scales[scale_ind], scale_ind, update_scale_flag)
            elif self.params.get('use_classifier', True):
                self.update_state(sample_pos + translation_vec, sample_scales[scale_ind])

        score_map = s[scale_ind, ...]
        max_score = torch.max(score_map).item()
        self.debug_info['max_score'] = max_score
        self.debug_info['flag'] = flag

        if self.visdom is not None:
            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')
            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')
        elif self.params.debug >= 2:
            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))

        # ------- UPDATE ------- #

        # Check flags and set learning rate if hard negative
        update_flag = flag not in ['not_found', 'uncertain']
        hard_negative = (flag == 'hard_negative')
        learning_rate = self.params.hard_negative_learning_rate if hard_negative else None

        if update_flag:
            # Get train sample
            train_x = TensorList([x[scale_ind:scale_ind+1, ...] for x in test_x])

            # Create label for sample
            train_y = self.get_label_function(sample_pos, sample_scales[scale_ind])

            # Update memory
            self.update_memory(train_x, train_y, learning_rate)

        # Train filter
        if hard_negative:
            self.filter_optimizer.run(self.params.hard_negative_CG_iter)
        elif (self.frame_num-1) % self.params.train_skipping == 0:
            self.filter_optimizer.run(self.params.CG_iter)

        # Set the pos of the tracker to iounet pos
        if self.use_iou_net and flag != 'not_found':
            self.pos = self.pos_iounet.clone()

        # Return new state
        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))
        out = {'target_bbox': new_state.tolist()}
        return out

    def apply_filter(self, sample_x: TensorList):
        """Apply the learned correlation filter to the extracted sample features."""
        return operation.conv2d(sample_x, self.filter, mode='same')

    def localize_target(self, scores_raw):
        """Fuse the raw per-feature scores in the Fourier domain and localize the target."""
        # Weighted sum (if multiple features) with interpolation in fourier domain
        weight = self.fparams.attribute('translation_weight', 1.0)
        scores_raw = weight * scores_raw
        sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) * scores_raw.size(3))
        for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):
            sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (1 - torch.Tensor([ksz[0]%2, ksz[1]%2]) / sz))

        scores_fs = fourier.sum_fs(sf_weighted)
        scores = fourier.sample_fs(scores_fs, self.output_sz)

        if self.output_window is not None and not self.params.get('perform_hn_without_windowing', False):
            scores *= self.output_window

        if self.params.get('advanced_localization', False):
            return self.localize_advanced(scores)

        # Get maximum
        max_score, max_disp = dcf.max2d(scores)
        _, scale_ind = torch.max(max_score, dim=0)
        max_disp = max_disp.float().cpu()

        # Convert to displacements in the base scale
        disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2

        # Compute translation vector and scale change factor
        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale
        translation_vec *=
self.params.scale_factors[scale_ind]

        # Shift the score output for visualization purposes
        if self.params.debug >= 2:
            sz = scores.shape[-2:]
            scores = torch.cat([scores[...,sz[0]//2:,:], scores[...,:sz[0]//2,:]], -2)
            scores = torch.cat([scores[...,:,sz[1]//2:], scores[...,:,:sz[1]//2]], -1)

        return translation_vec, scale_ind, scores, None

    def localize_advanced(self, scores):
        """Does the advanced localization with hard negative detection and target not found.

        Returns (translation_vec, scale_ind, scores, flag), where flag is one of
        None, 'not_found', 'hard_negative' or 'uncertain'.
        """
        sz = scores.shape[-2:]

        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
            # Keep an unwindowed copy for the hard-negative analysis below.
            scores_orig = scores.clone()

            scores_orig = torch.cat([scores_orig[..., (sz[0] + 1) // 2:, :], scores_orig[..., :(sz[0] + 1) // 2, :]], -2)
            scores_orig = torch.cat([scores_orig[..., :, (sz[1] + 1) // 2:], scores_orig[..., :, :(sz[1] + 1) // 2]], -1)

            scores *= self.output_window

        # Shift scores back
        scores = torch.cat([scores[...,(sz[0]+1)//2:,:], scores[...,:(sz[0]+1)//2,:]], -2)
        scores = torch.cat([scores[...,:,(sz[1]+1)//2:], scores[...,:,:(sz[1]+1)//2]], -1)

        # Find maximum
        max_score1, max_disp1 = dcf.max2d(scores)
        _, scale_ind = torch.max(max_score1, dim=0)
        max_score1 = max_score1[scale_ind]
        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)
        target_disp1 = max_disp1 - self.output_sz // 2
        translation_vec1 = target_disp1 * (self.img_support_sz / self.output_sz) * self.target_scale

        if max_score1.item() < self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores, 'not_found'

        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
            scores = scores_orig

        # Mask out target neighborhood
        target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale
        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)
        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])
        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)
        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])
        scores_masked = scores[scale_ind:scale_ind+1,...].clone()
        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0

        # Find new maximum
        max_score2, max_disp2 = dcf.max2d(scores_masked)
        max_disp2 = max_disp2.float().cpu().view(-1)
        target_disp2 = max_disp2 - self.output_sz // 2
        translation_vec2 = target_disp2 * (self.img_support_sz / self.output_sz) * self.target_scale

        # Handle the different cases
        if max_score2 > self.params.distractor_threshold * max_score1:
            disp_norm1 = torch.sqrt(torch.sum(target_disp1**2))
            disp_norm2 = torch.sqrt(torch.sum(target_disp2**2))
            # NOTE: 'dispalcement_scale' typo matches the parameter files; both must agree.
            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2

            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:
                return translation_vec1, scale_ind, scores, 'hard_negative'
            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:
                return translation_vec2, scale_ind, scores, 'hard_negative'
            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:
                return translation_vec1, scale_ind, scores, 'uncertain'

            # If also the distractor is close, return with highest score
            return translation_vec1, scale_ind, scores, 'uncertain'

        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores, 'hard_negative'

        return translation_vec1, scale_ind, scores, None

    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):
        """Extract backbone features for the given image patch(es)."""
        return self.params.features.extract(im, pos, scales, sz)[0]

    def get_iou_features(self):
        return self.params.features.get_unique_attribute('iounet_features')

    def get_iou_backbone_features(self):
        return self.params.features.get_unique_attribute('iounet_backbone_features')

    def extract_processed_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> (TensorList, TensorList):
        x = self.extract_sample(im, pos, scales, sz)
return self.preprocess_sample(self.project_sample(x)) def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList): if self.params.get('_feature_window', False): x = x * self.feature_window return x def project_sample(self, x: TensorList, proj_matrix = None): # Apply projection matrix if proj_matrix is None: proj_matrix = self.projection_matrix return operation.conv2d(x, proj_matrix).apply(self.projection_activation) def init_learning(self): # Get window function self.feature_window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz]) # Filter regularization self.filter_reg = self.fparams.attribute('filter_reg') # Activation function after the projection matrix (phi_1 in the paper) projection_activation = self.params.get('projection_activation', 'none') if isinstance(projection_activation, tuple): projection_activation, act_param = projection_activation if projection_activation == 'none': self.projection_activation = lambda x: x elif projection_activation == 'relu': self.projection_activation = torch.nn.ReLU(inplace=True) elif projection_activation == 'elu': self.projection_activation = torch.nn.ELU(inplace=True) elif projection_activation == 'mlu': self.projection_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param) else: raise ValueError('Unknown activation') # Activation function after the output scores (phi_2 in the paper) response_activation = self.params.get('response_activation', 'none') if isinstance(response_activation, tuple): response_activation, act_param = response_activation if response_activation == 'none': self.response_activation = lambda x: x elif response_activation == 'relu': self.response_activation = torch.nn.ReLU(inplace=True) elif response_activation == 'elu': self.response_activation = torch.nn.ELU(inplace=True) elif response_activation == 'mlu': self.response_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param) else: raise ValueError('Unknown activation') def 
def generate_init_samples(self, im: torch.Tensor) -> "TensorList":
    """Generate augmented initial samples."""

    # Compute augmentation size
    aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)
    aug_expansion_sz = self.img_sample_sz.clone()
    aug_output_sz = None
    if aug_expansion_factor is not None and aug_expansion_factor != 1:
        aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()
        # Keep the expansion even so the crop stays centered
        aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2
        aug_expansion_sz = aug_expansion_sz.float()
        aug_output_sz = self.img_sample_sz.long().tolist()

    # Random shift operator
    get_rand_shift = lambda: None
    random_shift_factor = self.params.get('random_shift_factor', 0)
    if random_shift_factor > 0:
        get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor).long().tolist()

    # Create transformations
    self.transforms = [augmentation.Identity(aug_output_sz)]
    if 'shift' in self.params.augmentation:
        self.transforms.extend([augmentation.Translation(shift, aug_output_sz)
                                for shift in self.params.augmentation['shift']])
    if 'relativeshift' in self.params.augmentation:
        get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz / 2).long().tolist()
        self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz)
                                for shift in self.params.augmentation['relativeshift']])
    if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:
        self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
    if 'blur' in self.params.augmentation:
        self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift())
                                for sigma in self.params.augmentation['blur']])
    if 'scale' in self.params.augmentation:
        self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift())
                                for scale_factor in self.params.augmentation['scale']])
    if 'rotate' in self.params.augmentation:
        self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift())
                                for angle in self.params.augmentation['rotate']])

    # Generate initial samples
    init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale,
                                                            aug_expansion_sz, self.transforms)

    # Remove augmented samples for those that shall not have
    for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
        if not use_aug:
            init_samples[i] = init_samples[i][0:1, ...]

    # Add dropout samples
    if 'dropout' in self.params.augmentation:
        num, prob = self.params.augmentation['dropout']
        self.transforms.extend(self.transforms[:1] * num)
        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
            if use_aug:
                init_samples[i] = torch.cat([init_samples[i],
                                             F.dropout2d(init_samples[i][0:1, ...].expand(num, -1, -1, -1),
                                                         p=prob, training=True)])

    return init_samples


def init_projection_matrix(self, x):
    """Initialize the feature projection matrix (PCA or random normal, per params)."""
    # Set if using projection matrix
    self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)

    if self.params.use_projection_matrix:
        self.compressed_dim = self.fparams.attribute('compressed_dim', None)

        proj_init_method = self.params.get('proj_init_method', 'pca')
        if proj_init_method == 'pca':
            x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])
            x_mat -= x_mat.mean(dim=1, keepdim=True)
            cov_x = x_mat @ x_mat.t()
            self.projection_matrix = TensorList(
                [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone()
                 for C, cdim in zip(cov_x, self.compressed_dim)])
        elif proj_init_method == 'randn':
            self.projection_matrix = TensorList(
                [None if cdim is None else
                 ex.new_zeros(cdim, ex.shape[1], 1, 1).normal_(0, 1 / math.sqrt(ex.shape[1]))
                 for ex, cdim in zip(x, self.compressed_dim)])
    else:
        self.compressed_dim = x.size(1)
        self.projection_matrix = TensorList([None] * len(x))


def init_label_function(self, train_x):
    """Allocate and fill the Gaussian label functions for the initial samples."""
    # Allocate label function
    self.y = TensorList([x.new_zeros(self.params.sample_memory_size, 1, x.shape[2], x.shape[3])
                         for x in train_x])

    # Output sigma factor
    output_sigma_factor = self.fparams.attribute('output_sigma_factor')
    self.sigma = (self.feature_sz / self.img_support_sz * self.base_target_sz).prod().sqrt() * output_sigma_factor * torch.ones(2)

    # Center pos in normalized coords
    target_center_norm = (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)

    # Generate label functions
    for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz, self.kernel_size, train_x):
        center_pos = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])
        for i, T in enumerate(self.transforms[:x.shape[0]]):
            sample_center = center_pos + torch.Tensor(T.shift) / self.img_support_sz * sz
            y[i, 0, ...] = dcf.label_function_spatial(sz, sig, sample_center)

    # Return only the ones to use for initial training
    return TensorList([y[:x.shape[0], ...] for y, x in zip(self.y, train_x)])
def init_memory(self, train_x):
    """Allocate the sample memory and set the first-frame sample weights."""
    # Initialize first-frame training samples
    self.num_init_samples = train_x.size(0)
    self.init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])
    self.init_training_samples = train_x

    # Sample counters and weights
    self.num_stored_samples = self.num_init_samples.copy()
    self.previous_replace_ind = [None] * len(self.num_stored_samples)
    self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])
    for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, self.num_init_samples):
        sw[:num] = init_sw

    # Initialize memory
    self.training_samples = TensorList(
        [x.new_zeros(self.params.sample_memory_size, cdim, x.shape[2], x.shape[3])
         for x, cdim in zip(train_x, self.compressed_dim)])


def update_memory(self, sample_x: "TensorList", sample_y: "TensorList", learning_rate=None):
    """Insert a new training sample (and its label) into memory, updating sample weights."""
    replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind,
                                             self.num_stored_samples, self.num_init_samples,
                                             self.fparams, learning_rate)
    self.previous_replace_ind = replace_ind
    for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):
        train_samp[ind:ind+1, ...] = x
    for y_memory, y, ind in zip(self.y, sample_y, replace_ind):
        y_memory[ind:ind+1, ...] = y
    if self.hinge_mask is not None:
        for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):
            m[ind:ind+1, ...] = (y >= self.params.hinge_threshold).float()
    self.num_stored_samples += 1


def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples,
                          num_init_samples, fparams, learning_rate=None):
    """Update per-sample weights in place and pick the memory slot to replace for each feature.

    Returns the list of replacement indices (one per feature).
    """
    replace_ind = []
    for sw, prev_ind, num_samp, num_init, fpar in zip(sample_weights, previous_replace_ind,
                                                      num_stored_samples, num_init_samples, fparams):
        lr = learning_rate
        if lr is None:
            lr = fpar.learning_rate

        init_samp_weight = getattr(fpar, 'init_samples_minimum_weight', None)
        if init_samp_weight == 0:
            init_samp_weight = None
        # Never replace a protected initial sample when a minimum init weight is set
        s_ind = 0 if init_samp_weight is None else num_init

        if num_samp == 0 or lr == 1:
            sw[:] = 0
            sw[0] = 1
            r_ind = 0
        else:
            # Get index to replace
            _, r_ind = torch.min(sw[s_ind:], 0)
            r_ind = r_ind.item() + s_ind

            # Update weights
            if prev_ind is None:
                sw /= 1 - lr
                sw[r_ind] = lr
            else:
                sw[r_ind] = sw[prev_ind] / (1 - lr)

        sw /= sw.sum()
        if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:
            sw /= init_samp_weight + sw[num_init:].sum()
            sw[:num_init] = init_samp_weight / num_init

        replace_ind.append(r_ind)

    return replace_ind


def get_label_function(self, sample_pos, sample_scale):
    """Generate a Gaussian label centered at the current target position."""
    train_y = TensorList()
    target_center_norm = (self.pos - sample_pos) / (sample_scale * self.img_support_sz)
    for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):
        center = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])
        train_y.append(dcf.label_function_spatial(sz, sig, center))
    return train_y


def update_state(self, new_pos, new_scale=None):
    """Clamp and store the new position/scale estimate."""
    # Update scale
    if new_scale is not None:
        self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)
        self.target_sz = self.base_target_sz * self.target_scale

    # Update pos: keep at least `inside_ratio` of the target inside the image
    inside_ratio = 0.2
    inside_offset = (inside_ratio - 0.5) * self.target_sz
    self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)
def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
    """All inputs in original image coordinates.

    Returns the target box as (x, y, w, h) in the IoUNet sample crop frame.
    """
    box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2
    box_sz = sz / sample_scale
    target_ul = box_center - (box_sz - 1) / 2
    # pos/sz are stored (row, col); flip to (x, y) ordering for the box
    return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])


def init_iou_net(self):
    """Freeze the IoU predictor and extract the target modulation features."""
    # Setup IoU net
    self.iou_predictor = self.params.features.get_unique_attribute('iou_predictor')
    for p in self.iou_predictor.parameters():
        p.requires_grad = False

    # Get target boxes for the different augmentations
    self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz, self.pos.round(), self.target_scale)
    target_boxes = TensorList()
    if self.params.iounet_augmentation:
        for T in self.transforms:
            # Stop at the first transform that geometrically alters the box (e.g. rotation)
            if not isinstance(T, (augmentation.Identity, augmentation.Translation,
                                  augmentation.FlipHorizontal, augmentation.FlipVertical,
                                  augmentation.Blur)):
                break
            target_boxes.append(self.iou_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))
    else:
        target_boxes.append(self.iou_target_box.clone())
    target_boxes = torch.cat(target_boxes.view(1, 4), 0).to(self.params.device)

    # Get iou features
    iou_backbone_features = self.get_iou_backbone_features()

    # Remove other augmentations such as rotation
    iou_backbone_features = TensorList([x[:target_boxes.shape[0], ...] for x in iou_backbone_features])

    # Extract target feat
    with torch.no_grad():
        target_feat = self.iou_predictor.get_modulation(iou_backbone_features, target_boxes)
    self.target_feat = TensorList([x.detach().mean(0) for x in target_feat])

    if self.params.get('iounet_not_use_reference', False):
        self.target_feat = TensorList([torch.full_like(tf, tf.norm() / tf.numel())
                                       for tf in self.target_feat])


def refine_target_box(self, sample_pos, sample_scale, scale_ind, update_scale=True):
    """Refine the target box via gradient ascent on the predicted IoU."""
    # Initial box for refinement
    init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)

    # Extract features from the relevant scale
    iou_features = self.get_iou_features()
    iou_features = TensorList([x[scale_ind:scale_ind+1, ...] for x in iou_features])

    init_boxes = init_box.view(1, 4).clone()
    if self.params.num_init_random_boxes > 0:
        # Get random initial boxes (jittered in position and size)
        square_box_sz = init_box[2:].prod().sqrt()
        rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2),
                                                 self.params.box_jitter_sz * torch.ones(2)])
        minimal_edge_size = init_box[2:].min() / 3
        rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor
        new_sz = (init_box[2:] + rand_bb[:, 2:]).clamp(minimal_edge_size)
        new_center = (init_box[:2] + init_box[2:] / 2) + rand_bb[:, :2]
        init_boxes = torch.cat([new_center - new_sz / 2, new_sz], 1)
        init_boxes = torch.cat([init_box.view(1, 4), init_boxes])

    # Refine boxes by maximizing iou
    output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)

    # Remove weird boxes with extreme aspect ratios
    output_boxes[:, 2:].clamp_(1)
    aspect_ratio = output_boxes[:, 2] / output_boxes[:, 3]
    keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1 / self.params.maximal_aspect_ratio)
    output_boxes = output_boxes[keep_ind, :]
    output_iou = output_iou[keep_ind]

    # If no box found
    if output_boxes.shape[0] == 0:
        return

    # Take average of top k boxes
    k = self.params.get('iounet_k', 5)
    topk = min(k, output_boxes.shape[0])
    _, inds = torch.topk(output_iou, topk)
    predicted_box = output_boxes[inds, :].mean(0)
    predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)

    # Update position
    new_pos = predicted_box[:2] + predicted_box[2:] / 2 - (self.iou_img_sample_sz - 1) / 2
    new_pos = new_pos.flip((0,)) * sample_scale + sample_pos
    new_target_sz = predicted_box[2:].flip((0,)) * sample_scale
    new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())

    self.pos_iounet = new_pos.clone()
    if self.params.get('use_iounet_pos_for_learning', True):
        self.pos = new_pos.clone()
    self.target_sz = new_target_sz
    if update_scale:
        self.target_scale = new_scale
def optimize_boxes(self, iou_features, init_boxes):
    """Gradient-ascent refinement of candidate boxes under the IoU predictor.

    Returns (refined_boxes, predicted_ious), both on CPU.
    """
    # Optimize iounet boxes
    output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)
    step_length = self.params.box_refinement_step_length
    init_step_length = self.params.box_refinement_step_length
    if isinstance(step_length, (tuple, list)):
        # Separate step lengths for position and size
        init_step_length = torch.Tensor([step_length[0], step_length[0],
                                         step_length[1], step_length[1]]).to(self.params.device).view(1, 1, 4)
    box_refinement_space = self.params.get('box_refinement_space', 'default')

    step_length = init_step_length * output_boxes.new_ones(1, output_boxes.shape[1], 1)
    outputs_prev = -99999999 * output_boxes.new_ones(1, output_boxes.shape[1])
    step = torch.zeros_like(output_boxes)

    if box_refinement_space == 'default':
        # Optimization using bounding box space used in original IoUNet
        for i_ in range(self.params.box_refinement_iter):
            # forward pass
            bb_init = output_boxes.clone().detach()
            bb_init.requires_grad = True

            outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)
            if isinstance(outputs, (list, tuple)):
                outputs = outputs[0]

            outputs.backward(gradient=torch.ones_like(outputs))

            # Update mask and step length: shrink steps for boxes whose IoU decreased
            update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)
            update_mask_float = update_mask.view(1, -1, 1).float()
            step_length[~update_mask, :] *= self.params.box_refinement_step_decay
            outputs_prev = outputs.detach().clone()

            # Update proposal (gradient scaled by box size; revert non-improving boxes)
            step = update_mask_float * step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) \
                   - (1.0 - update_mask_float) * step
            output_boxes = bb_init + step
            output_boxes.detach_()
    elif box_refinement_space == 'relative':
        # Optimization using relative bounding box space
        sz_norm = output_boxes[:, :1, 2:].clone()
        output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)
        for i_ in range(self.params.box_refinement_iter):
            # forward pass
            bb_init_rel = output_boxes_rel.clone().detach()
            bb_init_rel.requires_grad = True

            bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)
            outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)
            if isinstance(outputs, (list, tuple)):
                outputs = outputs[0]

            outputs.backward(gradient=torch.ones_like(outputs))

            # Update mask and step length
            update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)
            update_mask_float = update_mask.view(1, -1, 1).float()
            step_length[~update_mask, :] *= self.params.box_refinement_step_decay
            outputs_prev = outputs.detach().clone()

            # Update proposal
            step = update_mask_float * step_length * bb_init_rel.grad - (1.0 - update_mask_float) * step
            output_boxes_rel = bb_init_rel + step
            output_boxes_rel.detach_()

        output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)
    else:
        raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))

    return output_boxes.view(-1, 4).cpu(), outputs.detach().view(-1).cpu()


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/tracker/atom/optim.py
# ================================================
import torch
from pytracking import optimization, TensorList, operation
import math


class FactorizedConvProblem(optimization.L2Problem):
    """Joint filter + projection-matrix optimization problem used by ATOM."""

    def __init__(self, training_samples: "TensorList", y: "TensorList", filter_reg: torch.Tensor,
                 projection_reg, params, sample_weights: "TensorList",
                 projection_activation, response_activation):
        self.training_samples = training_samples
        self.y = y
        self.filter_reg = filter_reg
        self.sample_weights = sample_weights
        self.params = params
        self.projection_reg = projection_reg
        self.projection_activation = projection_activation
        self.response_activation = response_activation

        self.diag_M = self.filter_reg.concat(projection_reg)

    def __call__(self, x: "TensorList"):
        """Compute residuals.

        :param x: [filters, projection_matrices]
        :return: [data_terms, filter_regularizations, proj_mat_regularizations]
        """
        filter = x[:len(x)//2]    # w2 in the paper
        P = x[len(x)//2:]         # w1 in the paper

        # Do first convolution
        compressed_samples = operation.conv1x1(self.training_samples, P).apply(self.projection_activation)

        # Do second convolution
        residuals = operation.conv2d(compressed_samples, filter, mode='same').apply(self.response_activation)

        # Compute data residuals
        residuals = residuals - self.y
        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals

        # Add regularization for the filter
        residuals.extend(self.filter_reg.apply(math.sqrt) * filter)

        # Add regularization for projection matrix
        residuals.extend(self.projection_reg.apply(math.sqrt) * P)

        return residuals

    def ip_input(self, a: "TensorList", b: "TensorList"):
        """Inner product in the input (variable) space, per filter."""
        num = len(a) // 2  # Number of filters
        a_filter = a[:num]
        b_filter = b[:num]
        a_P = a[num:]
        b_P = b[num:]

        # Filter inner product
        ip_out = operation.conv2d(a_filter, b_filter).view(-1)

        # Add projection matrix part
        ip_out += operation.conv2d(a_P.view(1, -1, 1, 1), b_P.view(1, -1, 1, 1)).view(-1)

        # Have independent inner products for each filter
        return ip_out.concat(ip_out.clone())

    def M1(self, x: "TensorList"):
        """Diagonal preconditioner."""
        return x / self.diag_M
class ConvProblem(optimization.L2Problem):
    """Filter-only optimization problem (projection matrix held fixed)."""

    def __init__(self, training_samples: "TensorList", y: "TensorList", filter_reg: torch.Tensor,
                 sample_weights: "TensorList", response_activation):
        self.training_samples = training_samples
        self.y = y
        self.filter_reg = filter_reg
        self.sample_weights = sample_weights
        self.response_activation = response_activation

    def __call__(self, x: "TensorList"):
        """Compute residuals.

        :param x: [filters]
        :return: [data_terms, filter_regularizations]
        """
        # Do convolution and compute residuals
        residuals = operation.conv2d(self.training_samples, x, mode='same').apply(self.response_activation)
        residuals = residuals - self.y
        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals

        # Add regularization for the filter
        residuals.extend(self.filter_reg.apply(math.sqrt) * x)

        return residuals

    def ip_input(self, a: "TensorList", b: "TensorList"):
        """Inner product in the input (variable) space, one value per filter."""
        return operation.conv2d(a, b).view(-1)


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/tracker/base/__init__.py
# ================================================
from .basetracker import BaseTracker


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/tracker/base/basetracker.py
# ================================================
# Fixed: was `from _collections import OrderedDict` — `_collections` is a private
# CPython accelerator module; the public `collections` package is the supported API.
from collections import OrderedDict


class BaseTracker:
    """Base class for all trackers."""

    def __init__(self, params):
        self.params = params
        self.visdom = None

    def initialize(self, image, info: dict) -> dict:
        """Overload this function in your tracker. This should initialize the model."""
        raise NotImplementedError

    def track(self, image, info: dict = None) -> dict:
        """Overload this function in your tracker. This should track in the frame and update the model."""
        raise NotImplementedError

    def visdom_draw_tracking(self, image, box, segmentation=None):
        """Send the current frame, box(es) and optional segmentation to visdom."""
        if isinstance(box, OrderedDict):
            # Multi-object case: one box per object id
            box = [v for k, v in box.items()]
        else:
            box = (box,)
        if segmentation is None:
            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')
        else:
            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')
# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/tracker/dimp/__init__.py
# ================================================
from .dimp import DiMP


def get_tracker_class():
    """Return the tracker class exported by this module."""
    return DiMP


# ================================================
# FILE: artrackv2_mindspore/external/AR/pytracking/tracker/dimp/dimp.py
# ================================================
from pytracking.tracker.base import BaseTracker
import torch
import torch.nn.functional as F
import math
import time
from pytracking import dcf, TensorList
from pytracking.features.preprocessing import numpy_to_torch
from pytracking.utils.plotting import show_tensor, plot_graph
from pytracking.features.preprocessing import sample_patch_multiscale, sample_patch_transformed
from pytracking.features import augmentation
import ltr.data.bounding_box_utils as bbutils
from ltr.models.target_classifier.initializer import FilterInitializerZero
from ltr.models.layers import activation


class DiMP(BaseTracker):
    multiobj_mode = 'parallel'

    def initialize_features(self):
        """Initialize the backbone network once per process."""
        if not getattr(self, 'features_initialized', False):
            self.params.net.initialize()
        self.features_initialized = True

    def initialize(self, image, info: dict) -> dict:
        """Initialize the tracker on the first frame from `info['init_bbox']` (x, y, w, h)."""
        # Initialize some stuff
        self.frame_num = 1
        if not self.params.has('device'):
            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'

        # Initialize network
        self.initialize_features()

        # The DiMP network
        self.net = self.params.net

        # Time initialization
        tic = time.time()

        # Convert image
        im = numpy_to_torch(image)

        # Get target position and size (stored internally as (row, col))
        state = info['init_bbox']
        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])
        self.target_sz = torch.Tensor([state[3], state[2]])

        # Get object id
        self.object_id = info.get('object_ids', [None])[0]
        self.id_str = '' if self.object_id is None else ' {}'.format(self.object_id)

        # Set sizes
        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])
        sz = self.params.image_sample_size
        sz = torch.Tensor([sz, sz] if isinstance(sz, int) else sz)
        if self.params.get('use_image_aspect_ratio', False):
            sz = self.image_sz * sz.prod().sqrt() / self.image_sz.prod().sqrt()
            stride = self.params.get('feature_stride', 32)
            sz = torch.round(sz / stride) * stride
        self.img_sample_sz = sz
        self.img_support_sz = self.img_sample_sz

        # Set search area
        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()
        self.target_scale = math.sqrt(search_area) / self.img_sample_sz.prod().sqrt()

        # Target size in base scale
        self.base_target_sz = self.target_sz / self.target_scale

        # Setup scale factors
        if not self.params.has('scale_factors'):
            self.params.scale_factors = torch.ones(1)
        elif isinstance(self.params.scale_factors, (list, tuple)):
            self.params.scale_factors = torch.Tensor(self.params.scale_factors)

        # Setup scale bounds
        self.min_scale_factor = torch.max(10 / self.base_target_sz)
        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)

        # Extract and transform sample
        init_backbone_feat = self.generate_init_samples(im)

        # Initialize classifier
        self.init_classifier(init_backbone_feat)

        # Initialize IoUNet
        if self.params.get('use_iou_net', True):
            self.init_iou_net(init_backbone_feat)

        out = {'time': time.time() - tic}
        return out

    def track(self, image, info: dict = None) -> dict:
        """Track the target in a new frame and return the estimated bounding box."""
        self.debug_info = {}

        self.frame_num += 1
        self.debug_info['frame_num'] = self.frame_num

        # Convert image
        im = numpy_to_torch(image)

        # ------- LOCALIZATION ------- #

        # Extract backbone features
        backbone_feat, sample_coords, im_patches = self.extract_backbone_features(
            im, self.get_centered_sample_pos(),
            self.target_scale * self.params.scale_factors,
            self.img_sample_sz)
        # Extract classification features
        test_x = self.get_classification_features(backbone_feat)

        # Location of sample
        sample_pos, sample_scales = self.get_sample_location(sample_coords)

        # Compute classification scores
        scores_raw = self.classify_target(test_x)

        # Localize the target
        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw, sample_pos, sample_scales)
        new_pos = sample_pos[scale_ind, :] + translation_vec

        # Update position and scale
        if flag != 'not_found':
            if self.params.get('use_iou_net', True):
                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'
                if self.params.get('use_classifier', True):
                    self.update_state(new_pos)
                self.refine_target_box(backbone_feat, sample_pos[scale_ind, :],
                                       sample_scales[scale_ind], scale_ind, update_scale_flag)
            elif self.params.get('use_classifier', True):
                self.update_state(new_pos, sample_scales[scale_ind])

        # ------- UPDATE ------- #

        update_flag = flag not in ['not_found', 'uncertain']
        hard_negative = (flag == 'hard_negative')
        learning_rate = self.params.get('hard_negative_learning_rate', None) if hard_negative else None

        if update_flag and self.params.get('update_classifier', False):
            # Get train sample
            train_x = test_x[scale_ind:scale_ind+1, ...]

            # Create target_box and label for spatial sample
            target_box = self.get_iounet_box(self.pos, self.target_sz,
                                             sample_pos[scale_ind, :], sample_scales[scale_ind])

            # Update the classifier model
            self.update_classifier(train_x, target_box, learning_rate, s[scale_ind, ...])

        # Set the pos of the tracker to iounet pos
        if self.params.get('use_iou_net', True) and flag != 'not_found' and hasattr(self, 'pos_iounet'):
            self.pos = self.pos_iounet.clone()

        score_map = s[scale_ind, ...]
        max_score = torch.max(score_map).item()

        # Visualize and set debug info
        self.search_area_box = torch.cat((sample_coords[scale_ind, [1, 0]],
                                          sample_coords[scale_ind, [3, 2]] - sample_coords[scale_ind, [1, 0]] - 1))
        self.debug_info['flag' + self.id_str] = flag
        self.debug_info['max_score' + self.id_str] = max_score
        if self.visdom is not None:
            self.visdom.register(score_map, 'heatmap', 2, 'Score Map' + self.id_str)
            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')
        elif self.params.debug >= 2:
            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))

        # Compute output bounding box (x, y, w, h)
        new_state = torch.cat((self.pos[[1, 0]] - (self.target_sz[[1, 0]] - 1)/2, self.target_sz[[1, 0]]))

        if self.params.get('output_not_found_box', False) and flag == 'not_found':
            output_state = [-1, -1, -1, -1]
        else:
            output_state = new_state.tolist()

        '''2020.4.26 '''
        out = {'target_bbox': output_state, 'dcf_center': new_pos[[1, 0]]}
        return out

    def get_sample_location(self, sample_coord):
        """Get the location of the extracted sample."""
        sample_coord = sample_coord.float()
        sample_pos = 0.5 * (sample_coord[:, :2] + sample_coord[:, 2:] - 1)
        sample_scales = ((sample_coord[:, 2:] - sample_coord[:, :2]) / self.img_sample_sz).prod(dim=1).sqrt()
        return sample_pos, sample_scales

    def get_centered_sample_pos(self):
        """Get the center position for the new sample. Make sure the target is correctly centered."""
        return self.pos + ((self.feature_sz + self.kernel_size) % 2) * self.target_scale * \
               self.img_support_sz / (2 * self.feature_sz)
def classify_target(self, sample_x: "TensorList"):
    """Classify target by applying the DiMP filter."""
    with torch.no_grad():
        scores = self.net.classifier.classify(self.target_filter, sample_x)
    return scores


def localize_target(self, scores, sample_pos, sample_scales):
    """Run the target localization."""

    scores = scores.squeeze(1)

    preprocess_method = self.params.get('score_preprocess', 'none')
    if preprocess_method == 'none':
        pass
    elif preprocess_method == 'exp':
        scores = scores.exp()
    elif preprocess_method == 'softmax':
        reg_val = getattr(self.net.classifier.filter_optimizer, 'softmax_reg', None)
        scores_view = scores.view(scores.shape[0], -1)
        scores_softmax = activation.softmax_reg(scores_view, dim=-1, reg=reg_val)
        scores = scores_softmax.view(scores.shape)
    else:
        raise Exception('Unknown score_preprocess in params.')

    score_filter_ksz = self.params.get('score_filter_ksz', 1)
    if score_filter_ksz > 1:
        assert score_filter_ksz % 2 == 1
        kernel = scores.new_ones(1, 1, score_filter_ksz, score_filter_ksz)
        scores = F.conv2d(scores.view(-1, 1, *scores.shape[-2:]), kernel,
                          padding=score_filter_ksz // 2).view(scores.shape)

    if self.params.get('advanced_localization', False):
        return self.localize_advanced(scores, sample_pos, sample_scales)

    # Get maximum
    score_sz = torch.Tensor(list(scores.shape[-2:]))
    score_center = (score_sz - 1)/2
    max_score, max_disp = dcf.max2d(scores)
    _, scale_ind = torch.max(max_score, dim=0)
    max_disp = max_disp[scale_ind, ...].float().cpu().view(-1)
    target_disp = max_disp - score_center

    # Compute translation vector and scale change factor
    output_sz = score_sz - (self.kernel_size + 1) % 2
    translation_vec = target_disp * (self.img_support_sz / output_sz) * sample_scales[scale_ind]

    return translation_vec, scale_ind, scores, None


def localize_advanced(self, scores, sample_pos, sample_scales):
    """Run the target advanced localization (as in ATOM)."""

    sz = scores.shape[-2:]
    score_sz = torch.Tensor(list(sz))
    output_sz = score_sz - (self.kernel_size + 1) % 2
    score_center = (score_sz - 1)/2

    scores_hn = scores
    if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
        # Keep an unwindowed copy for hard-negative analysis
        scores_hn = scores.clone()
        scores *= self.output_window

    max_score1, max_disp1 = dcf.max2d(scores)
    _, scale_ind = torch.max(max_score1, dim=0)
    sample_scale = sample_scales[scale_ind]
    max_score1 = max_score1[scale_ind]
    max_disp1 = max_disp1[scale_ind, ...].float().cpu().view(-1)
    target_disp1 = max_disp1 - score_center
    translation_vec1 = target_disp1 * (self.img_support_sz / output_sz) * sample_scale

    if max_score1.item() < self.params.target_not_found_threshold:
        return translation_vec1, scale_ind, scores_hn, 'not_found'
    if max_score1.item() < self.params.get('uncertain_threshold', -float('inf')):
        return translation_vec1, scale_ind, scores_hn, 'uncertain'
    if max_score1.item() < self.params.get('hard_sample_threshold', -float('inf')):
        return translation_vec1, scale_ind, scores_hn, 'hard_negative'

    # Mask out target neighborhood
    target_neigh_sz = self.params.target_neighborhood_scale * (self.target_sz / sample_scale) * (output_sz / self.img_support_sz)

    tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)
    tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])
    tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)
    tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])
    scores_masked = scores_hn[scale_ind:scale_ind + 1, ...].clone()
    scores_masked[..., tneigh_top:tneigh_bottom, tneigh_left:tneigh_right] = 0

    # Find new maximum (possible distractor)
    max_score2, max_disp2 = dcf.max2d(scores_masked)
    max_disp2 = max_disp2.float().cpu().view(-1)
    target_disp2 = max_disp2 - score_center
    translation_vec2 = target_disp2 * (self.img_support_sz / output_sz) * sample_scale

    prev_target_vec = (self.pos - sample_pos[scale_ind, :]) / ((self.img_support_sz / output_sz) * sample_scale)

    # Handle the different cases
    if max_score2 > self.params.distractor_threshold * max_score1:
        disp_norm1 = torch.sqrt(torch.sum((target_disp1 - prev_target_vec)**2))
        disp_norm2 = torch.sqrt(torch.sum((target_disp2 - prev_target_vec)**2))
        disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2

        if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:
            return translation_vec1, scale_ind, scores_hn, 'hard_negative'
        if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:
            return translation_vec2, scale_ind, scores_hn, 'hard_negative'
        if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:
            return translation_vec1, scale_ind, scores_hn, 'uncertain'

        # If also the distractor is close, return with highest score
        return translation_vec1, scale_ind, scores_hn, 'uncertain'

    if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:
        return translation_vec1, scale_ind, scores_hn, 'hard_negative'

    return translation_vec1, scale_ind, scores_hn, 'normal'


def extract_backbone_features(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):
    """Sample multiscale patches and run them through the backbone."""
    im_patches, patch_coords = sample_patch_multiscale(im, pos, scales, sz,
                                                       mode=self.params.get('border_mode', 'replicate'),
                                                       max_scale_change=self.params.get('patch_max_scale_change', None))
    with torch.no_grad():
        backbone_feat = self.net.extract_backbone(im_patches)
    return backbone_feat, patch_coords, im_patches


def get_classification_features(self, backbone_feat):
    """Classification-branch features from backbone features."""
    with torch.no_grad():
        return self.net.extract_classification_feat(backbone_feat)


def get_iou_backbone_features(self, backbone_feat):
    """Backbone features consumed by the bounding-box regressor."""
    return self.net.get_backbone_bbreg_feat(backbone_feat)


def get_iou_features(self, backbone_feat):
    """IoU-prediction features from backbone features."""
    with torch.no_grad():
        return self.net.bb_regressor.get_iou_feat(self.get_iou_backbone_features(backbone_feat))


def get_iou_modulation(self, iou_backbone_feat, target_boxes):
    """Target-conditioned modulation vectors for the IoU predictor."""
    with torch.no_grad():
        return self.net.bb_regressor.get_modulation(iou_backbone_feat, target_boxes)
with torch.no_grad(): return self.net.bb_regressor.get_modulation(iou_backbone_feat, target_boxes) def generate_init_samples(self, im: torch.Tensor) -> TensorList: """Perform data augmentation to generate initial training samples.""" mode = self.params.get('border_mode', 'replicate') if mode == 'inside': # Get new sample size if forced inside the image im_sz = torch.Tensor([im.shape[2], im.shape[3]]) sample_sz = self.target_scale * self.img_sample_sz shrink_factor = (sample_sz.float() / im_sz) if mode == 'inside': shrink_factor = shrink_factor.max() elif mode == 'inside_major': shrink_factor = shrink_factor.min() shrink_factor.clamp_(min=1, max=self.params.get('patch_max_scale_change', None)) sample_sz = (sample_sz.float() / shrink_factor) self.init_sample_scale = (sample_sz / self.img_sample_sz).prod().sqrt() tl = self.pos - (sample_sz - 1) / 2 br = self.pos + sample_sz / 2 + 1 global_shift = - ((-tl).clamp(0) - (br - im_sz).clamp(0)) / self.init_sample_scale else: self.init_sample_scale = self.target_scale global_shift = torch.zeros(2) self.init_sample_pos = self.pos.round() # Compute augmentation size aug_expansion_factor = self.params.get('augmentation_expansion_factor', None) aug_expansion_sz = self.img_sample_sz.clone() aug_output_sz = None if aug_expansion_factor is not None and aug_expansion_factor != 1: aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long() aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2 aug_expansion_sz = aug_expansion_sz.float() aug_output_sz = self.img_sample_sz.long().tolist() # Random shift for each sample get_rand_shift = lambda: None random_shift_factor = self.params.get('random_shift_factor', 0) if random_shift_factor > 0: get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor + global_shift).long().tolist() # Always put identity transformation first, since it is the unaugmented sample that is always used self.transforms = 
[augmentation.Identity(aug_output_sz, global_shift.long().tolist())] augs = self.params.augmentation if self.params.get('use_augmentation', True) else {} # Add all augmentations if 'shift' in augs: self.transforms.extend([augmentation.Translation(shift, aug_output_sz, global_shift.long().tolist()) for shift in augs['shift']]) if 'relativeshift' in augs: get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist() self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz, global_shift.long().tolist()) for shift in augs['relativeshift']]) if 'fliplr' in augs and augs['fliplr']: self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift())) if 'blur' in augs: self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in augs['blur']]) if 'scale' in augs: self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in augs['scale']]) if 'rotate' in augs: self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in augs['rotate']]) # Extract augmented image patches im_patches = sample_patch_transformed(im, self.init_sample_pos, self.init_sample_scale, aug_expansion_sz, self.transforms) # Extract initial backbone features with torch.no_grad(): init_backbone_feat = self.net.extract_backbone(im_patches) return init_backbone_feat def init_target_boxes(self): """Get the target bounding boxes for the initial augmented samples.""" self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale) init_target_boxes = TensorList() for T in self.transforms: init_target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0])) init_target_boxes = torch.cat(init_target_boxes.view(1, 4), 0).to(self.params.device) self.target_boxes = init_target_boxes.new_zeros(self.params.sample_memory_size, 4) 
self.target_boxes[:init_target_boxes.shape[0],:] = init_target_boxes
return init_target_boxes

def init_memory(self, train_x: TensorList):
    """Initialize the training-sample memory from the first-frame samples.

    args:
        train_x: TensorList of initial (augmented) feature samples; each entry
            is indexed as (num_samples, C, H, W) per the allocation below.
    """
    # Initialize first-frame spatial training samples
    self.num_init_samples = train_x.size(0)
    init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])

    # Sample counters and weights for spatial
    self.num_stored_samples = self.num_init_samples.copy()
    self.previous_replace_ind = [None] * len(self.num_stored_samples)
    self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])
    for sw, init_sw, num in zip(self.sample_weights, init_sample_weights, self.num_init_samples):
        sw[:num] = init_sw

    # Initialize memory: pre-allocate sample_memory_size slots; only the first
    # rows are filled with the initial samples here.
    self.training_samples = TensorList(
        [x.new_zeros(self.params.sample_memory_size, x.shape[1], x.shape[2], x.shape[3]) for x in train_x])

    for ts, x in zip(self.training_samples, train_x):
        ts[:x.shape[0],...] = x

def update_memory(self, sample_x: TensorList, target_box, learning_rate = None):
    """Insert a new feature sample and its target box into the memory.

    args:
        sample_x: TensorList with the new feature sample(s).
        target_box: Target box corresponding to the new sample.
        learning_rate: Optional override of the memory learning rate.
    """
    # Update weights and get replace ind
    replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, learning_rate)
    self.previous_replace_ind = replace_ind

    # Update sample and label memory
    for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):
        train_samp[ind:ind+1,...]
= x

# Update bb memory
self.target_boxes[replace_ind[0],:] = target_box

self.num_stored_samples += 1

def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, learning_rate = None):
    """Update memory sample weights and pick the slot to replace per feature.

    Decays existing weights by the learning rate, assigns the new sample its
    weight, and renormalizes. The first-frame samples can be protected from
    replacement and guaranteed a minimum total weight via the
    'init_samples_minimum_weight' parameter.

    returns:
        List with the replaced memory index for each feature type.
    """
    # Update weights and get index to replace
    replace_ind = []
    for sw, prev_ind, num_samp, num_init in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples):
        lr = learning_rate
        if lr is None:
            lr = self.params.learning_rate

        init_samp_weight = self.params.get('init_samples_minimum_weight', None)
        if init_samp_weight == 0:
            init_samp_weight = None
        # When init samples are protected, search for a slot only past them.
        s_ind = 0 if init_samp_weight is None else num_init

        if num_samp == 0 or lr == 1:
            # Empty memory or full replacement: new sample takes slot 0.
            sw[:] = 0
            sw[0] = 1
            r_ind = 0
        else:
            # Get index to replace: first free slot, else the lowest-weight slot.
            if num_samp < sw.shape[0]:
                r_ind = num_samp
            else:
                _, r_ind = torch.min(sw[s_ind:], 0)
                r_ind = r_ind.item() + s_ind

            # Update weights
            if prev_ind is None:
                sw /= 1 - lr
                sw[r_ind] = lr
            else:
                sw[r_ind] = sw[prev_ind] / (1 - lr)

        sw /= sw.sum()
        # Enforce the minimum total weight of the first-frame samples.
        if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:
            sw /= init_samp_weight + sw[num_init:].sum()
            sw[:num_init] = init_samp_weight / num_init

        replace_ind.append(r_ind)

    return replace_ind

def update_state(self, new_pos, new_scale = None):
    """Set the new target position/scale, clamped to stay inside the image."""
    # Update scale
    if new_scale is not None:
        self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)
        self.target_sz = self.base_target_sz * self.target_scale

    # Update pos: keep at least `target_inside_ratio` of the target in view.
    inside_ratio = self.params.get('target_inside_ratio', 0.2)
    inside_offset = (inside_ratio - 0.5) * self.target_sz
    self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)

def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
    """All inputs in original image coordinates.
Generates a box in the cropped image sample reference frame, in the format used by the IoUNet.""" box_center = (pos - sample_pos) / sample_scale + (self.img_sample_sz - 1) / 2 box_sz = sz / sample_scale target_ul = box_center - (box_sz - 1) / 2 return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))]) def init_iou_net(self, backbone_feat): # Setup IoU net and objective for p in self.net.bb_regressor.parameters(): p.requires_grad = False # Get target boxes for the different augmentations self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale) target_boxes = TensorList() if self.params.iounet_augmentation: for T in self.transforms: if not isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)): break target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0])) else: target_boxes.append(self.classifier_target_box + torch.Tensor([self.transforms[0].shift[1], self.transforms[0].shift[0], 0, 0])) target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device) # Get iou features iou_backbone_feat = self.get_iou_backbone_features(backbone_feat) # Remove other augmentations such as rotation iou_backbone_feat = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_feat]) # Get modulation vector self.iou_modulation = self.get_iou_modulation(iou_backbone_feat, target_boxes) if torch.is_tensor(self.iou_modulation[0]): self.iou_modulation = TensorList([x.detach().mean(0) for x in self.iou_modulation]) def init_classifier(self, init_backbone_feat): # Get classification features x = self.get_classification_features(init_backbone_feat) # Overwrite some parameters in the classifier. 
(These are not generally changed) self._overwrite_classifier_params(feature_dim=x.shape[-3]) # Add the dropout augmentation here, since it requires extraction of the classification features if 'dropout' in self.params.augmentation and self.params.get('use_augmentation', True): num, prob = self.params.augmentation['dropout'] self.transforms.extend(self.transforms[:1]*num) x = torch.cat([x, F.dropout2d(x[0:1,...].expand(num,-1,-1,-1), p=prob, training=True)]) # Set feature size and other related sizes self.feature_sz = torch.Tensor(list(x.shape[-2:])) ksz = self.net.classifier.filter_size self.kernel_size = torch.Tensor([ksz, ksz] if isinstance(ksz, (int, float)) else ksz) self.output_sz = self.feature_sz + (self.kernel_size + 1)%2 # Construct output window self.output_window = None if self.params.get('window_output', False): if self.params.get('use_clipped_window', False): self.output_window = dcf.hann2d_clipped(self.output_sz.long(), (self.output_sz*self.params.effective_search_area / self.params.search_area_scale).long(), centered=True).to(self.params.device) else: self.output_window = dcf.hann2d(self.output_sz.long(), centered=True).to(self.params.device) self.output_window = self.output_window.squeeze(0) # Get target boxes for the different augmentations target_boxes = self.init_target_boxes() # Set number of iterations plot_loss = self.params.debug > 0 num_iter = self.params.get('net_opt_iter', None) # Get target filter by running the discriminative model prediction module with torch.no_grad(): self.target_filter, _, losses = self.net.classifier.get_filter(x, target_boxes, num_iter=num_iter, compute_losses=plot_loss) # Init memory if self.params.get('update_classifier', True): self.init_memory(TensorList([x])) if plot_loss: if isinstance(losses, dict): losses = losses['train'] self.losses = torch.cat(losses) if self.visdom is not None: self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str) elif 
self.params.debug >= 3: plot_graph(self.losses, 10, title='Training Loss' + self.id_str) def _overwrite_classifier_params(self, feature_dim): # Overwrite some parameters in the classifier. (These are not generally changed) pred_module = getattr(self.net.classifier.filter_optimizer, 'score_predictor', self.net.classifier.filter_optimizer) if self.params.get('label_threshold', None) is not None: self.net.classifier.filter_optimizer.label_threshold = self.params.label_threshold if self.params.get('label_shrink', None) is not None: self.net.classifier.filter_optimizer.label_shrink = self.params.label_shrink if self.params.get('softmax_reg', None) is not None: self.net.classifier.filter_optimizer.softmax_reg = self.params.softmax_reg if self.params.get('filter_reg', None) is not None: pred_module.filter_reg[0] = self.params.filter_reg pred_module.min_filter_reg = self.params.filter_reg if self.params.get('filter_init_zero', False): self.net.classifier.filter_initializer = FilterInitializerZero(self.net.classifier.filter_size, feature_dim) def update_classifier(self, train_x, target_box, learning_rate=None, scores=None): # Set flags and learning rate hard_negative_flag = learning_rate is not None if learning_rate is None: learning_rate = self.params.learning_rate # Update the tracker memory if hard_negative_flag or self.frame_num % self.params.get('train_sample_interval', 1) == 0: self.update_memory(TensorList([train_x]), target_box, learning_rate) # Decide the number of iterations to run num_iter = 0 low_score_th = self.params.get('low_score_opt_threshold', None) if hard_negative_flag: num_iter = self.params.get('net_opt_hn_iter', None) elif low_score_th is not None and low_score_th > scores.max().item(): num_iter = self.params.get('net_opt_low_iter', None) elif (self.frame_num - 1) % self.params.train_skipping == 0: num_iter = self.params.get('net_opt_update_iter', None) plot_loss = self.params.debug > 0 if num_iter > 0: # Get inputs for the DiMP filter optimizer 
module samples = self.training_samples[0][:self.num_stored_samples[0],...] target_boxes = self.target_boxes[:self.num_stored_samples[0],:].clone() sample_weights = self.sample_weights[0][:self.num_stored_samples[0]] # Run the filter optimizer module with torch.no_grad(): self.target_filter, _, losses = self.net.classifier.filter_optimizer(self.target_filter, num_iter=num_iter, feat=samples, bb=target_boxes, sample_weight=sample_weights, compute_losses=plot_loss) if plot_loss: if isinstance(losses, dict): losses = losses['train'] self.losses = torch.cat((self.losses, torch.cat(losses))) if self.visdom is not None: self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str) elif self.params.debug >= 3: plot_graph(self.losses, 10, title='Training Loss' + self.id_str) def refine_target_box(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True): """Run the ATOM IoUNet to refine the target bounding box.""" if hasattr(self.net.bb_regressor, 'predict_bb'): return self.direct_box_regression(backbone_feat, sample_pos, sample_scale, scale_ind, update_scale) # Initial box for refinement init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale) # Extract features from the relevant scale iou_features = self.get_iou_features(backbone_feat) iou_features = TensorList([x[scale_ind:scale_ind+1,...] 
for x in iou_features]) # Generate random initial boxes init_boxes = init_box.view(1,4).clone() if self.params.num_init_random_boxes > 0: square_box_sz = init_box[2:].prod().sqrt() rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)]) minimal_edge_size = init_box[2:].min()/3 rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size) new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2] init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1) init_boxes = torch.cat([init_box.view(1,4), init_boxes]) # Optimize the boxes output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes) # Remove weird boxes output_boxes[:, 2:].clamp_(1) aspect_ratio = output_boxes[:,2] / output_boxes[:,3] keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio) output_boxes = output_boxes[keep_ind,:] output_iou = output_iou[keep_ind] # If no box found if output_boxes.shape[0] == 0: return # Predict box k = self.params.get('iounet_k', 5) topk = min(k, output_boxes.shape[0]) _, inds = torch.topk(output_iou, topk) predicted_box = output_boxes[inds, :].mean(0) predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0) # Get new position and size new_pos = predicted_box[:2] + predicted_box[2:] / 2 new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos new_target_sz = predicted_box[2:].flip((0,)) * sample_scale new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod()) self.pos_iounet = new_pos.clone() if self.params.get('use_iounet_pos_for_learning', True): self.pos = new_pos.clone() self.target_sz = new_target_sz if update_scale: self.target_scale = new_scale # self.visualize_iou_pred(iou_features, predicted_box) def optimize_boxes(self, iou_features, init_boxes): box_refinement_space = 
self.params.get('box_refinement_space', 'default') if box_refinement_space == 'default': return self.optimize_boxes_default(iou_features, init_boxes) if box_refinement_space == 'relative': return self.optimize_boxes_relative(iou_features, init_boxes) raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space)) def optimize_boxes_default(self, iou_features, init_boxes): """Optimize iounet boxes with the default parametrization""" output_boxes = init_boxes.view(1, -1, 4).to(self.params.device) step_length = self.params.box_refinement_step_length if isinstance(step_length, (tuple, list)): step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]], device=self.params.device).view(1,1,4) for i_ in range(self.params.box_refinement_iter): # forward pass bb_init = output_boxes.clone().detach() bb_init.requires_grad = True outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init) if isinstance(outputs, (list, tuple)): outputs = outputs[0] outputs.backward(gradient = torch.ones_like(outputs)) # Update proposal output_boxes = bb_init + step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) output_boxes.detach_() step_length *= self.params.box_refinement_step_decay return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu() def optimize_boxes_relative(self, iou_features, init_boxes): """Optimize iounet boxes with the relative parametrization ised in PrDiMP""" output_boxes = init_boxes.view(1, -1, 4).to(self.params.device) step_length = self.params.box_refinement_step_length if isinstance(step_length, (tuple, list)): step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4) sz_norm = output_boxes[:,:1,2:].clone() output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm) for i_ in range(self.params.box_refinement_iter): # forward pass bb_init_rel = output_boxes_rel.clone().detach() bb_init_rel.requires_grad 
= True bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm) outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init) if isinstance(outputs, (list, tuple)): outputs = outputs[0] outputs.backward(gradient = torch.ones_like(outputs)) # Update proposal output_boxes_rel = bb_init_rel + step_length * bb_init_rel.grad output_boxes_rel.detach_() step_length *= self.params.box_refinement_step_decay # for s in outputs.view(-1): # print('{:.2f} '.format(s.item()), end='') # print('') # print('') output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm) return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu() def direct_box_regression(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True): """Implementation of direct bounding box regression.""" # Initial box for refinement init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale) # Extract features from the relevant scale iou_features = self.get_iou_features(backbone_feat) iou_features = TensorList([x[scale_ind:scale_ind+1,...] 
for x in iou_features]) # Generate random initial boxes init_boxes = init_box.view(1, 1, 4).clone().to(self.params.device) # Optimize the boxes output_boxes = self.net.bb_regressor.predict_bb(self.iou_modulation, iou_features, init_boxes).view(-1,4).cpu() # Remove weird boxes output_boxes[:, 2:].clamp_(1) predicted_box = output_boxes[0, :] # Get new position and size new_pos = predicted_box[:2] + predicted_box[2:] / 2 new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos new_target_sz = predicted_box[2:].flip((0,)) * sample_scale new_scale_bbr = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod()) new_scale = new_scale_bbr self.pos_iounet = new_pos.clone() if self.params.get('use_iounet_pos_for_learning', True): self.pos = new_pos.clone() self.target_sz = new_target_sz if update_scale: self.target_scale = new_scale def visualize_iou_pred(self, iou_features, center_box): center_box = center_box.view(1,1,4) sz_norm = center_box[...,2:].clone() center_box_rel = bbutils.rect_to_rel(center_box, sz_norm) pos_dist = 1.0 sz_dist = math.log(3.0) pos_step = 0.01 sz_step = 0.01 pos_scale = torch.arange(-pos_dist, pos_dist+pos_step, step=pos_step) sz_scale = torch.arange(-sz_dist, sz_dist+sz_step, step=sz_step) bbx = torch.zeros(1, pos_scale.numel(), 4) bbx[0,:,0] = pos_scale.clone() bby = torch.zeros(pos_scale.numel(), 1, 4) bby[:,0,1] = pos_scale.clone() bbw = torch.zeros(1, sz_scale.numel(), 4) bbw[0,:,2] = sz_scale.clone() bbh = torch.zeros(sz_scale.numel(), 1, 4) bbh[:,0,3] = sz_scale.clone() pos_boxes = bbutils.rel_to_rect((center_box_rel + bbx) + bby, sz_norm).view(1,-1,4).to(self.params.device) sz_boxes = bbutils.rel_to_rect((center_box_rel + bbw) + bbh, sz_norm).view(1,-1,4).to(self.params.device) pos_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, pos_boxes).exp() sz_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, sz_boxes).exp() 
show_tensor(pos_scores.view(pos_scale.numel(),-1), title='Position scores', fig_num=21) show_tensor(sz_scores.view(sz_scale.numel(),-1), title='Size scores', fig_num=22) def visdom_draw_tracking(self, image, box, segmentation=None): if hasattr(self, 'search_area_box'): self.visdom.register((image, box, self.search_area_box), 'Tracking', 1, 'Tracking') else: self.visdom.register((image, box), 'Tracking', 1, 'Tracking') ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/tracker/eco/__init__.py ================================================ from .eco import ECO def get_tracker_class(): return ECO ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/tracker/eco/eco.py ================================================ from pytracking.tracker.base import BaseTracker import torch import torch.nn.functional as F import math from pytracking import complex, dcf, fourier, TensorList from pytracking.libs.tensorlist import tensor_operation from pytracking.features.preprocessing import numpy_to_torch from pytracking.utils.plotting import show_tensor from pytracking.libs.optimization import GaussNewtonCG from .optim import FilterOptim, FactorizedConvProblem from pytracking.features import augmentation class ECO(BaseTracker): multiobj_mode = 'parallel' def initialize_features(self): if not getattr(self, 'features_initialized', False): self.params.features.initialize() self.features_initialized = True def initialize(self, image, info: dict) -> dict: state = info['init_bbox'] # Initialize some stuff self.frame_num = 1 if not self.params.has('device'): self.params.device = 'cuda' if self.params.use_gpu else 'cpu' # Initialize features self.initialize_features() # Chack if image is color self.params.features.set_is_color(image.shape[2] == 3) # Get feature specific params self.fparams = self.params.features.get_fparams('feature_params') # Get position and size self.pos = torch.Tensor([state[1] 
+ (state[3] - 1)/2, state[0] + (state[2] - 1)/2]) self.target_sz = torch.Tensor([state[3], state[2]]) # Set search area self.target_scale = 1.0 search_area = torch.prod(self.target_sz * self.params.search_area_scale).item() if search_area > self.params.max_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.max_image_sample_size) elif search_area < self.params.min_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.min_image_sample_size) # Target size in base scale self.base_target_sz = self.target_sz / self.target_scale # Use odd square search area and set sizes feat_max_stride = max(self.params.features.stride()) self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2) self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride) # Set other sizes (corresponds to ECO code) self.img_support_sz = self.img_sample_sz self.feature_sz = self.params.features.size(self.img_sample_sz) self.filter_sz = self.feature_sz + (self.feature_sz + 1) % 2 self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output self.compressed_dim = self.fparams.attribute('compressed_dim') # Number of filters self.num_filters = len(self.filter_sz) # Get window function self.window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz]) # Get interpolation function self.interp_fs = TensorList([dcf.get_interp_fourier(sz, self.params.interpolation_method, self.params.interpolation_bicubic_a, self.params.interpolation_centering, self.params.interpolation_windowing, self.params.device) for sz in self.filter_sz]) # Get regularization filter self.reg_filter = TensorList([dcf.get_reg_filter(self.img_support_sz, self.base_target_sz, fparams).to(self.params.device) for fparams in self.fparams]) self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1) # Get label function output_sigma_factor 
= self.fparams.attribute('output_sigma_factor') sigma = (self.filter_sz / self.img_support_sz) * torch.sqrt(self.base_target_sz.prod()) * output_sigma_factor self.yf = TensorList([dcf.label_function(sz, sig).to(self.params.device) for sz, sig in zip(self.filter_sz, sigma)]) # Optimization options self.params.precond_learning_rate = self.fparams.attribute('learning_rate') if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1: self.params.direction_forget_factor = 0 else: self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate # Convert image im = numpy_to_torch(image) # Setup bounds self.image_sz = torch.Tensor([im.shape[2], im.shape[3]]) self.min_scale_factor = torch.max(10 / self.base_target_sz) self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz) # Extract and transform sample x = self.generate_init_samples(im) # Initialize projection matrix x_mat = TensorList([e.permute(1,0,2,3).reshape(e.shape[1], -1).clone() for e in x]) x_mat -= x_mat.mean(dim=1, keepdim=True) cov_x = x_mat @ x_mat.t() self.projection_matrix = TensorList([torch.svd(C)[0][:,:cdim].clone() for C, cdim in zip(cov_x, self.compressed_dim)]) # Transform to get the training sample train_xf = self.preprocess_sample(x) # Shift the samples back if 'shift' in self.params.augmentation: for xf in train_xf: if xf.shape[0] == 1: continue for i, shift in enumerate(self.params.augmentation['shift']): shift_samp = 2 * math.pi * torch.Tensor(shift) / self.img_support_sz xf[1+i:2+i,...] 
= fourier.shift_fs(xf[1+i:2+i,...], shift=shift_samp) # Shift sample shift_samp = 2*math.pi * (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz) train_xf = fourier.shift_fs(train_xf, shift=shift_samp) # Initialize first-frame training samples num_init_samples = train_xf.size(0) self.init_sample_weights = TensorList([xf.new_ones(1) / xf.shape[0] for xf in train_xf]) self.init_training_samples = train_xf.permute(2, 3, 0, 1, 4) # Sample counters and weights self.num_stored_samples = num_init_samples self.previous_replace_ind = [None]*len(self.num_stored_samples) self.sample_weights = TensorList([xf.new_zeros(self.params.sample_memory_size) for xf in train_xf]) for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, num_init_samples): sw[:num] = init_sw # Initialize memory self.training_samples = TensorList( [xf.new_zeros(xf.shape[2], xf.shape[3], self.params.sample_memory_size, cdim, 2) for xf, cdim in zip(train_xf, self.compressed_dim)]) # Initialize filter self.filter = TensorList( [xf.new_zeros(1, cdim, xf.shape[2], xf.shape[3], 2) for xf, cdim in zip(train_xf, self.compressed_dim)]) # Do joint optimization self.joint_problem = FactorizedConvProblem(self.init_training_samples, self.yf, self.reg_filter, self.projection_matrix, self.params, self.init_sample_weights) joint_var = self.filter.concat(self.projection_matrix) self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug>=1), visdom=self.visdom) if self.params.update_projection_matrix: self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter) # Re-project samples with the new projection matrix compressed_samples = complex.mtimes(self.init_training_samples, self.projection_matrix) for train_samp, init_samp in zip(self.training_samples, compressed_samples): train_samp[:,:,:init_samp.shape[2],:,:] = init_samp # Initialize optimizer self.filter_optimizer = FilterOptim(self.params, 
self.reg_energy) self.filter_optimizer.register(self.filter, self.training_samples, self.yf, self.sample_weights, self.reg_filter) self.filter_optimizer.sample_energy = self.joint_problem.sample_energy self.filter_optimizer.residuals = self.joint_optimizer.residuals.clone() if not self.params.update_projection_matrix: self.filter_optimizer.run(self.params.init_CG_iter) # Post optimization self.filter_optimizer.run(self.params.post_init_CG_iter) self.symmetrize_filter() def track(self, image, info: dict = None) -> dict: self.debug_info = {} self.frame_num += 1 self.debug_info['frame_num'] = self.frame_num # Convert image im = numpy_to_torch(image) # ------- LOCALIZATION ------- # # Get sample sample_pos = self.pos.round() sample_scales = self.target_scale * self.params.scale_factors test_xf = self.extract_fourier_sample(im, self.pos, sample_scales, self.img_sample_sz) # Compute scores sf = self.apply_filter(test_xf) translation_vec, scale_ind, s = self.localize_target(sf) scale_change_factor = self.params.scale_factors[scale_ind] # Update position and scale self.update_state(sample_pos + translation_vec, self.target_scale * scale_change_factor) score_map = s[scale_ind, ...] max_score = torch.max(score_map).item() self.debug_info['max_score'] = max_score if self.visdom is not None: self.visdom.register(score_map, 'heatmap', 2, 'Score Map') self.visdom.register(self.debug_info, 'info_dict', 1, 'Status') elif self.params.debug >= 2: show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score)) # if self.params.debug >= 3: # for i, hf in enumerate(self.filter): # show_tensor(fourier.sample_fs(hf).abs().mean(1), 6+i) # ------- UPDATE ------- # # Get train sample train_xf = TensorList([xf[scale_ind:scale_ind+1, ...] 
for xf in test_xf]) # Shift the sample shift_samp = 2*math.pi * (self.pos - sample_pos) / (sample_scales[scale_ind] * self.img_support_sz) train_xf = fourier.shift_fs(train_xf, shift=shift_samp) # Update memory self.update_memory(train_xf) # Train filter if self.frame_num % self.params.train_skipping == 1: self.filter_optimizer.run(self.params.CG_iter, train_xf) self.symmetrize_filter() # Return new state new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]])) out = {'target_bbox': new_state.tolist()} return out def apply_filter(self, sample_xf: TensorList) -> torch.Tensor: return complex.mult(self.filter, sample_xf).sum(1, keepdim=True) def localize_target(self, sf: TensorList): if self.params.score_fusion_strategy == 'sum': scores = fourier.sample_fs(fourier.sum_fs(sf), self.output_sz) elif self.params.score_fusion_strategy == 'weightedsum': weight = self.fparams.attribute('translation_weight') scores = fourier.sample_fs(fourier.sum_fs(weight * sf), self.output_sz) elif self.params.score_fusion_strategy == 'transcale': alpha = self.fparams.attribute('scale_weight') beta = self.fparams.attribute('translation_weight') sample_sz = torch.round(self.output_sz.view(1,-1) * self.params.scale_factors.view(-1,1)) scores = 0 for sfe, a, b in zip(sf, alpha, beta): sfe = fourier.shift_fs(sfe, math.pi*torch.ones(2)) scores_scales = [] for sind, sz in enumerate(sample_sz): pd = (self.output_sz-sz)/2 scores_scales.append(F.pad(fourier.sample_fs(sfe[sind:sind+1,...], sz), (math.floor(pd[1].item()), math.ceil(pd[1].item()), math.floor(pd[0].item()), math.ceil(pd[0].item())))) scores_cat = torch.cat(scores_scales) scores = scores + (b - a) * scores_cat.mean(dim=0, keepdim=True) + a * scores_cat else: raise ValueError('Unknown score fusion strategy.') # Get maximum max_score, max_disp = dcf.max2d(scores) _, scale_ind = torch.max(max_score, dim=0) max_disp = max_disp.float().cpu() # Convert to displacements in the base scale if 
self.params.score_fusion_strategy in ['sum', 'weightedsum']: disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2 elif self.params.score_fusion_strategy == 'transcale': disp = max_disp - self.output_sz / 2 # Compute translation vector and scale change factor translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale if self.params.score_fusion_strategy in ['sum', 'weightedsum']: translation_vec *= self.params.scale_factors[scale_ind] return translation_vec, scale_ind, scores def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor): return self.params.features.extract(im, pos, scales, sz)[0] def extract_fourier_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> TensorList: x = self.extract_sample(im, pos, scales, sz) return self.preprocess_sample(self.project_sample(x)) def preprocess_sample(self, x: TensorList) -> TensorList: x *= self.window sample_xf = fourier.cfft2(x) return TensorList([dcf.interpolate_dft(xf, bf) for xf, bf in zip(sample_xf, self.interp_fs)]) def project_sample(self, x: TensorList): @tensor_operation def _project_sample(x: torch.Tensor, P: torch.Tensor): if P is None: return x return torch.matmul(x.permute(2, 3, 0, 1), P).permute(2, 3, 0, 1) return _project_sample(x, self.projection_matrix) def generate_init_samples(self, im: torch.Tensor) -> TensorList: # Do data augmentation transforms = [augmentation.Identity()] if 'shift' in self.params.augmentation: transforms.extend([augmentation.Translation(shift) for shift in self.params.augmentation['shift']]) if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']: transforms.append(augmentation.FlipHorizontal()) if 'rotate' in self.params.augmentation: transforms.extend([augmentation.Rotate(angle) for angle in self.params.augmentation['rotate']]) if 'blur' in self.params.augmentation: transforms.extend([augmentation.Blur(sigma) for sigma in 
    def update_sample_weights(self):
        """For each feature block, pick the memory slot to overwrite with the
        newest training sample and update the per-sample importance weights.

        returns:
            list with one replacement index per feature block.
        """
        replace_ind = []
        for sw, prev_ind, num_samp, fparams in zip(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.fparams):
            if num_samp == 0 or fparams.learning_rate == 1:
                # Empty memory (or learning rate 1, i.e. only the newest sample
                # matters): put all weight on slot 0 and replace it.
                sw[:] = 0
                sw[0] = 1
                r_ind = 0
            else:
                # Get index to replace: the slot with the smallest weight.
                _, r_ind = torch.min(sw, 0)
                r_ind = r_ind.item()

                # Update weights with an exponentially decaying scheme.
                if prev_ind is None:
                    sw /= 1 - fparams.learning_rate
                    sw[r_ind] = fparams.learning_rate
                else:
                    # Reuse the previously-replaced slot's weight, boosted by
                    # the learning-rate decay.
                    sw[r_ind] = sw[prev_ind] / (1 - fparams.learning_rate)

            # Renormalize so the weights sum to one.
            sw /= sw.sum()
            replace_ind.append(r_ind)

        self.previous_replace_ind = replace_ind.copy()
        self.num_stored_samples += 1
        return replace_ind
class FactorizedConvProblem(optimization.L2Problem):
    """L2 optimization problem for jointly learning the correlation filters and
    the feature projection matrices (the factorized convolution formulation of
    ECO). The optimization variable x is the concatenation
    [filters, projection_matrices]; residuals are computed in the Fourier
    domain.
    """

    def __init__(self, training_samples: TensorList, yf: TensorList, reg_filter: torch.Tensor, init_proj_mat: TensorList, params, sample_weights: torch.Tensor = None):
        """
        args:
            training_samples - stored training feature samples per block.
            yf - desired (label) responses in the Fourier domain.
            reg_filter - spatial regularization filter(s).
            init_proj_mat - initial projection matrices, used only to compute
                the preconditioner energies.
            params - tracker parameter object (preconditioner settings etc.).
            sample_weights - optional per-sample importance weights.
        """
        self.training_samples = training_samples
        self.yf = complex.complex(yf).permute(2, 3, 0, 1, 4)
        self.reg_filter = reg_filter
        # Square-root of the weights, applied symmetrically to the residuals.
        self.sample_weights_sqrt = None if sample_weights is None else sample_weights.sqrt()
        self.params = params

        # Sample energy for preconditioner
        compressed_samples = complex.mtimes(self.training_samples, init_proj_mat)
        self.sample_energy = complex.abs_sqr(compressed_samples).mean(dim=2, keepdim=True).permute(2, 3, 0, 1)
        self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1)

        # Projection energy for preconditioner
        self.proj_energy = 2 * fourier.inner_prod_fs(yf, yf) / self.training_samples.size(3)

        # Filter part of preconditioner: a convex blend of per-channel and
        # channel-averaged sample energy, plus the regularization energy.
        self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy +
                      (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + \
                      self.params.precond_reg_param * self.reg_energy
        self.diag_M.unsqueeze_(-1)

        # Projection matrix part of preconditioner
        self.diag_M.extend(self.params.precond_proj_param * (self.proj_energy + self.params.projection_reg))

    def __call__(self, x: TensorList):
        """
        Compute residuals
        :param x: [filters, projection_matrices]
        :return: [data_terms, filter_regularizations, proj_mat_regularizations]
        """
        hf = x[:len(x)//2]
        P = x[len(x)//2:]

        # Data term: project the samples, correlate with the filters and
        # subtract the desired responses.
        compressed_samples = complex.mtimes(self.training_samples, P)
        residuals = complex.mtimes(compressed_samples, hf.permute(2, 3, 1, 0, 4))  # (h, w, num_samp, num_filt, 2)
        residuals = residuals - self.yf

        if self.sample_weights_sqrt is not None:
            residuals = complex.mult(self.sample_weights_sqrt.view(1, 1, -1, 1), residuals)

        # Add spatial regularization
        for hfe, reg_filter in zip(hf, self.reg_filter):
            reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1)
            reg_pad2 = min(reg_filter.shape[-1] - 1, hfe.shape[-2] - 1)

            # Add part needed for convolution: mirror the conjugate-symmetric
            # half of the spectrum so the convolution sees the full signal.
            if reg_pad2 > 0:
                hfe_left_padd = complex.conj(hfe[...,1:reg_pad2+1,:].clone().detach().flip((2,3)))
                hfe_conv = torch.cat([hfe_left_padd, hfe], -2)
            else:
                hfe_conv = hfe.clone()

            # Shift data to batch dimension
            hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2])

            # Do first convolution
            hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2))

            residuals.append(hfe_conv)

        # Add regularization for projection matrix
        residuals.extend(math.sqrt(self.params.projection_reg) * P)

        return residuals

    def ip_input(self, a: TensorList, b: TensorList):
        """Inner product in the input (variable) space: filter part plus
        projection-matrix part, duplicated so each half of the variable list
        gets its own value."""
        num = len(a) // 2       # Number of filters
        a_filter = a[:num]
        b_filter = b[:num]
        a_P = a[num:]
        b_P = b[num:]

        # Filter inner product
        ip_out = fourier.inner_prod_fs(a_filter, b_filter)

        # Add projection matrix part
        ip_out += a_P.reshape(-1) @ b_P.reshape(-1)

        # Have independent inner products for each filter
        return ip_out.concat(ip_out.clone())

    def ip_output(self, a: TensorList, b: TensorList):
        """Inner product in the residual (output) space: data terms, filter
        regularization terms and projection regularization terms."""
        num = len(a) // 3       # Number of filters
        a_data = a[:num].permute(2,3,0,1,4)
        b_data = b[:num].permute(2,3,0,1,4)
        a_filt_reg = a[num:2*num]
        b_filt_reg = b[num:2*num]
        a_P_reg = a[2*num:]
        b_P_reg = b[2*num:]

        ip_data = sum(fourier.inner_prod_fs(a_data, b_data))
        ip_filt_reg = ip_data.new_zeros(1)
        for ar, br, res_data, reg_filter in zip(a_filt_reg, b_filt_reg, a_data, self.reg_filter):
            reg_pad2 = min(reg_filter.shape[-1] - 1, res_data.shape[-2] - 1)
            # Reinterpret the real conv output as interleaved complex pairs.
            arp = ar.reshape(1, -1, 2, ar.shape[2], ar.shape[3]).permute(0, 1, 3, 4, 2)
            brp = br.reshape(1, -1, 2, br.shape[2], br.shape[3]).permute(0, 1, 3, 4, 2)
            # Skip the mirrored columns added in __call__ to avoid counting
            # them twice in the inner product.
            ip_filt_reg += fourier.inner_prod_fs(arp[:,:,:,2*reg_pad2:,:], brp[:,:,:,2*reg_pad2:,:])

        ip_P_reg = sum(a_P_reg.view(-1) @ b_P_reg.view(-1))

        return ip_data + ip_filt_reg + ip_P_reg

    def M1(self, x: TensorList):
        # Preconditioner: element-wise division by the diagonal approximation.
        return x / self.diag_M
complex.mult(self.sample_weights.view(1,1,-1,1), sh) # Multiply with transpose hf_out = complex.mtimes(sh.permute(0,1,3,2,4), self.training_samples, conj_b=True).permute(2,3,0,1,4) # Add regularization for hfe, hfe_out, reg_filter in zip(hf, hf_out, self.reg_filter): reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1) reg_pad2 = min(reg_filter.shape[-1] - 1, 2*hfe.shape[-2]- 2) # Add part needed for convolution if reg_pad2 > 0: hfe_conv = torch.cat([complex.conj(hfe[...,1:reg_pad2+1,:].flip((2,3))), hfe], -2) else: hfe_conv = hfe.clone() # Shift data to batch dimension hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2]) # Do first convolution hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2)) # Do second convolution remove_size = min(reg_pad2, hfe.shape[-2]-1) hfe_conv = F.conv2d(hfe_conv[...,remove_size:], reg_filter) # Reshape back and add hfe_out += hfe_conv.reshape(hfe.shape[0], hfe.shape[1], 2, hfe.shape[2], hfe.shape[3]).permute(0,1,3,4,2) return hf_out def ip(self, a: torch.Tensor, b: torch.Tensor): return fourier.inner_prod_fs(a, b) def M1(self, hf): return complex.div(hf, self.diag_M) ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/util_scripts/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/util_scripts/download_results.py ================================================ import os import sys import gdown import re import shutil import argparse import tempfile env_path = os.path.join(os.path.dirname(__file__), '../..') if env_path not in sys.path: sys.path.append(env_path) from pytracking.evaluation.environment import env_settings results_link_dict = { "dimp": { "prdimp50_003.zip": "1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc", "prdimp50_002.zip": "1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz", "prdimp50_001.zip": 
"17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS", "prdimp50_000.zip": "1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6", "prdimp18_004.zip": "1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM", "prdimp18_003.zip": "1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO", "prdimp18_002.zip": "17lMllYhygCqgE81DoHX4BZar3xc3auzM", "prdimp18_001.zip": "1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G", "prdimp18_000.zip": "1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z", "prdimp50_004.zip": "1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO", "dimp50_004.zip": "1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa", "dimp50_000.zip": "1LCgf5sg453Z4bY37A_W5mbXeG68U1fET", "dimp18_000.zip": "17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g", "dimp18_001.zip": "1AsiliVgISyDTouYOQYVOXA0srj3YskhJ", "dimp50_got_001.zip": "1EE5FcPXqMBkv_0ghfzytCMmbKxWxy04p", "dimp18_002.zip": "1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y", "dimp50_got_002.zip": "1ALXzVkn58GZ1E0I22vrbXkEXwy5u0xOc", "dimp18_got_000.zip": "1BxowlgGEonnuaVXwiDwiYr7VV7BRWLvr", "dimp50_001.zip": "1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K", "dimp18_got_002.zip": "1awqXQnFRr5NwjLfI-Ngtt3zT7XmQIwzs", "dimp18_got_001.zip": "1rr2J6NuuYJ5E4wDUw-PrxaNKjIsfgAyk", "dimp50_got_000.zip": "1ruP8XJOu0woq-bvKdHJ9_Y9RceHDrDjm", "dimp18_004.zip": "1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg", "dimp18_003.zip": "1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8", "dimp50_003.zip": "1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy", "dimp50_002.zip": "1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4", }, "atom": { "default_004.zip": "1BapnQh_8iRM44DXj862eOZV4q8zQLdmT", "default_003.zip": "1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5", "default_got_000.zip": "1uJnC0PPQhavwRbAL7VQ2Zow8YdLVzeCb", "default_got_001.zip": "1YzJm0H31veDW-lMxwy8MYNpMULgsYHKf", "default_000.zip": "1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5", "default_002.zip": "1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC", "default_001.zip": "1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt", "default_got_002.zip": "1qGtArxdAy0uWSd-HqFT5zmXpR6TCm4Vc", }, } def _download_file(file_id, path): link = 'https://drive.google.com/uc?id=' + file_id gdown.download(link, path, quiet=True) def download_results(download_path, trackers='all'): """ 
Script to automatically download tracker results for PyTracking. args: download_path - Directory where the zipped results are downloaded trackers - Tracker results which are to be downloaded. If set to 'all', all available results are downloaded. If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded. Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are downloaded. The value can be set to either 'all', in which case all available results for the tracker are downloaded. Else the value should be a list of parameter file names. """ print('Using download path ''{}'''.format(download_path)) os.makedirs(download_path, exist_ok=True) if isinstance(trackers, str): if trackers == 'all': trackers = {k: 'all' for k in results_link_dict.keys()} elif trackers in results_link_dict: trackers = {trackers: 'all'} else: raise Exception('tracker_list must be set to ''all'', a tracker name, or be a dict') elif isinstance(trackers, dict): pass else: raise Exception('tracker_list must be set to ''all'', or be a dict') for trk, runfiles in trackers.items(): trk_path = os.path.join(download_path, trk) if not os.path.exists(trk_path): os.makedirs(trk_path) if runfiles == 'all': for params, fileid in results_link_dict[trk].items(): print('Downloading: {}/{}'.format(trk, params)) _download_file(fileid, os.path.join(trk_path, params)) elif isinstance(runfiles, (list, tuple)): for p in runfiles: for params, fileid in results_link_dict[trk].items(): if re.match(r'{}(|_(\d\d\d)).zip'.format(p), params) is not None: print('Downloading: {}/{}'.format(trk, params)) _download_file(fileid, os.path.join(trk_path, params)) else: raise Exception('tracker_list values must either be set to ''all'', or be a list of param names') def unpack_tracking_results(download_path, output_path=None): """ Unpacks zipped benchmark results. 
def main():
    """Command-line entry point: download result archives, then unpack them."""
    parser = argparse.ArgumentParser(description='Download and unpack zipped results')
    parser.add_argument('--tracker', type=str, default='all',
                        help='Name of tracker results to download, or all.')
    parser.add_argument('--output_path', type=str, default=None,
                        help='Path to the directory where the results will be unpacked.')
    parser.add_argument('--temp_download_path', type=str, default=None,
                        help='Temporary path used for downloading the Zip files.')
    parser.add_argument('--download', type=bool, default=True,
                        help='Whether to download results or unpack existing downloaded files.')
    args = parser.parse_args()

    # Fall back to a folder inside the system temp directory when no explicit
    # download path is given.
    if args.temp_download_path is not None:
        download_path = args.temp_download_path
    else:
        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())

    if args.download:
        download_results(download_path, args.tracker)

    unpack_tracking_results(download_path, args.output_path)
def pack_got10k_results(tracker_name, param_name, output_name):
    """Pack got10k results into a zip folder which can be directly uploaded to
    the evaluation server. The packed file is saved in the folder
    env_settings().got_packed_results_path.

    args:
        tracker_name - name of the tracker
        param_name - name of the parameter file
        output_name - name of the packed zip file
    """
    packed_root = os.path.join(env_settings().got_packed_results_path, output_name)
    if not os.path.exists(packed_root):
        os.makedirs(packed_root)

    raw_root = env_settings().results_path

    # The GOT-10k test split consists of sequences 1..180.
    for seq_id in range(1, 181):
        seq_name = 'GOT-10k_Test_{:06d}'.format(seq_id)
        seq_dir = '{}/{}'.format(packed_root, seq_name)
        if not os.path.exists(seq_dir):
            os.makedirs(seq_dir)

        # Three independent runs are collected per sequence.
        for run_id in range(3):
            run_dir = '{}/{}/{}_{:03d}'.format(raw_root, tracker_name, param_name, run_id)
            boxes = np.loadtxt('{}/{}.txt'.format(run_dir, seq_name), dtype=np.float64)
            times = np.loadtxt('{}/{}_time.txt'.format(run_dir, seq_name), dtype=np.float64)

            np.savetxt('{}/{}_{:03d}.txt'.format(seq_dir, seq_name, run_id + 1),
                       boxes, delimiter=',', fmt='%f')
            # NOTE(review): the time file is rewritten on every run_id, so only
            # the last run's timings survive — this mirrors the original code;
            # confirm against the server's expected format before changing.
            np.savetxt('{}/{}_time.txt'.format(seq_dir, seq_name), times, fmt='%f')

    # Generate ZIP file
    shutil.make_archive(packed_root, 'zip', packed_root)

    # Remove raw text files
    shutil.rmtree(packed_root)
def convert_vot_anno_to_rect(vot_anno, type):
    """Convert a VOT annotation to an axis-aligned rectangle [x, y, w, h].

    args:
        vot_anno - either a 4-element rectangle (returned unchanged) or an
            8-element polygon (x0, y0, x1, y1, x2, y2, x3, y3).
        type - 'union' for the tight bounding box of the polygon corners, or
            'preserve_area' for a centered box rescaled so its area matches
            the polygon's.
    raises:
        ValueError - for an unknown type, or a 'preserve_area' annotation that
            is not 8 elements long.
    """
    # Already a rectangle: nothing to convert.
    if len(vot_anno) == 4:
        return vot_anno

    if type == 'union':
        xs = vot_anno[0::2]
        ys = vot_anno[1::2]
        x1, x2 = min(xs), max(xs)
        y1, y2 = min(ys), max(ys)
        return [x1, y1, x2 - x1, y2 - y1]

    if type == 'preserve_area':
        if len(vot_anno) != 8:
            raise ValueError

        vot_anno = np.array(vot_anno)
        cx = np.mean(vot_anno[0::2])
        cy = np.mean(vot_anno[1::2])

        x1 = min(vot_anno[0::2])
        x2 = max(vot_anno[0::2])
        y1 = min(vot_anno[1::2])
        y2 = max(vot_anno[1::2])

        # Parallelogram area from two adjacent edges vs bounding-box area.
        A1 = np.linalg.norm(vot_anno[0:2] - vot_anno[2:4]) * np.linalg.norm(vot_anno[2:4] - vot_anno[4:6])
        A2 = (x2 - x1) * (y2 - y1)
        s = np.sqrt(A1 / A2)

        # Scale the bounding box so its area matches the polygon's (+1 border).
        w = s * (x2 - x1) + 1
        h = s * (y2 - y1) + 1
        return [cx - 0.5 * w, cy - 0.5 * h, w, h]

    raise ValueError
class TrackerParams:
    """Container for tracker parameters, stored as plain attributes."""

    def set_default_values(self, default_vals: dict):
        """Set every entry of default_vals as an attribute, unless an
        attribute with that name already exists."""
        for name, value in default_vals.items():
            if not hasattr(self, name):
                setattr(self, name, value)

    def get(self, name: str, *default):
        """Get a parameter value with the given name. If it does not exists, it return the default value given as a
        second argument or returns an error if no default value is given."""
        if len(default) > 1:
            raise ValueError('Can only give one default value.')

        if default:
            return getattr(self, name, default[0])
        return getattr(self, name)

    def has(self, name: str):
        """Check if there exist a parameter with the given name."""
        return hasattr(self, name)
""" a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim > 1: raise ValueError fig = plt.figure(fig_num) # plt.tight_layout() plt.cla() plt.plot(a_np) if title is not None: plt.title(title) draw_figure(fig) def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None): im_np = im.clone().cpu().squeeze().numpy() im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8)) boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int) # Draw proposals for i_ in range(boxes.shape[0]): if disp_ids is None or disp_ids[i_]: bb = boxes[i_, :] disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256) cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), disp_color, 1) if iou_pred is not None: text_pos = (bb[0], bb[1] - 5) cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False) im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float() return im_tensor def _pascal_color_map(N=256, normalized=False): """ Python implementation of the color map function for the PASCAL VOC data set. Official Matlab version can be found in the PASCAL VOC devkit http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit """ def bitget(byteval, idx): return (byteval & (1 << idx)) != 0 dtype = 'float32' if normalized else 'uint8' cmap = np.zeros((N, 3), dtype=dtype) for i in range(N): r = g = b = 0 c = i for j in range(8): r = r | (bitget(c, 0) << 7 - j) g = g | (bitget(c, 1) << 7 - j) b = b | (bitget(c, 2) << 7 - j) c = c >> 3 cmap[i] = np.array([r, g, b]) cmap = cmap / 255 if normalized else cmap return cmap def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None): """ Overlay mask over image. Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py This function allows you to overlay a mask over an image with some transparency. # Arguments im: Numpy Array. 
Array with the image. The shape must be (H, W, 3) and the pixels must be represented as `np.uint8` data type. ann: Numpy Array. Array with the mask. The shape must be (H, W) and the values must be intergers alpha: Float. Proportion of alpha to apply at the overlaid mask. colors: Numpy Array. Optional custom colormap. It must have shape (N, 3) being N the maximum number of colors to represent. contour_thickness: Integer. Thickness of each object index contour draw over the overlay. This function requires to have installed the package `opencv-python`. # Returns Numpy Array: Image of the overlay with shape (H, W, 3) and data type `np.uint8`. """ im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int) if im.shape[:-1] != ann.shape: raise ValueError('First two dimensions of `im` and `ann` must match') if im.shape[-1] != 3: raise ValueError('im must have three channels at the 3 dimension') colors = colors or _pascal_color_map() colors = np.asarray(colors, dtype=np.uint8) mask = colors[ann] fg = im * alpha + (1 - alpha) * mask img = im.copy() img[ann > 0] = fg[ann > 0] if contour_thickness: # pragma: no cover import cv2 for obj_id in np.unique(ann[ann > 0]): contours = cv2.findContours((ann == obj_id).astype( np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(), contour_thickness) return img ================================================ FILE: artrackv2_mindspore/external/AR/pytracking/utils/visdom.py ================================================ import visdom import visdom.server from pytracking.features.preprocessing import numpy_to_torch from pytracking.utils.plotting import show_image_with_boxes, overlay_mask import cv2 import torch import copy import numpy as np from collections import OrderedDict class VisBase: def __init__(self, visdom, show_data, title): self.visdom = visdom self.show_data = show_data self.title = title self.raw_data = None def update(self, data, **kwargs): 
class VisHeatmap(VisBase):
    """Visdom window that renders a tensor as a 2D heatmap."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        # Drop singleton dims and flip the rows (presumably so the origin
        # matches visdom's display orientation — confirm against the UI).
        self.raw_data = data.squeeze().flip(0)

    def draw_data(self):
        self.visdom.heatmap(self.raw_data.clone(),
                            opts={'title': self.title}, win=self.title)
class VisCostVolume(VisBase):
    """Visdom heatmap display for a 4D cost volume.

    ``save_data`` reshapes the input into an explicit (h, w, h, w) tensor;
    the last two axes hold one 2D response map per position indexed by the
    first two. Either the whole volume (tiled into a single big heatmap) or
    one 2D slice at ``slice_pos`` is drawn, depending on ``show_slice``.
    """

    def __init__(self, visdom, show_data, title, flip=False):
        super().__init__(visdom, show_data, title)
        self.show_slice = False   # draw a single slice instead of the full volume
        self.slice_pos = None     # [row, col] selecting the slice to display
        self.flip = flip          # swap which axis pair the slice indexes

    def save_data(self, data):
        # Expand into an explicit (h, w, h, w) volume using the trailing dims.
        h, w = data.shape[-2], data.shape[-1]
        self.raw_data = data.view(h, w, h, w)

    def set_zoom_pos(self, slice_pos):
        self.slice_pos = slice_pos

    def toggle_show_slice(self, new_mode=None):
        # Explicit mode wins; otherwise just flip the flag.
        self.show_slice = new_mode if new_mode is not None else not self.show_slice

    def show_cost_volume(self):
        volume = self.raw_data.clone()
        # Interleave the two axis pairs so the 4D volume tiles into one 2D map.
        tiled = volume.permute(0, 2, 1, 3).contiguous()
        if self.flip:
            tiled = tiled.permute(2, 3, 0, 1).contiguous()
        tiled = tiled.view(tiled.shape[0] * tiled.shape[1], -1)
        self.visdom.heatmap(tiled.flip(0), opts={'title': self.title}, win=self.title)

    def show_cost_volume_slice(self):
        row, col = self.slice_pos[0], self.slice_pos[1]
        volume = self.raw_data.clone()
        # `flip` decides whether [row, col] indexes the leading or trailing pair.
        plane = volume[:, :, row, col] if self.flip else volume[row, col, :, :]
        self.visdom.heatmap(plane.flip(0), opts={'title': self.title}, win=self.title)

    def draw_data(self):
        if self.show_slice:
            self.show_cost_volume_slice()
        else:
            self.show_cost_volume()
self.zoom_mode = not self.zoom_mode zoom_toggled = True # Update image self.show_image() # Update cost volumes for block_title, block in self.registered_blocks.items(): if isinstance(block, VisCostVolume): block.set_zoom_pos(self.zoom_pos) block.toggle_show_slice(self.zoom_mode) if (self.zoom_mode or zoom_toggled) and block.show_data: block.draw_data() def __init__(self, visdom, show_data, title, feat_shape, registered_blocks): super().__init__(visdom, show_data, title) self.feat_shape = feat_shape self.zoom_mode = False self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)] self.registered_blocks = registered_blocks self.visdom.register_event_handler(self.cv_ui_handler, title) def draw_grid(self, data): stride_r = int(data.shape[1] / self.feat_shape[0]) stride_c = int(data.shape[2] / self.feat_shape[1]) # Draw grid data[:, list(range(0, data.shape[1], stride_r)), :] = 0 data[:, :, list(range(0, data.shape[2], stride_c))] = 0 data[0, list(range(0, data.shape[1], stride_r)), :] = 255 data[0, :, list(range(0, data.shape[2], stride_c))] = 255 return data def shade_cell(self, data): stride_r = int(data.shape[1] / self.feat_shape[0]) stride_c = int(data.shape[2] / self.feat_shape[1]) r1 = self.zoom_pos[0]*stride_r r2 = min((self.zoom_pos[0] + 1)*stride_r, data.shape[1]) c1 = self.zoom_pos[1] * stride_c c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2]) factor = 0.8 if self.zoom_mode else 0.5 data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(data.device) * factor return data def show_image(self, data=None): if data is None: data = self.raw_data.clone() data = self.draw_grid(data) data = self.shade_cell(data) self.visdom.image(data, opts={'title': self.title}, win=self.title) def save_data(self, data): # Ignore feat shape data = data[0] data = data.float() self.raw_data = data def draw_data(self): self.show_image(self.raw_data.clone()) class VisInfoDict(VisBase): def __init__(self, 
visdom, show_data, title): super().__init__(visdom, show_data, title) self.raw_data = OrderedDict() def generate_display_text(self, data): display_text = '' for key, value in data.items(): key = key.replace('_', ' ') if value is None: display_text += '{}: {}
'.format(key, 'None') elif isinstance(value, (str, int)): display_text += '{}: {}
'.format(key, value) else: display_text += '{}: {:.2f}
'.format(key, value) return display_text def save_data(self, data): for key, val in data.items(): self.raw_data[key] = val def draw_data(self): data = copy.deepcopy(self.raw_data) display_text = self.generate_display_text(data) self.visdom.text(display_text, opts={'title': self.title}, win=self.title) class VisText(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) def save_data(self, data): self.raw_data = data def draw_data(self): data = copy.deepcopy(self.raw_data) self.visdom.text(data, opts={'title': self.title}, win=self.title) class VisLinePlot(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) def save_data(self, data): self.raw_data = data def draw_data(self): if isinstance(self.raw_data, (list, tuple)): data_y = self.raw_data[0].clone() data_x = self.raw_data[1].clone() else: data_y = self.raw_data.clone() data_x = torch.arange(data_y.shape[0]) self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title) class VisTracking(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) def save_data(self, data): image = data[0] boxes_masks = data[1:] boxes, masks = [], [] for bm in boxes_masks: if bm is None: continue if isinstance(bm, list): boxes.append(torch.Tensor(bm)); continue if len(bm.shape) > 1: # Binarize segmentation if a float tensor is provided if bm.dtype != np.uint8: bm = (bm > 0.5).astype(np.uint8) masks.append(bm); continue boxes.append(bm.float()) self.raw_data = [image, boxes, masks] def draw_data(self): disp_image = self.raw_data[0].copy() resize_factor = 1 if max(disp_image.shape) > 480: resize_factor = 480.0 / float(max(disp_image.shape)) disp_image = cv2.resize(disp_image, None, fx=resize_factor, fy=resize_factor) for i, mask in enumerate(self.raw_data[2]): self.raw_data[2][i] = cv2.resize(mask, None, fx=resize_factor, fy=resize_factor) boxes = [resize_factor * b.clone() for b in 
class VisBBReg(VisBase):
    """Visualizes bounding-box regression: initial vs. refined boxes, with
    per-box-id checkboxes (``ID 0`` / ``ID 1``) to toggle which are drawn."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.block_list = []   # checkbox UI state, one entry per box id

    def block_list_callback_handler(self, data):
        # Mirror the checkbox change into local state, refresh the panel, redraw.
        self.block_list[data['propertyId']]['value'] = data['value']
        self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')
        self.draw_data()

    def save_data(self, data):
        # data = (image, initial boxes, refined boxes, refined IoU scores)
        self.image = data[0].float()
        self.init_boxes = data[1]
        self.final_boxes = data[2]
        self.final_ious = data[3]

    def draw_data(self):
        if not self.block_list:
            # First draw: create the two per-id checkboxes and hook up events.
            for idx in range(2):
                self.block_list.append({'type': 'checkbox', 'name': 'ID {}'.format(idx), 'value': True})
            self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')
            self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis')

        ids = [entry['value'] for entry in self.block_list]
        init_img = show_image_with_boxes(self.image.clone(), self.init_boxes.clone(),
                                         disp_ids=ids)
        final_img = show_image_with_boxes(self.image.clone(), self.final_boxes.clone(),
                                          self.final_ious.clone(), disp_ids=ids)
        self.visdom.image(init_img, opts={'title': 'Init Boxes'}, win='Init Boxes')
        self.visdom.image(final_img, opts={'title': 'Final Boxes'}, win='Final Boxes')
port=visdom_info.get('port', 8097)) self.registered_blocks = {} self.blocks_list = [] self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list') if ui_info is not None: self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id']) def block_list_callback_handler(self, data): field_name = self.blocks_list[data['propertyId']]['name'] self.registered_blocks[field_name].toggle_display(data['value']) self.blocks_list[data['propertyId']]['value'] = data['value'] self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') def register(self, data, mode, debug_level=0, title='Data', **kwargs): if title not in self.registered_blocks.keys(): show_data = self.debug >= debug_level if title != 'Tracking': self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data}) self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') if mode == 'image': self.registered_blocks[title] = VisImage(self.visdom, show_data, title) elif mode == 'heatmap': self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title) elif mode == 'cost_volume': self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title) elif mode == 'cost_volume_flip': self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True) elif mode == 'cost_volume_ui': self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1], self.registered_blocks) elif mode == 'info_dict': self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title) elif mode == 'text': self.registered_blocks[title] = VisText(self.visdom, show_data, title) elif mode == 'lineplot': self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title) elif mode == 'Tracking': self.registered_blocks[title] = VisTracking(self.visdom, show_data, title) elif mode == 'bbreg': 
def make_full_size(x, output_sz):
    """Zero-pad mask `x` on the right and bottom to shape (output_sz[1], output_sz[0]).

    x: 2D numpy array, e.g. a binary mask.
    output_sz: target size as [width, height].

    If `x` already matches the target it is returned unchanged; if it is
    larger along an axis it is cropped (np.pad rejects negative widths).
    """
    target_h, target_w = output_sz[1], output_sz[0]
    if x.shape[0] == target_h and x.shape[1] == target_w:
        return x
    # Crop anything that sticks out beyond the target before padding.
    x = x[:min(x.shape[0], target_h), :min(x.shape[1], target_w)]
    pad_bottom = target_h - x.shape[0]
    pad_right = target_w - x.shape[1]
    return np.pad(x, ((0, pad_bottom), (0, pad_right)), 'constant', constant_values=0)
(H,W)''' x1_new = max(0, min(x1, boundary[1] - min_sz)) y1_new = max(0, min(y1, boundary[0] - min_sz)) x2_new = max(min_sz, min(x2, boundary[1])) y2_new = max(min_sz, min(y2, boundary[0])) return x1_new, y1_new, x2_new, y2_new ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/.gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .vim-template* # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/LICENSE ================================================ MIT License Copyright (c) 2018 Jiayuan Mao Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without 
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/README.md ================================================ # PreciseRoIPooling This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation). **Acquisition of Localization Confidence for Accurate Object Detection** _Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.) https://arxiv.org/abs/1807.11590 ## Brief In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is: - different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates. 
- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous. For a better illustration, we compare RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details including the gradient computation can be found in our paper.
## Implementation PrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome. ## Usage (PyTorch 1.0) In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented). Since we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` ## Usage (PyTorch 0.4) **!!! Please first checkout to the branch pytorch0.4.** In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented). To use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` Here, - RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor. - `spatial_scale` is multiplied to the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`. - The coordinates for RoI follows the [L, R) convension. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`. ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore ================================================ *.o /_prroi_pooling ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : __init__.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. from .prroi_pool import * ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py ================================================ #! 
# Lazily JIT-compiled CUDA extension module; populated on first use.
_prroi_pooling = None


def _import_prroi_pooling():
    """Compile (once) and return the '_prroi_pooling' CUDA extension.

    Uses torch's cpp_extension JIT loader on the sources under ./src; the
    compiled module is cached in the module-level `_prroi_pooling` global so
    subsequent calls are free.
    """
    global _prroi_pooling
    if _prroi_pooling is not None:
        return _prroi_pooling
    try:
        from os.path import join as pjoin, dirname
        from torch.utils.cpp_extension import load as load_extension
        src_dir = pjoin(dirname(__file__), 'src')
        sources = [pjoin(src_dir, 'prroi_pooling_gpu.c'),
                   pjoin(src_dir, 'prroi_pooling_gpu_impl.cu')]
        _prroi_pooling = load_extension('_prroi_pooling', sources, verbose=True)
    except ImportError:
        raise ImportError('Can not compile Precise RoI Pooling library.')
    return _prroi_pooling
ctx.save_for_backward(features, rois, output) else: raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') return output @staticmethod def backward(ctx, grad_output): _prroi_pooling = _import_prroi_pooling() features, rois, output = ctx.saved_tensors grad_input = grad_coor = None if features.requires_grad: grad_output = grad_output.contiguous() grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) if rois.requires_grad: grad_output = grad_output.contiguous() grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) return grad_input, grad_coor, None, None, None prroi_pool2d = PrRoIPool2DFunction.apply ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : prroi_pool.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. 
class PrRoIPool2D(nn.Module):
    """Precise RoI Pooling layer — a thin nn.Module wrapper around `prroi_pool2d`."""

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        super().__init__()
        # Normalize once to plain Python scalars at construction time.
        self.pooled_height = int(pooled_height)
        self.pooled_width = int(pooled_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        """Pool `features` over `rois` (an m x 5 tensor: batch_idx, x0, y0, x1, y1)."""
        return prroi_pool2d(features, rois,
                            self.pooled_height, self.pooled_width, self.spatial_scale)

    def extra_repr(self):
        # Shown inside repr(module); mirrors the constructor arguments.
        return ('kernel_size=({}, {}), spatial_scale={}'
                .format(self.pooled_height, self.pooled_width, self.spatial_scale))
*/ #include #include #include #include #include "prroi_pooling_gpu_impl.cuh" at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) { int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options()); if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return output; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingForwardGpu( stream, features.data(), rois.data(), output.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count ); AT_CUDA_CHECK(cudaGetLastError()); return output; } at::Tensor prroi_pooling_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int pooled_height, int pooled_width, float spatial_scale) { auto features_diff = at::zeros_like(features); int nr_rois = rois.size(0); int batch_size = features.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = batch_size * nr_channels * height * width; if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return features_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), features_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); AT_CUDA_CHECK(cudaGetLastError()); return features_diff; } at::Tensor prroi_pooling_coor_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int 
pooled_height, int pooled_width, float spatial_scale) { auto coor_diff = at::zeros_like(rois); int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = nr_rois * 5; if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return coor_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingCoorBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), coor_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); AT_CUDA_CHECK(cudaGetLastError()); return coor_diff; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward"); m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward"); m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor"); } ================================================ FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h ================================================ /* * File : prroi_pooling_gpu.h * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
/*
 * C prototypes for the Precise RoI Pooling GPU entry points (THC era).
 *
 * NOTE(review): prroi_pooling_gpu.c in this tree defines these functions
 * with at::Tensor arguments/returns instead of THCudaTensor*, so this
 * header looks stale — presumably kept for the pytorch0.4 branch; confirm
 * before relying on it.
 *
 * Fix: the last parameter of prroi_pooling_coor_backward_cuda was
 * misspelled 'spatial_scal'; renamed to 'spatial_scale' for consistency
 * (parameter names in prototypes have no ABI effect).
 */

int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output,
                               int pooled_height, int pooled_width, float spatial_scale);

int prroi_pooling_backward_cuda(
    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output,
    THCudaTensor *output_diff, THCudaTensor *features_diff,
    int pooled_height, int pooled_width, float spatial_scale
);

int prroi_pooling_coor_backward_cuda(
    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output,
    THCudaTensor *output_diff, THCudaTensor *features_diff,
    int pooled_height, int pooled_width, float spatial_scale
);
dh : -dh; return (1.0f - dh) * (1.0f - dw); } __device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) { return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1; } __device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){ float retVal = 0.0f; int h1 = floorf(h); int w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w); retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h); w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); h1 = floorf(h)+1; w1 = floorf(w)+1; retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1)); return retVal; } __device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) { float alpha, beta, lim_alpha, lim_beta, tmp; float sum_out = 0; alpha = x0 - float(s_w); beta = y0 - float(s_h); lim_alpha = x1 - float(s_w); lim_beta = y1 - float(s_h); tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp; alpha = float(e_w) - x1; lim_alpha = float(e_w) - x0; tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha) * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta); sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp; alpha = x0 - float(s_w); beta = float(e_h) - y1; lim_alpha = x1 - float(s_w); lim_beta = float(e_h) - y0; tmp = (lim_alpha 
- 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Scatter `top_diff * coeff` into the gradient map at (h, w); out-of-range
 * coordinates are silently dropped. atomicAdd because many windows overlap. */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int h, const int w, const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Backward counterpart of PrRoIPoolingMatCalculation: distributes the output
 * gradient to the four corner cells with the same integral coefficients. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward kernel: each thread computes one pooled output element as the exact
 * integral average of the bilinearly-interpolated feature map over its bin.
 * NOTE(review): `static_cast<float>` template arguments and the `<<<...>>>`
 * launch configurations below were stripped by the text extraction and have
 * been restored to match upstream PreciseRoIPooling. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    // ROI layout: (batch_index, x1, y1, x2, y2), in input-image coordinates.
    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];

    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
    float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    float *this_out = top_data + index;

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);
    if (win_size == 0) {
        // Degenerate (zero-area) ROI: output 0 for this element.
        *this_out = 0;
        return;
    }

    float sum_out = 0;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    // Integrate over every unit cell overlapping the pooling window.
    for (int w_iter = s_w; w_iter < e_w; ++w_iter)
        for (int h_iter = s_h; h_iter < e_h; ++h_iter)
            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                height, width);

    *this_out = sum_out / win_size;
  }
}

/* Backward kernel w.r.t. the feature map. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    bottom_rois += n * 5;

    int roi_batch_ind = bottom_rois[0];
    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, (float)0);
    float roi_height = max(roi_end_h - roi_start_h, (float)0);
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_out_grad = top_diff + index;
    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);

    float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    for (int w_iter = s_w; w_iter < e_w; ++w_iter)
        for (int h_iter = s_h; h_iter < e_h; ++h_iter)
            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out,
                h_iter, w_iter, h_iter + 1, w_iter + 1,
                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                height, width);
  }
}

/* Backward kernel w.r.t. the ROI coordinates (the distinguishing feature of
 * Precise RoI Pooling: box coordinates receive gradients too). */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    bottom_rois += n * 5;

    int roi_batch_ind = bottom_rois[0];
    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, (float)0);
    float roi_height = max(roi_end_h - roi_start_h, (float)0);
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_out_grad = top_diff + index;
    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    const float *this_top_data = top_data + index;
    float *this_data_grad = bottom_diff + n * 5;  // gradient slot for this ROI's 5 coords

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);

    float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

    // WARNING: to be discussed
    if (sum_out == 0)
        return;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    // Line integrals of the interpolated feature along each window edge.
    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
    }

    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
    }

    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

    partial_x1 = partial_x1 / win_size * spatial_scale;
    partial_x2 = partial_x2 / win_size * spatial_scale;
    partial_y1 = partial_y1 / win_size * spatial_scale;
    partial_y2 = partial_y2 / win_size * spatial_scale;

    // (b, x1, y1, x2, y2): no gradient for the batch index.
    this_data_grad[0] = 0;
    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width)
        + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height)
        + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width
        + partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height
        + partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
  }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count) {

    // NOTE(review): launch configuration restored; the extraction had eaten
    // everything between "<<" and ">>".
    PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    // Gradients accumulate via atomicAdd, so the buffer must start zeroed.
    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_rois, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

} /* !extern "C" */


================================================
FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh
================================================
/*
 * File   : prroi_pooling_gpu_impl.cuh
 * Author : Tete Xiao, Jiayuan Mao
 * Email  : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#ifndef PRROI_POOLING_GPU_IMPL_CUH
#define PRROI_POOLING_GPU_IMPL_CUH

#ifdef __cplusplus
extern "C" {
#endif

#define F_DEVPTR_IN const float *
#define F_DEVPTR_OUT float *

void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count);

void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

#ifdef __cplusplus
} /* !extern "C" */
#endif

#endif /* !PRROI_POOLING_GPU_IMPL_CUH */


================================================
FILE: artrackv2_mindspore/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py
================================================
# -*- coding: utf-8 -*-
# File   : test_prroi_pooling2d.py
# Author : Jiayuan Mao
# Email  : maojiayuan@gmail.com
# Date   : 18/02/2018
#
# This file is part of Jacinle.

import unittest

import torch
import torch.nn as nn
import torch.nn.functional as F
from jactorch.utils.unittest import TorchTestCase

from prroi_pool import PrRoIPool2D


class TestPrRoIPool2D(TorchTestCase):
    def test_forward(self):
        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)
        features = torch.rand((4, 16, 24, 32)).cuda()
        rois = torch.tensor([
            [0, 0, 0, 14, 14],
            [1, 14, 14, 28, 28],
        ]).float().cuda()

        out = pool(features, rois)
        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)

        self.assertTensorClose(out, torch.stack((
            out_gold[0, :, :7, :7],
            out_gold[1, :, 7:14, 7:14],
        ), dim=0))

    def test_backward_shapeonly(self):
        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)

        features = torch.rand((4, 2, 24, 32)).cuda()
        rois = torch.tensor([
            [0, 0, 0, 4, 4],
            [1, 14, 14, 18, 18],
        ]).float().cuda()
        features.requires_grad = rois.requires_grad = True
        out = pool(features, rois)

        loss = out.sum()
        loss.backward()

        self.assertTupleEqual(features.size(), features.grad.size())
        self.assertTupleEqual(rois.size(), rois.grad.size())


if __name__ == '__main__':
    unittest.main()


================================================
FILE: artrackv2_mindspore/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu
================================================
/*
 * File   : prroi_pooling_gpu_impl.cu
 * Author : Tete Xiao, Jiayuan Mao
 * Email  : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT
license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#include "prroi_pooling_gpu_impl.cuh"

/* NOTE(review): the two include targets below had been stripped to bare
 * "#include" by the text extraction; restored to the upstream headers. */
#include <cstdio>
#include <cassert>

/* Standard grid-stride loop over `n` work items. */
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

#define CUDA_POST_KERNEL_CHECK \
    do { \
        cudaError_t err = cudaGetLastError(); \
        if (cudaSuccess != err) { \
            fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \
            exit(-1); \
        } \
    } while(0)

#define CUDA_NUM_THREADS 512

namespace {

static int CUDA_NUM_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

/* Read feature map value at (h, w); out-of-range coordinates read as 0. */
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data,
        const int h, const int w, const int height, const int width) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    float retVal = overflow ? 0.0f : data[h * width + w];
    return retVal;
}

/* Bilinear weight for an offset of (dh, dw) from a grid point. */
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw) {
    dw = dw > 0 ? dw : -dw;
    dh = dh > 0 ? dh : -dh;
    return (1.0f - dh) * (1.0f - dw);
}

/* Closed-form integral of the linear interpolant between c1 and c2 over [s, t]. */
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}

/* Bilinear interpolation of the feature map at continuous coordinates (h, w). */
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data,
        const float h, const float w, const int height, const int width) {
    float retVal = 0.0f;
    int h1 = floorf(h);
    int w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h) + 1;
    w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h);
    w1 = floorf(w) + 1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h) + 1;
    w1 = floorf(w) + 1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    return retVal;
}

/* Exact integral of the bilinear interpolant over the sub-window
 * [y0, y1] x [x0, x1] of the unit cell with corners (s_h, s_w)-(e_h, e_w). */
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data,
        const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;
    float sum_out = 0;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Scatter `top_diff * coeff` into the gradient map at (h, w); out-of-range
 * coordinates are silently dropped. atomicAdd because many windows overlap. */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int h, const int w, const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Backward counterpart of PrRoIPoolingMatCalculation: distributes the output
 * gradient to the four corner cells with the same integral coefficients. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward kernel: each thread computes one pooled output element as the exact
 * integral average of the bilinearly-interpolated feature map over its bin.
 * NOTE(review): `static_cast<float>` template arguments and the `<<<...>>>`
 * launch configurations below were stripped by the text extraction and have
 * been restored to match upstream PreciseRoIPooling. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    // ROI layout: (batch_index, x1, y1, x2, y2), in input-image coordinates.
    bottom_rois += n * 5;
    int roi_batch_ind = bottom_rois[0];

    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
    float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    float *this_out = top_data + index;

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);
    if (win_size == 0) {
        // Degenerate (zero-area) ROI: output 0 for this element.
        *this_out = 0;
        return;
    }

    float sum_out = 0;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    // Integrate over every unit cell overlapping the pooling window.
    for (int w_iter = s_w; w_iter < e_w; ++w_iter)
        for (int h_iter = s_h; h_iter < e_h; ++h_iter)
            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                height, width);

    *this_out = sum_out / win_size;
  }
}

/* Backward kernel w.r.t. the feature map. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    bottom_rois += n * 5;

    int roi_batch_ind = bottom_rois[0];
    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, (float)0);
    float roi_height = max(roi_end_h - roi_start_h, (float)0);
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_out_grad = top_diff + index;
    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);

    float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    for (int w_iter = s_w; w_iter < e_w; ++w_iter)
        for (int h_iter = s_h; h_iter < e_h; ++h_iter)
            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out,
                h_iter, w_iter, h_iter + 1, w_iter + 1,
                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                height, width);
  }
}

/* Backward kernel w.r.t. the ROI coordinates (the distinguishing feature of
 * Precise RoI Pooling: box coordinates receive gradients too). */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    bottom_rois += n * 5;

    int roi_batch_ind = bottom_rois[0];
    float roi_start_w = bottom_rois[1] * spatial_scale;
    float roi_start_h = bottom_rois[2] * spatial_scale;
    float roi_end_w = bottom_rois[3] * spatial_scale;
    float roi_end_h = bottom_rois[4] * spatial_scale;

    float roi_width = max(roi_end_w - roi_start_w, (float)0);
    float roi_height = max(roi_end_h - roi_start_h, (float)0);
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    const float *this_out_grad = top_diff + index;
    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    const float *this_top_data = top_data + index;
    float *this_data_grad = bottom_diff + n * 5;  // gradient slot for this ROI's 5 coords

    float win_start_w = roi_start_w + bin_size_w * pw;
    float win_start_h = roi_start_h + bin_size_h * ph;
    float win_end_w = win_start_w + bin_size_w;
    float win_end_h = win_start_h + bin_size_h;

    float win_size = max(float(0.0), bin_size_w * bin_size_h);

    float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

    // WARNING: to be discussed
    if (sum_out == 0)
        return;

    int s_w, s_h, e_w, e_h;
    s_w = floorf(win_start_w);
    e_w = ceilf(win_end_w);
    s_h = floorf(win_start_h);
    e_h = ceilf(win_end_h);

    // Line integrals of the interpolated feature along each window edge.
    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
    }

    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
    }

    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

    partial_x1 = partial_x1 / win_size * spatial_scale;
    partial_x2 = partial_x2 / win_size * spatial_scale;
    partial_y1 = partial_y1 / win_size * spatial_scale;
    partial_y2 = partial_y2 / win_size * spatial_scale;

    // (b, x1, y1, x2, y2): no gradient for the batch index.
    this_data_grad[0] = 0;
    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width)
        + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height)
        + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width
        + partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height
        + partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
  }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count) {

    // NOTE(review): launch configuration restored; the extraction had eaten
    // everything between "<<" and ">>".
    PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    // Gradients accumulate via atomicAdd, so the buffer must start zeroed.
    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_rois, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

} /* !extern "C" */


================================================
FILE: artrackv2_mindspore/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh
================================================
/*
 * File   : prroi_pooling_gpu_impl.cuh
 * Author : Tete Xiao, Jiayuan Mao
 * Email  : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#ifndef PRROI_POOLING_GPU_IMPL_CUH
#define PRROI_POOLING_GPU_IMPL_CUH

#ifdef __cplusplus
extern "C" {
#endif

#define F_DEVPTR_IN const float *
#define F_DEVPTR_OUT float *

void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count);

void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

#ifdef __cplusplus
} /* !extern "C" */
#endif

#endif /* !PRROI_POOLING_GPU_IMPL_CUH */


================================================
FILE: artrackv2_mindspore/external/vot20/cttrack/config.yaml
================================================
registry:
- ./trackers.ini
stack: vot2020


================================================
FILE: artrackv2_mindspore/external/vot20/cttrack/trackers.ini
================================================
[cttrack_large]
# label = cttrack_large
protocol = traxpython
command = from cttrack_start import main;main()
# Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths)
# paths = /home/lr/workspace/CTTrack:
paths =
# Additional environment paths
env_PATH = :


================================================
FILE:
artrackv2_mindspore/lib/__init__.py ================================================
================================================ FILE: artrackv2_mindspore/lib/config/__init__.py ================================================
================================================ FILE: artrackv2_mindspore/lib/config/ostrack/config.py ================================================
from easydict import EasyDict as edict
import yaml

"""
Add default config for OSTrack.
"""
# Default experiment configuration tree; YAML experiment files override a
# subset of these keys via update_config_from_file() below.
cfg = edict()

# MODEL
cfg.MODEL = edict()
cfg.MODEL.PRETRAIN_FILE = "mae_pretrain_vit_base.pth"
cfg.MODEL.EXTRA_MERGER = False
cfg.MODEL.RETURN_INTER = False
cfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]

# MODEL.BACKBONE
cfg.MODEL.BACKBONE = edict()
cfg.MODEL.BACKBONE.TYPE = "vit_base_patch16_224"
cfg.MODEL.BACKBONE.PATCHSIZE = 16
cfg.MODEL.BACKBONE.EMBEDDIM = 768
cfg.MODEL.BACKBONE.STRIDE = 16
cfg.MODEL.BACKBONE.MID_PE = False
cfg.MODEL.BACKBONE.SEP_SEG = False
cfg.MODEL.BACKBONE.CAT_MODE = 'direct'
cfg.MODEL.BACKBONE.MERGE_LAYER = 0
cfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False
cfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'
cfg.MODEL.BACKBONE.CE_LOC = []
cfg.MODEL.BACKBONE.CE_KEEP_RATIO = []
cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX

# MODEL.DECODER
cfg.MODEL.DECODER = edict()
cfg.MODEL.DECODER.TYPE = "mask"
cfg.MODEL.DECODER.MASK_RATIO = 0.75
cfg.MODEL.DECODER.EMBEDDIM = 512
cfg.MODEL.DECODER.DEPTH = 8
cfg.MODEL.DECODER.NUMHEADS = 16
cfg.MODEL.DECODER.MLPRATIO = 4

# MODEL.HEAD
cfg.MODEL.BINS = 400
cfg.MODEL.ENCODER_LAYER = 3
cfg.MODEL.NUM_HEADS = 16
cfg.MODEL.MLP_RATIO = 4
cfg.MODEL.QKV_BIAS = True
cfg.MODEL.DROP_RATE = 0.1
cfg.MODEL.ATTN_DROP = 0.0
cfg.MODEL.DROP_PATH = 0.0
cfg.MODEL.DECODER_LAYER = 6
cfg.MODEL.HEAD = edict()
cfg.MODEL.HEAD.TYPE = "PIX"
cfg.MODEL.HEAD.NUM_CHANNELS = 1024

# TRAIN
cfg.TRAIN = edict()
cfg.TRAIN.LR = 0.0001
cfg.TRAIN.WEIGHT_DECAY = 0.0001
cfg.TRAIN.EPOCH = 500
cfg.TRAIN.LR_DROP_EPOCH = 400
cfg.TRAIN.BATCH_SIZE = 16
cfg.TRAIN.NUM_WORKER = 10
cfg.TRAIN.OPTIMIZER = "ADAMW"
cfg.TRAIN.BACKBONE_MULTIPLIER = 0.1
cfg.TRAIN.GIOU_WEIGHT = 2.0
cfg.TRAIN.L1_WEIGHT = 5.0
cfg.TRAIN.FREEZE_LAYERS = [0, ]
cfg.TRAIN.PRINT_INTERVAL = 50
cfg.TRAIN.VAL_EPOCH_INTERVAL = 20
cfg.TRAIN.GRAD_CLIP_NORM = 0.1
cfg.TRAIN.AMP = False
cfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch
cfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch
cfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone

# TRAIN.SCHEDULER
cfg.TRAIN.SCHEDULER = edict()
cfg.TRAIN.SCHEDULER.TYPE = "step"
cfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1

# DATA
cfg.DATA = edict()
cfg.DATA.SAMPLER_MODE = "causal"  # sampling methods
cfg.DATA.MEAN = [0.485, 0.456, 0.406]
cfg.DATA.STD = [0.229, 0.224, 0.225]
cfg.DATA.MAX_SAMPLE_INTERVAL = 200
cfg.DATA.MAX_GAP = 300
cfg.DATA.MAX_INTERVAL = 5
cfg.DATA.INTERVAL_PROB = 0.0
cfg.DATA.TEMP = 2

# DATA.TRAIN
cfg.DATA.TRAIN = edict()
cfg.DATA.TRAIN.DATASETS_NAME = ["LASOT", "GOT10K_vottrain"]
cfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]
cfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000

# DATA.VAL
cfg.DATA.VAL = edict()
cfg.DATA.VAL.DATASETS_NAME = ["GOT10K_votval"]
cfg.DATA.VAL.DATASETS_RATIO = [1]
cfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000

# DATA.SEARCH
cfg.DATA.SEARCH = edict()
cfg.DATA.SEARCH.SIZE = 256
cfg.DATA.SEARCH.FACTOR = 5.0
cfg.DATA.SEARCH.CENTER_JITTER = 4.5
cfg.DATA.SEARCH.SCALE_JITTER = 0.5
cfg.DATA.SEARCH.NUMBER = 1

# DATA.TEMPLATE
cfg.DATA.TEMPLATE = edict()
cfg.DATA.TEMPLATE.NUMBER = 1
cfg.DATA.TEMPLATE.SIZE = 128
cfg.DATA.TEMPLATE.FACTOR = 2.0
cfg.DATA.TEMPLATE.CENTER_JITTER = 0
cfg.DATA.TEMPLATE.SCALE_JITTER = 0

# TEST
cfg.TEST = edict()
cfg.TEST.TEMPLATE_FACTOR = 2.0
cfg.TEST.TEMPLATE_SIZE = 128
cfg.TEST.SEARCH_FACTOR = 5.0
cfg.TEST.SEARCH_SIZE = 256
cfg.TEST.EPOCH = 500


def _edict2dict(dest_dict, src_edict):
    # Recursively copy an EasyDict tree into a plain nested dict
    # (so yaml.dump emits clean mappings instead of edict objects).
    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):
        for k, v in src_edict.items():
            if not isinstance(v, edict):
                dest_dict[k] = v
            else:
                dest_dict[k] = {}
                _edict2dict(dest_dict[k], v)
    else:
        return


def gen_config(config_file):
    # Dump the full default cfg tree to a YAML file at config_file.
    cfg_dict = {}
    _edict2dict(cfg_dict, cfg)
    with open(config_file, 'w') as f:
        yaml.dump(cfg_dict, f, default_flow_style=False)


def _update_config(base_cfg, exp_cfg):
    # Recursively overwrite keys of base_cfg with values from exp_cfg.
    # Unknown keys raise, so experiment YAMLs cannot silently add options.
    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):
        for k, v in exp_cfg.items():
            if k in base_cfg:
                if not isinstance(v, dict):
                    base_cfg[k] = v
                else:
                    _update_config(base_cfg[k], v)
            else:
                raise ValueError("{} not exist in config.py".format(k))
    else:
        return


def update_config_from_file(filename, base_cfg=None):
    # Load an experiment YAML and merge it into base_cfg (or the module-level
    # default cfg when base_cfg is None).
    exp_config = None
    with open(filename) as f:
        exp_config = edict(yaml.safe_load(f))
        if base_cfg is not None:
            _update_config(base_cfg, exp_config)
        else:
            _update_config(cfg, exp_config)

================================================ FILE: artrackv2_mindspore/lib/models/__init__.py ================================================
from .ostrack.ostrack import build_ostrack

================================================ FILE: artrackv2_mindspore/lib/models/component/__init__.py ================================================
# -*- coding:utf-8 -*-
# author : Skye Song
# file : __init__.py.py
# Copyright (c) Skye-Song. All Rights Reserved

================================================ FILE: artrackv2_mindspore/lib/models/component/attention.py ================================================
# -*- coding:utf-8 -*-
# author : Skye Song
# file : attention.py
# Copyright (c) Skye-Song.
# All Rights Reserved
import sys
sys.path.append("/home/baiyifan/code/AR2_mindspore_cp/2stage")  # NOTE(review): machine-specific path hack

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from einops import rearrange

from lib.utils.image import *


class Attention(nn.Cell):
    """Standard multi-head self-attention over a (B, N, C) token sequence (MindSpore)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5  # 1/sqrt(d_head) attention scaling
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)  # fused q/k/v projection
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, padding_mask=None, **kwargs):
        # x: (B, N, C); padding_mask (optional): True at padded positions.
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C//head)
        attn = (q @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N, N)
        if padding_mask is not None:
            # NOTE(review): padding_mask.size() is PyTorch-style; MindSpore tensors
            # expose .shape (this file uses x.shape above) — confirm this branch runs.
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class ClsMixAttention(nn.Cell):
    """Attention over [cls | template(s) | search] tokens with per-group key/value scoping."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, t_h, t_w, s_h, s_w, online_size=1, padding_mask=None):
        # Token layout: 1 cls token, t_h*t_w*(1+online_size) template tokens, s_h*s_w search tokens.
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)
        q_cls, q_t, q_s = ops.split(q, [1, t_h * t_w * (1 + online_size), s_h * s_w], axis=2)
        k_cls, k_t, k_s = ops.split(k, [1, t_h * t_w * (1 +
online_size), s_h * s_w], axis=2)
        v_cls, v_t, v_s = ops.split(v, [1, t_h * t_w * (1 + online_size), s_h * s_w], axis=2)

        # cls token attention: cls query attends to ALL tokens.
        attn = (q_cls @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)
        if padding_mask is not None:
            # NOTE(review): PyTorch-style .size() on a MindSpore tensor — confirm.
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_cls = rearrange(attn @ v, 'b h t d -> b t (h d)')

        # template attention: template queries attend only to template keys/values.
        attn = (q_t @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')

        # search region attention: search queries attend to ALL tokens.
        attn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_s = rearrange(attn @ v, 'b h t d -> b t (h d)')

        x = ops.cat([x_cls, x_t, x_s], axis=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class MixAttention(nn.Cell):
    """Attention over [2 templates | search]: template->template only, search->all."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):
        # Token layout: t_h*t_w*2 template tokens followed by s_h*s_w search tokens.
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)
        q_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)
        k_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)
        v_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)

        # template attention (template keys/values only).
        attn = (q_t @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)
        if padding_mask is not None:
            # NOTE(review): PyTorch-style .size() on a MindSpore tensor — confirm.
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')

        # search region attention (all keys/values).
        attn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_s = rearrange(attn @ v, 'b h t d -> b t (h d)')

        x = ops.cat([x_t, x_s], axis=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class NottAttention(nn.Cell):
    """Variant with no template->template attention: template queries use search keys/values."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)
        q_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)
        k_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)
        v_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)

        # template attention (cross to search keys only — "no template-template").
        attn = (q_t @ k_s.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn =
attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')

        # search region attention (all keys/values).
        attn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_s = rearrange(attn @ v, 'b h t d -> b t (h d)')

        x = ops.cat([x_t, x_s], axis=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class NossAttention(nn.Cell):
    """Variant with no search->search attention: search queries use template keys/values."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)
        q_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)
        k_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)
        v_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)

        # template attention (all keys/values).
        attn = (q_t @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)
        if padding_mask is not None:
            # NOTE(review): PyTorch-style .size() on a MindSpore tensor — confirm.
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_t = rearrange(attn @ v, 'b h t d -> b t (h d)')

        # search region attention (template keys only — "no search-search").
        attn = (q_s @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')

        x = ops.cat([x_t, x_s], axis=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CrossAttention(nn.Cell):
    """Cross attention where templates query a (downsampled-sized) search split and vice versa."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

    def construct(self, x, t_h, t_w, s_h, s_w):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)
        q_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)
        # NOTE(review): axis=4 is out of range for a 4-D (B, head, N, C) tensor —
        # almost certainly should be axis=2 like the q split above; also the split
        # sizes assume half-resolution k/v, which this layer never produces from x.
        # Looks dead/broken — confirm before use.
        k_t, k_s = ops.split(k, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], axis=4)
        v_t, v_s = ops.split(v, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], axis=4)

        # template attention (queries the search split).
        attn = (q_t @ k_s.swapaxes(-2, -1)) * self.scale
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')

        # search region attention (queries the template split).
        attn = (q_s @ k_t.swapaxes(-2, -1)) * self.scale
        attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')

        x = ops.cat([x_t, x_s], axis=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

================================================ FILE: artrackv2_mindspore/lib/models/component/block.py ================================================
# -*- coding:utf-8 -*-
# author : Skye Song
# file : block.py
# Copyright (c) Skye-Song.
# All Rights Reserved
import sys
sys.path.append("/home/baiyifan/weizhenhuan/2stage/lib/models/component")  # NOTE(review): machine-specific path hack

from attention import *
from lib.models.timm import *


class Block(nn.Cell):
    """Standard transformer block: pre-norm attention + pre-norm MLP, each with
    a residual connection and optional stochastic depth (DropPath).

    The attention flavour is selected by name via the `attention` string and
    looked up in this module's globals (must be one of the classes star-imported
    from attention.py above).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., attention = "Attention", act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.LayerNorm
        # MindSpore LayerNorm expects the normalized shape as a tuple.
        dim_tuple = dim
        if isinstance(dim, int):
            dim_tuple = tuple([dim])
        self.norm1 = norm_layer(dim_tuple)
        # Name-based dispatch: resolves e.g. "MixAttention" to the class object.
        self.attn = globals()[attention](dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim_tuple)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def construct(self, x, **kwargs):
        # kwargs (e.g. t_h/t_w/s_h/s_w, padding_mask) are forwarded to the attention cell.
        x = x + self.drop_path(self.attn(self.norm1(x), **kwargs))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

================================================ FILE: artrackv2_mindspore/lib/models/component/drop.py ================================================
""" DropBlock, DropPath

PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)

Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)

Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
 - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py

Hacked together by / Copyright 2020 Ross Wightman
"""
import mindspore as ms
from mindspore import nn
from mindspore import ops


def drop_block_2d(
        x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
    runs with success, but needs further validation and possibly optimization for lower runtime impact.

    NOTE(review): this is a torch->MindSpore port that still uses PyTorch idioms
    (.to(x.device), device= kwargs, in-place .mul_/.add_) — confirm it actually
    runs under MindSpore before relying on it.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
            (W - block_size + 1) * (H - block_size + 1))

    # Forces the block to be inside the feature map.
    w_i, h_i = ops.meshgrid(ops.arange(W).to(x.device), ops.arange(H).to(x.device))
    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
    valid_block = ops.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)

    if batchwise:
        # one mask for whole batch, quite a bit faster
        uniform_noise = ops.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = ops.rand_like(x)
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    # Max-pool of the negated mask grows each dropped seed into a block_size block.
    block_mask = -ops.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,  # block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = ops.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else ops.randn_like(x)
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
    else:
        # Rescale so the expected activation magnitude is preserved.
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=ms.float32).sum().add(1e-7)).to(x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


def drop_block_fast_2d(
        x: ms.Tensor, drop_prob: float = 0.1, block_size: int = 7,
        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid
    block mask at edges.

    NOTE(review): same PyTorch-idiom caveats as drop_block_2d above.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
            (W - block_size + 1) * (H - block_size + 1))

    if batchwise:
        # one mask for whole batch, quite a bit faster
        block_mask = ops.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
    else:
        # mask per batch element
        block_mask = ops.rand_like(x) < gamma
    block_mask = ops.max_pool2d(
        block_mask.to(x.dtype),
        kernel_size=clipped_block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = ops.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else ops.randn_like(x)
        if inplace:
            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1. - block_mask) + normal_noise * block_mask
    else:
        block_mask = 1 - block_mask
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=ms.float32).sum().add(1e-7)).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


class DropBlock2d(nn.Cell):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    Module wrapper around drop_block_2d / drop_block_fast_2d; identity at eval
    time or when drop_prob is 0.
    """

    def __init__(self,
                 drop_prob=0.1,
                 block_size=7,
                 gamma_scale=1.0,
                 with_noise=False,
                 inplace=False,
                 batchwise=False,
                 fast=True):
        super(DropBlock2d, self).__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def construct(self, x):
        if not self.training or not self.drop_prob:
            return x
        if self.fast:
            return drop_block_fast_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
        else:
            return drop_block_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)


def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.

    NOTE(review): ops.rand(..., device=) and .floor_() are PyTorch-style — confirm
    this runs under MindSpore.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + ops.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    # Scale the surviving paths by 1/keep_prob so the expectation is unchanged.
    output = x.div(keep_prob) * random_tensor
    return output


class DropPath(nn.Cell):
    """Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks).
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def construct(self, x): return drop_path(x, self.drop_prob, self.training) ================================================ FILE: artrackv2_mindspore/lib/models/component/mlp.py ================================================ """ MLP module w/ dropout and configurable activation layer Hacked together by / Copyright 2020 Ross Wightman """ import mindspore as ms from mindspore import nn from mindspore import ops class Mlp(nn.Cell): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Dense(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Dense(hidden_features, out_features) self.drop = nn.Dropout(p=drop) def construct(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class MultiLayerMlp(nn.Cell): """ Very simple multi-layer perceptron (also called FFN)""" def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) if BN: self.layers = nn.CellList(nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k)) for n, k in zip([input_dim] + h, h + [output_dim])) else: self.layers = nn.CellList(nn.Dense(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def construct(self, x): for i, layer in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class GluMlp(nn.Cell): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): super().__init__() out_features = out_features or 
in_features
        hidden_features = hidden_features or in_features
        assert hidden_features % 2 == 0  # half gates, half values
        self.fc1 = nn.Dense(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Dense(hidden_features // 2, out_features)
        self.drop = nn.Dropout(p=drop)

    def init_weights(self):
        # override init of fc1 w/ gate portion set to weight near zero, bias=1
        # NOTE(review): nn.init is the PyTorch initializer namespace; this file
        # imports MindSpore nn, so this method would fail if ever called — confirm.
        fc1_mid = self.fc1.bias.shape[0] // 2
        nn.init.ones_(self.fc1.bias[fc1_mid:])
        nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)

    def construct(self, x):
        x = self.fc1(x)
        # Split into value and gate halves; gate passes through the activation.
        x, gates = x.chunk(2, dim=-1)
        x = x * self.act(gates)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class GatedMlp(nn.Cell):
    """ MLP as used in gMLP
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 gate_layer=None, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Dense(in_features, hidden_features)
        self.act = act_layer()
        if gate_layer is not None:
            assert hidden_features % 2 == 0
            self.gate = gate_layer(hidden_features)
            hidden_features = hidden_features // 2  # FIXME base reduction on gate property?
        else:
            self.gate = nn.Identity()
        self.fc2 = nn.Dense(hidden_features, out_features)
        self.drop = nn.Dropout(p=drop)

    def construct(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.gate(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class ConvMlp(nn.Cell):
    """ MLP using 1x1 convs that keeps spatial dims
    """

    def __init__(
            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU,
            norm_layer=None, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(p=drop)

    def construct(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x

================================================ FILE: artrackv2_mindspore/lib/models/component/norm.py ================================================
# -*- coding:utf-8 -*-
# author : Skye Song
# file : norm.py
# Copyright (c) Skye-Song. All Rights Reserved
import torch


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
""" def __init__(self, n): super(FrozenBatchNorm2d, self).__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) eps = 1e-5 scale = w * (rv + eps).rsqrt() # rsqrt(x): 1/sqrt(x), r: reciprocal bias = b - rm * scale return x * scale + bias ================================================ FILE: artrackv2_mindspore/lib/models/component/patch_embed.py ================================================ """ Image to Patch Embedding using Conv2d A convolution based approach to patchifying a 2D image w/ embedding projection. 
Based on the impl in https://github.com/google-research/vision_transformer Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from itertools import repeat import collections.abc # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse to_1tuple = _ntuple(1) to_2tuple = _ntuple(2) to_3tuple = _ntuple(3) to_4tuple = _ntuple(4) to_ntuple = _ntuple class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__(self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): super().__init__() self.flatten = flatten self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) # BCHW -> BNC x = self.norm(x) return x ================================================ FILE: artrackv2_mindspore/lib/models/component/pos_embed.py ================================================ # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. 
# --------------------------------------------------------
# Position embedding utils
# --------------------------------------------------------

import numpy as np

import torch

# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # cls token gets an all-zero positional embedding prepended.
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=float)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb


# --------------------------------------------------------
# Interpolate position embeddings for high-resolution
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model):
    # Resize a checkpoint's 'pos_embed' to the model's patch grid via bicubic
    # interpolation; extra tokens (cls/dist) are carried over unchanged.
    if 'pos_embed' in checkpoint_model:
        pos_embed_checkpoint = checkpoint_model['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.patch_embed.num_patches
        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            checkpoint_model['pos_embed'] = new_pos_embed

================================================ FILE: artrackv2_mindspore/lib/models/component/weight_init.py ================================================
import torch
import math
import warnings

from torch.nn.init import _calculate_fan_in_and_fan_out


def _no_grad_trunc_normal_(tensor,
mean, std, a, b):
    # In-place truncated normal fill of `tensor` on [a, b] (no autograd tracking).
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)


def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    # Generic fan-based initializer (JAX/TF style): scales the variance by
    # 1/fan according to `mode`, then samples from `distribution`.
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    # LeCun normal init == variance scaling with fan_in + truncated normal.
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')

================================================ FILE: artrackv2_mindspore/lib/models/layers/__init__.py ================================================
================================================ FILE: artrackv2_mindspore/lib/models/layers/attn.py ================================================
import sys
sys.path.append("/home/baiyifan/code/AR2_mindspore_cp/2stage")  # NOTE(review): machine-specific path hack

import mindspore as ms
from mindspore import ops
from mindspore import nn
from mindspore.common.initializer import initializer, TruncatedNormal

from lib.models.layers.rpe import generate_2d_concatenated_self_attention_relative_positional_encoding_index


class Attention(nn.Cell):
    """Multi-head self-attention with optional 2D relative positional encoding
    over concatenated template (z) / search (x) token grids (MindSpore)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
                 rpe=False, z_size=7, x_size=14):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(p=attn_drop)
        self.proj = nn.Dense(dim, dim)
        self.proj_drop = nn.Dropout(p=proj_drop)

        self.rpe = rpe
        if self.rpe:
            relative_position_index = \
                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],
                                                                                           [x_size, x_size])
            # NOTE(review): register_buffer is a PyTorch nn.Module API; MindSpore
            # Cells have no such method — confirm this branch is exercised.
            self.register_buffer("relative_position_index", relative_position_index)
            # define a parameter table of relative position bias
            self.relative_position_bias_table = ops.empty((num_heads, relative_position_index.max() + 1))
            relative_position_bias_table_shape = self.relative_position_bias_table.shape
            # Truncated-normal init (sigma=0.02), then wrap as a trainable Parameter.
            self.relative_position_bias_table = initializer(TruncatedNormal(sigma=0.02), relative_position_bias_table_shape)
            self.relative_position_bias_table = ms.Parameter(self.relative_position_bias_table)

    def construct(self, x, mask=None, return_attention=False):
        # x: B, N, C
        # mask: [B, N, ] torch.bool
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.swapaxes(-2, -1)) * self.scale
        if self.rpe:
            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)
            attn += relative_position_bias

        if mask is not None:
            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),)

        # Optional split-softmax over template vs search keys; disabled here
        # (len_t = 49 hard-codes a 7x7 template grid).
        split_attn = False
        len_t = 49
        if split_attn:
            attn_t = ops.softmax(attn[..., :len_t], axis=-1)
            attn_s = ops.softmax(attn[..., len_t:], axis=-1)
            attn = ops.cat([attn_t, attn_s], dim=-1)
        else:
            attn = ops.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        if return_attention:
            return x, attn
        else:
            return x


class Attention_talking_head(nn.Cell):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    # with slight modifications to add Talking Heads Attention
(https://arxiv.org/pdf/2003.02436v1.pdf) def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., rpe=True, z_size=7, x_size=14): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias) self.attn_drop = nn.Dropout(p=attn_drop) self.proj = nn.Dense(dim, dim) self.proj_l = nn.Dense(num_heads, num_heads) self.proj_w = nn.Dense(num_heads, num_heads) self.proj_drop = nn.Dropout(p=proj_drop) self.rpe = rpe if self.rpe: relative_position_index = \ generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size], [x_size, x_size]) self.register_buffer("relative_position_index", relative_position_index) # define a parameter table of relative position bias self.relative_position_bias_table = ops.empty((num_heads,relative_position_index.max() + 1)) relative_position_bias_table_shape = self.relative_position_bias_table.shape self.relative_position_bias_table=initializer(TruncatedNormal(sigma=0.02),relative_position_bias_table_shape) self.relative_position_bias_table=ms.Parameter(self.relative_position_bias_table) def construct(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = (q @ k.swapaxes(-2, -1)) if self.rpe: relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0) attn += relative_position_bias if mask is not None: attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = ops.softmax(attn,axis=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).swapaxes(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x ================================================ 
FILE: artrackv2_mindspore/lib/models/layers/attn_blocks.py ================================================ import math import torch import torch.nn as nn from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_ from lib.models.layers.attn import Attention def candidate_elimination(attn: torch.Tensor, tokens: torch.Tensor, lens_t: int, keep_ratio: float, global_index: torch.Tensor, box_mask_z: torch.Tensor): """ Eliminate potential background candidates for computation reduction and noise cancellation. Args: attn (torch.Tensor): [B, num_heads, L_t + L_s, L_t + L_s], attention weights tokens (torch.Tensor): [B, L_t + L_s, C], template and search region tokens lens_t (int): length of template keep_ratio (float): keep ratio of search region tokens (candidates) global_index (torch.Tensor): global index of search region tokens box_mask_z (torch.Tensor): template mask used to accumulate attention weights Returns: tokens_new (torch.Tensor): tokens after candidate elimination keep_index (torch.Tensor): indices of kept search region tokens removed_index (torch.Tensor): indices of removed search region tokens """ lens_s = attn.shape[-1] - lens_t bs, hn, _, _ = attn.shape lens_keep = math.ceil(keep_ratio * lens_s) if lens_keep == lens_s: return tokens, global_index, None attn_t = attn[:, :, :lens_t, lens_t:] if box_mask_z is not None: box_mask_z = box_mask_z.unsqueeze(1).unsqueeze(-1).expand(-1, attn_t.shape[1], -1, attn_t.shape[-1]) # attn_t = attn_t[:, :, box_mask_z, :] attn_t = attn_t[box_mask_z] attn_t = attn_t.view(bs, hn, -1, lens_s) attn_t = attn_t.mean(dim=2).mean(dim=1) # B, H, L-T, L_s --> B, L_s # attn_t = [attn_t[i, :, box_mask_z[i, :], :] for i in range(attn_t.size(0))] # attn_t = [attn_t[i].mean(dim=1).mean(dim=0) for i in range(len(attn_t))] # attn_t = torch.stack(attn_t, dim=0) else: attn_t = attn_t.mean(dim=2).mean(dim=1) # B, H, L-T, L_s --> B, L_s # use sort instead of topk, due to the speed issue # 
https://github.com/pytorch/pytorch/issues/22812 sorted_attn, indices = torch.sort(attn_t, dim=1, descending=True) topk_attn, topk_idx = sorted_attn[:, :lens_keep], indices[:, :lens_keep] non_topk_attn, non_topk_idx = sorted_attn[:, lens_keep:], indices[:, lens_keep:] keep_index = global_index.gather(dim=1, index=topk_idx) removed_index = global_index.gather(dim=1, index=non_topk_idx) # separate template and search tokens tokens_t = tokens[:, :lens_t] tokens_s = tokens[:, lens_t:] # obtain the attentive and inattentive tokens B, L, C = tokens_s.shape # topk_idx_ = topk_idx.unsqueeze(-1).expand(B, lens_keep, C) attentive_tokens = tokens_s.gather(dim=1, index=topk_idx.unsqueeze(-1).expand(B, -1, C)) # inattentive_tokens = tokens_s.gather(dim=1, index=non_topk_idx.unsqueeze(-1).expand(B, -1, C)) # compute the weighted combination of inattentive tokens # fused_token = non_topk_attn @ inattentive_tokens # concatenate these tokens # tokens_new = torch.cat([tokens_t, attentive_tokens, fused_token], dim=0) tokens_new = torch.cat([tokens_t, attentive_tokens], dim=1) return tokens_new, keep_index, removed_index class CEBlock(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, keep_ratio_search=1.0,): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.keep_ratio_search = keep_ratio_search def forward(self, x, global_index_template, global_index_search, mask=None, ce_template_mask=None, keep_ratio_search=None): x_attn, attn = self.attn(self.norm1(x), mask, True) x = x + self.drop_path(x_attn) lens_t = global_index_template.shape[1] removed_index_search = None if self.keep_ratio_search < 1 and (keep_ratio_search is None or keep_ratio_search < 1): keep_ratio_search = self.keep_ratio_search if keep_ratio_search is None else keep_ratio_search x, global_index_search, removed_index_search = candidate_elimination(attn, x, lens_t, keep_ratio_search, global_index_search, ce_template_mask) x = x + self.drop_path(self.mlp(self.norm2(x))) return x, global_index_template, global_index_search, removed_index_search, attn class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, mask=None): x = x + self.drop_path(self.attn(self.norm1(x), mask)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x ================================================ FILE: artrackv2_mindspore/lib/models/layers/head.py ================================================ import sys import mindspore as ms import mindspore.nn as nn from mindspore import ops from mindspore import Tensor from mindspore.nn import Identity from mindspore.nn.probability.distribution import Categorical from lib.models.timm import * import copy from typing import Optional def top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k # ops.topk()返回最后一维最大的top_k个元素,返回值为二维(values,indices) # ...表示其他维度由计算机自行推断 for i in range(logits.shape[0]): indices_to_remove = logits[i] < ops.topk(logits[i], top_k)[0][..., -1, None] logits[i][indices_to_remove] = filter_value # 对于topk之外的其他元素的logits值设为负无穷 if top_p > 0.0: for i in range(logits.shape[0]): sorted_logits, sorted_indices = ops.sort(logits[i], descending=True) # 对logits进行递减排序 cumulative_probs = ops.cumsum(ops.softmax(sorted_logits, axis=-1), axis=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probs > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[i][indices_to_remove] = filter_value return logits def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, freeze_bn=False): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(out_planes), nn.ReLU(inplace=True)) class Corner_Predictor(nn.Cell): """ Corner Predictor module""" def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False): super(Corner_Predictor, self).__init__() self.feat_sz = feat_sz self.stride = stride self.img_sz = self.feat_sz * self.stride '''top-left corner''' self.conv1_tl = conv(inplanes, channel, freeze_bn=freeze_bn) self.conv2_tl = conv(channel, channel // 2, freeze_bn=freeze_bn) self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn) self.conv4_tl = conv(channel // 4, channel // 8, 
freeze_bn=freeze_bn) self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1) '''bottom-right corner''' self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn) self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn) self.conv3_br = conv(channel // 2, channel // 4, freeze_bn=freeze_bn) self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn) self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1) '''about coordinates and indexs''' self.indice = ops.arange(0, self.feat_sz).view(-1, 1) * self.stride # generate mesh-grid self.coord_x = self.indice.repeat((self.feat_sz, 1)).view((self.feat_sz * self.feat_sz,)).float() self.coord_y = self.indice.repeat((1, self.feat_sz)).view((self.feat_sz * self.feat_sz,)).float() def construct(self, x, return_dist=False, softmax=True): """ Forward pass with input x. """ score_map_tl, score_map_br = self.get_score_map(x) if return_dist: coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax) coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax) return ops.stack((coorx_tl, coory_tl, coorx_br, coory_br), axis=1) / self.img_sz, prob_vec_tl, prob_vec_br else: coorx_tl, coory_tl = self.soft_argmax(score_map_tl) coorx_br, coory_br = self.soft_argmax(score_map_br) return ops.stack((coorx_tl, coory_tl, coorx_br, coory_br), axis=1) / self.img_sz def get_score_map(self, x): # top-left branch x_tl1 = self.conv1_tl(x) x_tl2 = self.conv2_tl(x_tl1) x_tl3 = self.conv3_tl(x_tl2) x_tl4 = self.conv4_tl(x_tl3) score_map_tl = self.conv5_tl(x_tl4) # bottom-right branch x_br1 = self.conv1_br(x) x_br2 = self.conv2_br(x_br1) x_br3 = self.conv3_br(x_br2) x_br4 = self.conv4_br(x_br3) score_map_br = self.conv5_br(x_br4) return score_map_tl, score_map_br def soft_argmax(self, score_map, return_dist=False, softmax=True): """ get soft-argmax coordinate for a given heatmap """ score_vec = score_map.view((-1, self.feat_sz * self.feat_sz)) # (batch, 
feat_sz * feat_sz) prob_vec = ops.softmax(score_vec, axis=1) exp_x = ops.sum((self.coord_x * prob_vec), dim=1) exp_y = ops.sum((self.coord_y * prob_vec), dim=1) if return_dist: if softmax: return exp_x, exp_y, prob_vec else: return exp_x, exp_y, score_vec else: return exp_x, exp_y class CenterPredictor(nn.Cell, ): def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False): super(CenterPredictor, self).__init__() self.feat_sz = feat_sz self.stride = stride self.img_sz = self.feat_sz * self.stride # corner predict self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn) self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn) self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn) self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn) self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1) # size regress self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn) self.conv2_offset = conv(channel, channel // 2, freeze_bn=freeze_bn) self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn) self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn) self.conv5_offset = nn.Conv2d(channel // 8, 2, kernel_size=1) # size regress self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn) self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn) self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn) self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn) self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1) for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def construct(self, x, gt_score_map=None): """ Forward pass with input x. 
""" score_map_ctr, size_map, offset_map = self.get_score_map(x) # assert gt_score_map is None if gt_score_map is None: bbox = self.cal_bbox(score_map_ctr, size_map, offset_map) else: bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map) return score_map_ctr, bbox, size_map, offset_map def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False): max_score, idx = ops.max(score_map_ctr.flatten(1), axis=1, keepdim=True) idx_y = idx // self.feat_sz idx_x = idx % self.feat_sz idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1) size = size_map.flatten(2).gather(dim=2, index=idx) offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1) # bbox = ops.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2, # idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], axis=1) / self.feat_sz # cx, cy, w, h bbox = ops.cat([(idx_x.to(ms.float) + offset[:, :1]) / self.feat_sz, (idx_y.to(ms.float) + offset[:, 1:]) / self.feat_sz, size.squeeze(-1)], axis=1) if return_score: return bbox, max_score return bbox def get_pred(self, score_map_ctr, size_map, offset_map): max_score, idx = ops.max(score_map_ctr.flatten(1), axis=1, keepdim=True) idx_y = idx // self.feat_sz idx_x = idx % self.feat_sz idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1) size = size_map.flatten(2).gather(dim=2, index=idx) offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1) # bbox = ops.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2, # idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz return size * self.feat_sz, offset def get_score_map(self, x): def _sigmoid(x): y = ops.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4) return y # ctr branch x_ctr1 = self.conv1_ctr(x) x_ctr2 = self.conv2_ctr(x_ctr1) x_ctr3 = self.conv3_ctr(x_ctr2) x_ctr4 = self.conv4_ctr(x_ctr3) score_map_ctr = self.conv5_ctr(x_ctr4) # offset branch x_offset1 = self.conv1_offset(x) x_offset2 = self.conv2_offset(x_offset1) x_offset3 = self.conv3_offset(x_offset2) x_offset4 = 
self.conv4_offset(x_offset3) score_map_offset = self.conv5_offset(x_offset4) # size branch x_size1 = self.conv1_size(x) x_size2 = self.conv2_size(x_size1) x_size3 = self.conv3_size(x_size2) x_size4 = self.conv4_size(x_size3) score_map_size = self.conv5_size(x_size4) return _sigmoid(score_map_ctr), _sigmoid(score_map_size), score_map_offset class MLP(nn.Cell): """ Very simple multi-layer perceptron (also called FFN)""" def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) if BN: self.layers = nn.CellList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k)) for n, k in zip([input_dim] + h, h + [output_dim])) else: self.layers = nn.CellList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def construct(self, x): for i, layer in enumerate(self.layers): x = ops.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class SelfAttention(nn.Cell): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., attn_pos_encoding_only=False): super(SelfAttention, self).__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 if attn_pos_encoding_only: self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias) else: self.q = nn.Linear(dim, dim, bias=qkv_bias) self.k = nn.Linear(dim, dim, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.attn_pos_encoding_only = attn_pos_encoding_only def construct(self, x, q_ape, k_ape, attn_pos): ''' Args: x (ms.Tensor): (B, L, C) q_ape (ms.Tensor | None): (1 or B, L, C), absolute positional encoding for q k_ape (ms.Tensor | None): (1 or B, L, C), absolute positional encoding for k attn_pos (ms.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding Returns: ms.Tensor: (B, L, C) ''' B, N, C = x.shape if self.attn_pos_encoding_only: assert q_ape is None and k_ape is None qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] else: q = x + q_ape if q_ape is not None else x q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) k = x + k_ape if k_ape is not None else x k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) attn = q @ k.swapaxes(-2, -1) attn = attn * self.scale if attn_pos is not None: attn = attn + attn_pos attn = ops.softmax(attn,axis=-1) attn = self.attn_drop(attn) x = attn @ v x = x.swapaxes(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class CrossAttention(nn.Cell): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., attn_pos_encoding_only=False): super(CrossAttention, self).__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 if attn_pos_encoding_only: self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias) else: self.q = nn.Linear(dim, dim, bias=qkv_bias) self.k = nn.Linear(dim, dim, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.attn_pos_encoding_only = attn_pos_encoding_only def construct(self, q, kv, q_ape, k_ape, attn_pos): ''' Args: q (ms.Tensor): (B, L_q, C) kv (ms.Tensor): (B, L_kv, C) q_ape (ms.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q k_ape (ms.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k attn_pos (ms.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding Returns: ms.Tensor: (B, L_q, C) ''' B, q_N, C = q.shape kv_N = kv.shape[1] if self.attn_pos_encoding_only: assert q_ape is None and k_ape is None q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) k, v = kv[0], kv[1] else: q = q + q_ape if q_ape is not None else q q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) k = kv + k_ape if k_ape is not None else kv k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) attn = q @ k.swapaxes(-2, -1) attn = attn * self.scale if attn_pos is not None: attn = attn + attn_pos attn = ops.softmax(attn,axis=-1) attn = self.attn_drop(attn) x = attn @ v x = x.swapaxes(1, 2).reshape(B, q_N, C) x = self.proj(x) x = self.proj_drop(x) return x class Mlp(nn.Cell): """ Multilayer perceptron.""" def __init__(self, in_features, hidden_features=None, out_features=None, 
act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def construct(self, x): ''' Args: x (ms.Tensor): (B, L, C), input tensor Returns: ms.Tensor: (B, L, C), output tensor ''' x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class FeatureFusion(nn.Cell): def __init__(self, dim, num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False): super(FeatureFusion, self).__init__() self.z_norm1 = norm_layer(dim) self.x_norm1 = norm_layer(dim) self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only) self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only) self.z_norm2_1 = norm_layer(dim) self.z_norm2_2 = norm_layer(dim) self.x_norm2_1 = norm_layer(dim) self.x_norm2_2 = norm_layer(dim) self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only) self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only) mlp_hidden_dim = int(dim * mlp_ratio) self.z_norm3 = norm_layer(dim) self.x_norm3 = norm_layer(dim) print(mlp_ratio) self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.drop_path = drop_path def construct(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos): z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos)) x = x + 
self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos)) z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos)) x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos)) z = z + self.drop_path(self.z_mlp(self.z_norm3(z))) x = x + self.drop_path(self.x_mlp(self.x_norm3(x))) return z, x class FeatureFusionEncoder(nn.Cell): def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc, z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index, z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table, x_z_rel_pos_bias_table): super(FeatureFusionEncoder, self).__init__() self.layers = nn.CellList(feature_fusion_layers) self.z_pos_enc = z_pos_enc self.x_pos_enc = x_pos_enc self.register_buffer('z_rel_pos_index', z_rel_pos_index, False) self.register_buffer('x_rel_pos_index', x_rel_pos_index, False) self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False) self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False) self.z_rel_pos_bias_table = z_rel_pos_bias_table self.x_rel_pos_bias_table = x_rel_pos_bias_table self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table #self.conv1 = ms.nn.Conv2d(384,768,1,1,0) #self.conv2 = ms.nn.Conv2d(768,768,2,1,1) #self.conv3 = ms.nn.Conv2d(768,384,1,1,0) #self.norm1 = ms.nn.LayerNorm(384) #self.norm2 = ms.nn.LayerNorm(768) #self.norm3 = ms.nn.LayerNorm(384) def construct(self, z, x, z_pos, x_pos): ''' Args: z (ms.Tensor): (B, L_z, C), template image feature tokens x (ms.Tensor): (B, L_x, C), search image feature tokens z_pos (ms.Tensor | None): (1 or B, L_z, C), optional positional encoding for z x_pos (ms.Tensor | None): (1 or B, L_x, C), optional positional encoding for x Returns: Tuple[ms.Tensor, ms.Tensor]: (B, L_z, C): template image feature tokens (B, L_x, C): search image feature tokens ''' # 
Support untied positional encoding only for simplicity assert z_pos is None and x_pos is None # untied positional encoding z_q_pos, z_k_pos = self.z_pos_enc() x_q_pos, x_k_pos = self.x_pos_enc() z_self_attn_pos = (z_q_pos @ z_k_pos.swapaxes(-2, -1)).unsqueeze(0) x_self_attn_pos = (x_q_pos @ x_k_pos.swapaxes(-2, -1)).unsqueeze(0) z_x_cross_attn_pos = (z_q_pos @ x_k_pos.swapaxes(-2, -1)).unsqueeze(0) x_z_cross_attn_pos = (x_q_pos @ z_k_pos.swapaxes(-2, -1)).unsqueeze(0) # relative positional encoding z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index) x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index) z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index) x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index) # x = self.norm1(x) # B,L,C = x.shape # x = x.permute(0,2,1).reshape(B,C,14,14) # x_temp = x # x = self.conv3(self.conv2((self.conv1(x)))) # x = x[:,:,1:,1:] # x = x+x_temp # x = x.reshape(B,C,L).permute(0,2,1) # x = self.norm3(x) for layer in self.layers: z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos) return z, x class Learned2DPositionalEncoder(nn.Cell): def __init__(self, dim, w, h): super(Learned2DPositionalEncoder, self).__init__() self.w_pos = nn.Parameter(ops.empty(w, dim)) self.h_pos = nn.Parameter(ops.empty(h, dim)) trunc_normal_(self.w_pos, std=0.02) trunc_normal_(self.h_pos, std=0.02) def construct(self): w = self.w_pos.shape[0] h = self.h_pos.shape[0] return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1) class Untied2DPositionalEncoder(nn.Cell): def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True): super(Untied2DPositionalEncoder, self).__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.pos = Learned2DPositionalEncoder(dim, w, h) self.norm = nn.LayerNorm(dim) self.pos_q_linear = None self.pos_k_linear = None if with_q: self.pos_q_linear = nn.Linear(dim, dim) if with_k: self.pos_k_linear = nn.Linear(dim, dim) self.num_heads = num_heads head_dim = dim // num_heads self.scale = scale or head_dim ** -0.5 def construct(self): pos = self.norm(self.pos()) seq_len = pos.shape[0] if self.pos_q_linear is not None and self.pos_k_linear is not None: pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) * self.scale pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) return pos_q, pos_k elif self.pos_q_linear is not None: pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) * self.scale return pos_q elif self.pos_k_linear is not None: pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) return pos_k else: raise RuntimeError def generate_2d_relative_positional_encoding_index(z_shape, x_shape): ''' z_shape: (z_h, z_w) x_shape: (x_h, x_w) ''' z_2d_index_h, z_2d_index_w = ops.meshgrid(ops.arange(z_shape[0]), ops.arange(z_shape[1])) x_2d_index_h, x_2d_index_w = ops.meshgrid(ops.arange(x_shape[0]), ops.arange(x_shape[1])) z_2d_index_h = z_2d_index_h.flatten(0) z_2d_index_w = z_2d_index_w.flatten(0) x_2d_index_h = x_2d_index_h.flatten(0) x_2d_index_w = x_2d_index_w.flatten(0) diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :] diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :] diff = ops.stack((diff_h, diff_w), axis=-1) _, indices = ops.unique(diff.view(-1, 2), return_inverse=True, dim=0) return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1]) class RelativePosition2DEncoder(nn.Cell): def __init__(self, num_heads, embed_size): super(RelativePosition2DEncoder, self).__init__() self.relative_position_bias_table = nn.Parameter(ops.empty((num_heads, embed_size))) trunc_normal_(self.relative_position_bias_table, std=0.02) def 
construct(self, attn_rpe_index): ''' Args: attn_rpe_index (ms.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size Returns: ms.Tensor: (1, num_heads, *) ''' return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0) class DropPathAllocator: def __init__(self, max_drop_path_rate, stochastic_depth_decay = True): self.max_drop_path_rate = max_drop_path_rate self.stochastic_depth_decay = stochastic_depth_decay self.allocated = [] self.allocating = [] def __enter__(self): self.allocating = [] def __exit__(self, exc_type, exc_val, exc_tb): if len(self.allocating) != 0: self.allocated.append(self.allocating) self.allocating = None if not self.stochastic_depth_decay: for depth_module in self.allocated: for module in depth_module: if isinstance(module, DropPath): module.drop_prob = self.max_drop_path_rate else: depth = self.get_depth() dpr = [x.item() for x in ops.linspace(0, self.max_drop_path_rate, depth)] assert len(dpr) == len(self.allocated) for drop_path_rate, depth_modules in zip(dpr, self.allocated): for module in depth_modules: if isinstance(module, DropPath): module.drop_prob = drop_path_rate def __len__(self): length = 0 for depth_modules in self.allocated: length += len(depth_modules) return length def increase_depth(self): self.allocated.append(self.allocating) self.allocating = [] def get_depth(self): return len(self.allocated) def allocate(self): if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0): drop_path_module = Identity() else: drop_path_module = DropPath() self.allocating.append(drop_path_module) return drop_path_module def get_all_allocated(self): allocated = [] for depth_module in self.allocated: for module in depth_module: allocated.append(module) return allocated def build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path): z_shape = [z_size, z_size] x_shape = [x_size, x_size] encoder_layers = [] for i in 
range(encoder_layer): encoder_layers.append( FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop, drop_path=drop_path.allocate(), attn_pos_encoding_only=True) ) z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1]) x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1]) z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape) x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape) z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape) x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape) z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1) x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1) z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1) x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1) return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index, x_self_attn_rel_pos_index, z_x_cross_attn_rel_pos_index, x_z_cross_attn_rel_pos_index, z_self_attn_rel_pos_bias_table, x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table, x_z_cross_attn_rel_pos_bias_table) class TargetQueryDecoderLayer(nn.Cell): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm): super(TargetQueryDecoderLayer, self).__init__() self.norm_1 = norm_layer(dim) #self.self_attn1 = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop) self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop) self.norm_2_query = norm_layer(dim) 
self.norm_2_memory = norm_layer(dim) # self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop) self.multihead_attn = nn.MultiheadAttention(dim, num_heads, dropout=drop) self.norm_3 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) #self.norm_4 = norm_layer(dim) #self.self_attn2 = nn.MultiheadAttention(dim, num_heads, dropout=drop) #self.norm_5_query = norm_layer(dim) #self.norm_5_memory = norm_layer(dim) # self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop) #self.multihead_attn2 = nn.MultiheadAttention(dim, num_heads, dropout=drop) #self.norm_6 = norm_layer(dim) #mlp_hidden_dim = int(dim * mlp_ratio) #self.mlpx = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.drop_path = drop_path def construct(self, query, memoryz, query_pos, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, ): ''' Args: query (ms.Tensor): (B, num_queries, C) memory (ms.Tensor): (B, L, C) query_pos (ms.Tensor): (1 or B, num_queries, C) memory_pos (ms.Tensor): (1 or B, L, C) Returns: ms.Tensor: (B, num_queries, C) ''' #memory = ops.cat((memoryx,memoryz),dim=1) tgt = query q = k = self.norm_1(query) + query_pos query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]) q2 = self.norm_2_query(query) + query_pos memory = memoryz k2 = self.norm_2_memory(memory).permute(1, 0 ,2) memory_in = memory.permute(1, 0 ,2) query = query + self.drop_path( self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]) query = query + self.drop_path(self.mlpz(self.norm_3(query))) return query def _get_clones(module, N): return nn.CellList([copy.deepcopy(module) for i in range(N)]) class 
TargetQueryDecoderBlock(nn.Cell): def __init__(self, dim, decoder_layers, num_layer): super(TargetQueryDecoderBlock, self).__init__() self.layers = nn.CellList(decoder_layers) self.num_layers = num_layer self.norm = nn.LayerNorm(dim) def construct(self, tgt, z, query_pos: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None): ''' Args: z (ops.Tensor): (B, L_z, C) x (ms.Tensor): (B, L_x, C) Returns: ms.Tensor: (B, num_queries, C) ''' output = tgt for layer in self.layers: output = layer(output, z, query_pos, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask) output = self.norm(output) return output def build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate): num_layers = decoder_layer decoder_layers = [] for _ in range(num_layers): decoder_layers.append( TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path.allocate())) drop_path.increase_depth() decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers) return decoder def generate_square_subsequent_mask(sz): r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). 
""" mask = (ops.triu(ops.ones(sz, sz)) == 1).swapaxes(0, 1) #for i in range(int(sz/4 - 1)): # j = i+1 # for k in range(4): # mask[j*4+k, 0:j*4] = 0 mask = mask.float().masked_fill(mask == 0, float( '-inf')).masked_fill(mask == 1, float(0.0)) return mask class Pix2Track(nn.Cell): def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, stride=16, encoder_layer=3, decoder_layer=3, bins=400,num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0,attn_drop=0.0, drop_path=nn.Identity): super(Pix2Track, self).__init__() self.bins = bins self.word_embeddings = nn.Embedding(self.bins * 3 + 2, in_channel, padding_idx=self.bins * 3, max_norm=1, norm_type=2.0) print(self.bins) self.position_embeddings = nn.Embedding( 5, in_channel) self.prev_position_embeddings = nn.Embedding(5, in_channel) self.output_bias = ms.Parameter(ops.zeros(self.bins * 3 + 2)) #self.out_norm_cls = nn.LayerNorm(in_channel) self.identity_search = ms.Parameter(ops.zeros(1, 1, 768)) self.identity_search = trunc_normal_(self.identity_search, std=.02) self.encoder_layer = encoder_layer self.drop_path = drop_path self.tz = feat_tz * feat_tz self.sz = feat_sz * feat_sz trunc_normal_(self.word_embeddings.weight, std=.02) if self.encoder_layer > 0 : self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path) else: self.encoder = None self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, feat_tz, feat_sz) def construct(self, zx_feat, pos_z, pos_x, identity, seqs_input=None, head_type=None, stage=None, search_feature=None): emb_weight = self.word_embeddings.weight.clone() share_weight = emb_weight.T z_feat = zx_feat[:, :self.tz] x_feat = zx_feat[:, self.tz:] z_pos = None x_pos = None out_list = [] bs = zx_feat.shape[0] if self.encoder != None: z_feat, x_feat = self.encoder(z_feat, x_feat, None, None) output_x_feat = x_feat.clone() #print("this is original 
x_feat") #print(x_feat) #if search_feature == None: #print("I input none") # x_feat = ops.cat((x_feat, x_feat), dim=1) # else: #print("i input something") # x_feat = ops.cat((x_feat, search_feature), axis=1) #print("this is train_x_feat") #print(x_feat) #print(x_feat.shape) #print(x_feat) #print(x_feat.shape) #print(stage) if stage == None: seqs_input = seqs_input.to(ms.int64).to(zx_feat.device) tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed_ = self.position_embeddings.weight.unsqueeze(1) prev_embed = self.prev_position_embeddings.weight.unsqueeze(1) query_embed = ops.cat([prev_embed, query_embed_], axis=0) query_embed = query_embed.repeat(1, bs, 1) #print(tgt.shape) decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, self.identity_search, query_embed[:len(tgt)], tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device)) #decoder_feat = self.out_norm_cls(decoder_feat) at = ops.matmul(decoder_feat_cls, share_weight) at = at + self.output_bias output = {'feat': at, "state": "train"} #print("dododo!") else: b = seqs_input #b = seqs_input.unsqueeze(0) #print(b) a = ops.ones(bs, 1) * self.bins * 3 #print(a) a = a.to(b) #print(a.shape) #print(b.shape) c = ops.cat([b, a], axis=1) #c = a #print(c) #print(c) bs_lst = bs / 2 seqs_input = c.to(zx_feat.device).to(ms.int32) #print(seqs_input) #print("may i do this?") for i in range(5): tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) #print("may i do do do!") query_embed_ = self.position_embeddings.weight.unsqueeze(1) prev_embed = self.prev_position_embeddings.weight.unsqueeze(0).repeat(4,1,1).permute(1,0,2).reshape(4*5, -1).unsqueeze(1) query_embed = ops.cat([prev_embed, query_embed_], axis=0) #query_embed = query_embed_.repeat(1, bs, 1) query_embed = query_embed.repeat(1, bs, 1) #print(tgt.shape) #print(query_embed.shape) #print(len(tgt)) #print(z_feat.shape) #print(x_feat.shape) decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, 
self.identity_search, query_embed[:len(tgt)], tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device)) # print(decoder_feat_cls) #decoder_feat_cls = self.out_norm_cls(decoder_feat_cls) out = ops.matmul(decoder_feat_cls.swapaxes(0, 1)[:, -1, :], share_weight) + self.output_bias if i == 4: temp = ops.matmul(decoder_feat_cls, share_weight) + self.output_bias # temp = temp.softmax(-1) #out_logits = top_k_top_p_filtering_batch(out, 0, 0.4) #next_token = seqs_input[:, -1:].clone() #for j in range(next_token.shape[0]): # next_token[j] = ops.multinomial(ops.softmax(out_logits[j].squeeze(0), axis=-1), num_samples=1) #out = out.softmax(-1) #value, extra_seq = out.topk(axis=-1, k=1)[0], out.topk(axis=-1, k=1)[1] #seqs_input = ops.cat([seqs_input, next_token], axis=-1) #if i == 0: # seqs_output = next_token # values = value #else: # seqs_output = ops.cat([seqs_output, next_token], axis=-1) # values = ops.cat([values, value], axis=-1) out_list.append(out.unsqueeze(0)) out_val = ops.softmax(out[:, :self.bins*3],axis=-1) out = ops.softmax(out,axis=-1) if head_type == "half": #print("can i do that?") if i <= 3: prob_out = out_val else: prob_out = out prob = Categorical(prob_out) max_indicies = ops.argmax(prob_out, -1) samplex_indices = prob.sample() #temp_bs = len(max_indicies) // 2 #assert len(max_indicies) % 2 == 0 selected_indices = ops.cat([max_indicies], axis=0) for j in range(bs): if j == 0 : value = prob_out[j, max_indicies[j]].unsqueeze(0) else: value = ops.cat([value, prob_out[j, max_indicies[j]].unsqueeze(0)], axis=0) # else: # value = ops.cat([value, prob_out[j, samplex_indices[j]].unsqueeze(0)], axis=0) selected_indices = selected_indices.unsqueeze(1) value = value.unsqueeze(1) seqs_input = ops.cat([seqs_input, selected_indices], axis=-1) if i == 0: seqs_output = selected_indices values = value else: seqs_output = ops.cat([seqs_output, selected_indices], axis=-1) values = ops.cat([values, value], axis=-1) continue value, extra_seq = out.topk(dim=-1, k=1)[0], 
out.topk(dim=-1, k=1)[1] seqs_input = ops.cat([seqs_input, extra_seq], axis=-1) if i == 0: seqs_output = extra_seq values = value else: seqs_output = ops.cat([seqs_output, extra_seq], axis=-1) values = ops.cat([values, value], axis=-1) #print(seqs_input) #print(seqs_input) #print(x_feat.shape) #print(z_feat.shape) #print(seqs_input) if not(not out_list): feat = ops.cat(out_list) #print(seqs_input) output = {'seqs': seqs_output, 'class': values, 'feat': feat, "state": "val/test", "x_feat": output_x_feat.detach()} return output def build_box_head(cfg, hidden_dim): stride = cfg.MODEL.BACKBONE.STRIDE if cfg.MODEL.HEAD.TYPE == "MLP": mlp_head = MLP(hidden_dim, hidden_dim, 4, 3) # dim_in, dim_hidden, dim_out, 3 layers return mlp_head elif "CORNER" in cfg.MODEL.HEAD.TYPE: feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) channel = getattr(cfg.MODEL, "NUM_CHANNELS", 256) print("head channel: %d" % channel) if cfg.MODEL.HEAD.TYPE == "CORNER": corner_head = Corner_Predictor(inplanes=cfg.MODEL.HIDDEN_DIM, channel=channel, feat_sz=feat_sz, stride=stride) else: raise ValueError() return corner_head elif cfg.MODEL.HEAD.TYPE == "CENTER": in_channel = hidden_dim out_channel = cfg.MODEL.HEAD.NUM_CHANNELS feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) center_head = CenterPredictor(inplanes=in_channel, channel=out_channel, feat_sz=feat_sz, stride=stride) return center_head elif cfg.MODEL.HEAD.TYPE == "PIX": in_channel = hidden_dim feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) feat_tz = int(cfg.DATA.TEMPLATE.SIZE / stride) decoder_layer = cfg.MODEL.DECODER_LAYER encoder_layer = cfg.MODEL.ENCODER_LAYER bins = cfg.MODEL.BINS num_heads = cfg.MODEL.NUM_HEADS mlp_ratio = cfg.MODEL.MLP_RATIO qkv_bias = cfg.MODEL.QKV_BIAS drop_rate = cfg.MODEL.DROP_RATE attn_drop = cfg.MODEL.ATTN_DROP drop_path = cfg.MODEL.DROP_PATH drop_path_allocator = DropPathAllocator(drop_path) pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz, stride=stride, encoder_layer=encoder_layer, 
decoder_layer=decoder_layer, bins=bins, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate, attn_drop=attn_drop, drop_path=drop_path_allocator) return pix_head else: raise ValueError("HEAD TYPE %s is not supported." % cfg.MODEL.HEAD_TYPE) ================================================ FILE: artrackv2_mindspore/lib/models/layers/mask_decoder.py ================================================ # -*- coding:utf-8 -*- # author : Skye Song # file : vit_decoder.py # Copyright (c) Skye-Song. All Rights Reserved import mindspore as ms import mindspore.nn as nn from mindspore import ops from mindspore import Tensor import sys from lib.utils.box_ops import box_xywh_to_cxywh, box_cxcywh_to_xyxy from lib.models.component.block import Block from einops import rearrange from lib.utils.image import * from mindspore.common.initializer import initializer,Normal,XavierUniform,Constant class MaskDecoder(nn.Cell): def __init__(self, mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, pool_size=8, mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False): super().__init__() self.mask_ratio = mask_ratio self.num_patches = num_patches self.patch_size = patch_size self.decoder_embed = nn.Dense(embed_dim, decoder_embed_dim, has_bias=True) self.mask_token = ms.Parameter(ops.zeros((1, 1, decoder_embed_dim))) self.decoder_pos_embed = ms.Parameter(ops.zeros((1, num_patches, decoder_embed_dim)), requires_grad=False) # fixed sin-cos embedding self.decoder_blocks = nn.CellList([ Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer) for i in range(decoder_depth)]) decoder_embed_dim_tuple=decoder_embed_dim if isinstance(decoder_embed_dim,int): decoder_embed_dim_tuple=tuple([decoder_embed_dim]) self.decoder_norm = norm_layer(decoder_embed_dim_tuple) self.decoder_pred = nn.Dense(decoder_embed_dim, patch_size ** 2 * 3, has_bias=True) # decoder to patch 
self.norm_pix_loss = norm_pix_loss def random_masking(self, x): """ Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random noise. x: [N, L, D], sequence """ N, L, D = x.shape # batch, length, dim len_keep = int(L * (1 - self.mask_ratio)) noise = ops.rand(N, L, device=x.device) # noise in [0, 1] # sort noise for each sample ids_shuffle = ops.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = ops.argsort(ids_shuffle, dim=1) # keep the first subset ids_keep = ids_shuffle[:, :len_keep] x_keep = ops.gather_elements(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) # generate the binary mask: 0 is keep, 1 is remove mask = ops.ones([N, L], device=x.device) mask[:, :len_keep] = 0 # unshuffle to get the binary mask mask = ops.gather_elements(mask, dim=1, index=ids_restore) # get the masked x mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] - x_keep.shape[1], 1) x_ = ops.cat([x_keep, mask_tokens], axis=1) # no cls token x_masked = ops.gather_elements(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) # unshuffle return x_masked, mask def forward_decoder(self, x, eval=False): # embed tokens x = self.decoder_embed(x) mask = None # append mask tokens to sequence if not eval: x, mask = self.random_masking(x) # add pos embed x = x + self.decoder_pos_embed # apply Transformer blocks for blk in self.decoder_blocks: x = blk(x) x = self.decoder_norm(x) # predictor projection x = self.decoder_pred(x) return x, mask def unpatchify(self, x): """ x: (N, L, patch_size**2 *3) imgs: (N, 3, H, W) """ p = self.patch_size h = w = int(x.shape[1] ** .5) assert h * w == x.shape[1] x = x.reshape((x.shape[0], h, w, p, p, 3)) x = ops.permute(x, (0,5,1,3,2,4)) imgs = x.reshape((x.shape[0], 3, h * p, h * p)) return imgs def patchify(self, imgs): """ imgs: (N, 3, H, W) x: (N, L, patch_size**2 *3) """ p = self.patch_size assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 
0 h = w = imgs.shape[2] // p x = imgs.reshape((imgs.shape[0], 3, h, p, w, p)) x = ops.permute(x, (0,2,4,3,5,1)) x = x.reshape((imgs.shape[0], h * w, p ** 2 * 3)) return x def forward_loss(self, imgs, pred, mask=None): """ imgs: [N, 3, H, W] pred: [N, L, p*p*3] mask: [N, L], 0 is keep, 1 is remove, """ target = self.patchify(imgs) if self.norm_pix_loss: mean = target.mean(dim=-1, keepdims=True) var = target.var(dim=-1, keepdims=True) target = (target - mean) / (var + 1.e-6) ** .5 loss = (pred - target) ** 2 loss = loss.mean(dim=-1) # [N, L], mean loss per patc if mask == None: loss = loss.sum() / pred.shape[1] / pred.shape[0] # mean loss on removed patches else: loss = loss.sum() / pred.shape[1] / pred.shape[0] return loss def construct(self, x, images=None, gt_bboxes=None, eval=False,): x_numpy = x.asnumpy() x_numpy = rearrange(x_numpy, 'b c h w -> b (h w) c') x = Tensor(x_numpy) pred, mask = self.forward_decoder(x, eval) # [N, L, p*p*3] if eval: return self.unpatchify(pred) if mask != None: loss = self.forward_loss(imgs=images, pred=pred, mask=mask) else: loss = self.forward_loss(imgs=images, pred=pred) pred = self.unpatchify(pred) return pred, loss def mask_decoder(): model = MaskDecoder( mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False) return model def build_maskdecoder(cfg): pool_size = int(cfg.DATA.TEMPLATE.SIZE / cfg.MODEL.BACKBONE.PATCHSIZE) num_patches = (cfg.DATA.TEMPLATE.SIZE // cfg.MODEL.BACKBONE.PATCHSIZE) ** 2 model = MaskDecoder( mask_ratio=cfg.MODEL.DECODER.MASK_RATIO, patch_size=cfg.MODEL.BACKBONE.PATCHSIZE, num_patches=num_patches, embed_dim=cfg.MODEL.BACKBONE.EMBEDDIM, decoder_embed_dim=cfg.MODEL.DECODER.EMBEDDIM, decoder_depth=cfg.MODEL.DECODER.DEPTH, decoder_num_heads=cfg.MODEL.DECODER.NUMHEADS, pool_size=pool_size, mlp_ratio=cfg.MODEL.DECODER.MLPRATIO, norm_layer=nn.LayerNorm, norm_pix_loss=False) return 
model ================================================ FILE: artrackv2_mindspore/lib/models/layers/patch_embed.py ================================================ import sys sys.path.append("/home/baiyifan/code/AR2_mindspore_cp/2stage") import mindspore.nn as nn from mindspore import ops from lib.models.timm import to_2tuple class PatchEmbed(nn.Cell): """ 2D Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = flatten self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size,pad_mode='valid',has_bias=True) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def construct(self, x): # allow different input size # B, C, H, W = x.shape # _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") # _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") x = self.proj(x) if self.flatten: x = ops.flatten(x,start_dim=2) x = x.swapaxes(1, 2) # BCHW -> BNC x = self.norm(x) return x ================================================ FILE: artrackv2_mindspore/lib/models/layers/rpe.py ================================================ import torch import torch.nn as nn from timm.models.layers import trunc_normal_ def generate_2d_relative_positional_encoding_index(z_shape, x_shape): ''' z_shape: (z_h, z_w) x_shape: (x_h, x_w) ''' z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1])) x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1])) z_2d_index_h = z_2d_index_h.flatten(0) z_2d_index_w = 
z_2d_index_w.flatten(0) x_2d_index_h = x_2d_index_h.flatten(0) x_2d_index_w = x_2d_index_w.flatten(0) diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :] diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :] diff = torch.stack((diff_h, diff_w), dim=-1) _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0) return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1]) def generate_2d_concatenated_self_attention_relative_positional_encoding_index(z_shape, x_shape): ''' z_shape: (z_h, z_w) x_shape: (x_h, x_w) ''' z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1])) x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1])) z_2d_index_h = z_2d_index_h.flatten(0) z_2d_index_w = z_2d_index_w.flatten(0) x_2d_index_h = x_2d_index_h.flatten(0) x_2d_index_w = x_2d_index_w.flatten(0) concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h)) concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w)) diff_h = concatenated_2d_index_h[:, None] - concatenated_2d_index_h[None, :] diff_w = concatenated_2d_index_w[:, None] - concatenated_2d_index_w[None, :] z_len = z_shape[0] * z_shape[1] x_len = x_shape[0] * x_shape[1] a = torch.empty((z_len + x_len), dtype=torch.int64) a[:z_len] = 0 a[z_len:] = 1 b=a[:, None].repeat(1, z_len + x_len) c=a[None, :].repeat(z_len + x_len, 1) diff = torch.stack((diff_h, diff_w, b, c), dim=-1) _, indices = torch.unique(diff.view((z_len + x_len) * (z_len + x_len), 4), return_inverse=True, dim=0) return indices.view((z_len + x_len), (z_len + x_len)) def generate_2d_concatenated_cross_attention_relative_positional_encoding_index(z_shape, x_shape): ''' z_shape: (z_h, z_w) x_shape: (x_h, x_w) ''' z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1])) x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1])) z_2d_index_h = z_2d_index_h.flatten(0) z_2d_index_w = 
z_2d_index_w.flatten(0) x_2d_index_h = x_2d_index_h.flatten(0) x_2d_index_w = x_2d_index_w.flatten(0) concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h)) concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w)) diff_h = x_2d_index_h[:, None] - concatenated_2d_index_h[None, :] diff_w = x_2d_index_w[:, None] - concatenated_2d_index_w[None, :] z_len = z_shape[0] * z_shape[1] x_len = x_shape[0] * x_shape[1] a = torch.empty(z_len + x_len, dtype=torch.int64) a[: z_len] = 0 a[z_len:] = 1 c = a[None, :].repeat(x_len, 1) diff = torch.stack((diff_h, diff_w, c), dim=-1) _, indices = torch.unique(diff.view(x_len * (z_len + x_len), 3), return_inverse=True, dim=0) return indices.view(x_len, (z_len + x_len)) class RelativePosition2DEncoder(nn.Module): def __init__(self, num_heads, embed_size): super(RelativePosition2DEncoder, self).__init__() self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size))) trunc_normal_(self.relative_position_bias_table, std=0.02) def forward(self, attn_rpe_index): ''' Args: attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size Returns: torch.Tensor: (1, num_heads, *) ''' return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0) ================================================ FILE: artrackv2_mindspore/lib/models/layers/self_practice.py ================================================ import torch import mindspore as ms import torch.nn as nn import mindspore.nn as msnn from mindspore import ops import numpy as np x = np.random.randn(3,5) x1 = torch.tensor(x) x2 = ms.tensor(x) length1 = torch.tensor([[2,4,1],[3,1,0],[4,2,2]]) length2 = ms.tensor([[2,4,1],[3,1,0],[4,2,2]]) y1 = torch.gather(x1,dim=1,index = length1) y2 = ops.gather_elements(x2,dim=1,index= length2) print(y1) print(y2) ================================================ FILE: artrackv2_mindspore/lib/models/ostrack/Vit_model_test.py ================================================ 
# Scratch script: builds a VisionTransformer backbone plus decoders from a
# tracker config and prints all parameter names.
import sys
sys.path.append("/home/djh/python-code/Artrackv2/2stage")
from lib.models.ostrack.vit import *
from lib.test.evaluation.tracker import Tracker
from lib.models.layers.mask_decoder import build_maskdecoder
from lib.models.layers.head import DropPathAllocator

tracker = Tracker('ostrack', '2stage_256_got', 'got10k_test', None)
param = tracker.get_parameters()
cfg = param.cfg
patch_start_index = 1
kwargs = {'patch_size': 16, 'embed_dim': 768, 'depth': 12, 'num_heads': 12, 'drop_path_rate': 0.1}
model = VisionTransformer(**kwargs)
model.finetune_track(cfg=cfg, patch_start_index=patch_start_index)
cross_2_decoder = build_maskdecoder(cfg)
drop_path = cfg.MODEL.DROP_PATH
drop_path_allocator = DropPathAllocator(drop_path)
num_heads = cfg.MODEL.NUM_HEADS
mlp_ratio = cfg.MODEL.MLP_RATIO
qkv_bias = cfg.MODEL.QKV_BIAS
drop_rate = cfg.MODEL.DROP_RATE
attn_drop = cfg.MODEL.ATTN_DROP
#def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):
score_mlp = build_score_decoder(cfg)
cover_mlp = build_score_decoder(cfg)
# NOTE(review): 'backbone' is never assigned in this script (the ViT instance is
# bound to 'model'); as written this line raises NameError — confirm intent.
model = OSTrack(
    backbone,
    #decoder,
    cross_2_decoder,
    score_mlp,
    #cover_mlp,
)
for name,param in model.parameters_and_names():
    print (param.name)


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/__init__.py
================================================
from .ostrack import build_ostrack


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/base_backbone.py
================================================
from functools import partial

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
import sys
sys.path.append("/home/baiyifan/code/AR2_mindspore_cp/2stage")

from lib.models.timm import *
from lib.models.layers.patch_embed import PatchEmbed
from lib.models.ostrack.utils import combine_tokens, recover_tokens
import time


def generate_square_subsequent_mask(sz, sx, ss):
    r"""Generate a square mask for the sequence.
    The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).

    NOTE(review): despite the docstring (kept from the torch original), this
    returns a boolean mask (~mask) built from the visibility table below, and
    'sum' shadows the builtin of the same name.
    """
    # 0 means mask, 1 means visible
    sum = sz + sx + ss
    mask = (ops.triu(ops.ones((sum, sum))) == 1).swapaxes(0, 1)
    mask[:, :] = 0
    mask[:int(sz/2), :int(sz/2)] = 1  # template self
    mask[int(sz/2):sz, int(sz/2):sz] = 1  # dt self
    mask[int(sz/2):sz, sz:sz+sx] = 1  # dt search
    mask[int(sz / 2):sz, -1] = 1  # dt search
    mask[sz:sz+sx, :sz+sx] = 1  # sr dt-t-sr
    mask[sz+sx:, :] = 1  # co dt-t-sr-co
    # mask[sz+sx:, :sz] = 0
    return ~mask


class BaseBackbone(nn.Cell):
    def __init__(self):
        super().__init__()

        # for original ViT
        self.pos_embed = None
        self.img_size = [224, 224]
        self.patch_size = 16
        self.embed_dim = 384

        self.cat_mode = 'direct'

        self.pos_embed_z = None
        self.pos_embed_x = None
        self.bins = 400
        in_channel = 768
        self.range = 2
        # Vocabulary: bins*range coordinate tokens + 6 special tokens; the
        # padding index is the 5th special token.
        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range+4)
        # NOTE: MindSpore's nn.Embedding has no max_norm/norm_type, so the
        # torch arguments max_norm=1, norm_type=2.0 had to be dropped here.
        print(self.bins)
        self.position_embeddings = nn.Embedding(
            5, in_channel)
        self.output_bias = ms.Parameter(ops.zeros(self.bins * self.range + 6))
        self.prev_position_embeddings = nn.Embedding(7 * 4, in_channel)

        self.template_segment_pos_embed = None
        self.search_segment_pos_embed = None

        self.return_inter = False
        self.return_stage = [2, 5, 8, 11]

        self.add_cls_token = False
        self.add_sep_seg = False

    def finetune_track(self, cfg, patch_start_index=1):
        # Adapt the pretrained ViT (224x224, stride=patch_size) to the tracking
        # template/search resolutions configured in cfg.
        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
        new_patch_size = cfg.MODEL.BACKBONE.STRIDE

        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
        self.return_inter = cfg.MODEL.RETURN_INTER
        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG

        # resize patch embedding
        if new_patch_size != self.patch_size:
            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
            old_patch_embed = {}
            for name, param in self.patch_embed.named_parameters():
                if 'weight' in name:
                    # Resample the conv kernel to the new stride via bicubic
                    # interpolation, then rewrap as a MindSpore Parameter.
                    param = ops.interpolate(param, size=(new_patch_size, new_patch_size), mode='bicubic',
                                            align_corners=False)
                    param = ms.Parameter(param)
                old_patch_embed[name] = param
            print("Attention:old_patch_embed:",old_patch_embed)
            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
                                          embed_dim=self.embed_dim)
            self.patch_embed.proj.bias = old_patch_embed['proj.bias']
            self.patch_embed.proj.weight = old_patch_embed['proj.weight']

        # for patch embedding: drop the cls-token slot(s) and reshape the
        # pretrained positional embedding back to its 2D grid.
        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]
        patch_pos_embed = patch_pos_embed.swapaxes(1, 2)
        B, E, Q = patch_pos_embed.shape
        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size
        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)

        # for search region
        H, W = search_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        search_patch_pos_embed = ops.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                 align_corners=False)
        search_patch_pos_embed = ops.flatten(search_patch_pos_embed,start_dim=2)
        search_patch_pos_embed = search_patch_pos_embed.swapaxes(1, 2)

        # for template region
        H, W = template_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        template_patch_pos_embed = ops.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                   align_corners=False)
        template_patch_pos_embed = ops.flatten(template_patch_pos_embed,start_dim=2).swapaxes(1, 2)

        # Separate learnable copies for the two templates and the search region.
        self.pos_embed_z = ms.Parameter(template_patch_pos_embed)
        self.pos_embed_z0 = ms.Parameter(template_patch_pos_embed)
        self.pos_embed_z1 = ms.Parameter(template_patch_pos_embed)
        self.pos_embed_x = ms.Parameter(search_patch_pos_embed)

        # for cls token (keep it but not used)
        if self.add_cls_token and patch_start_index > 0:
            cls_pos_embed = self.pos_embed[:, 0:1, :]
            self.cls_pos_embed = ms.Parameter(cls_pos_embed)

        # separate token and segment token
        if self.add_sep_seg:
            self.template_segment_pos_embed = ms.Parameter(ops.zeros((1, 1, self.embed_dim)))
            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)
            self.search_segment_pos_embed = ms.Parameter(ops.zeros((1, 1, self.embed_dim)))
            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)

        # self.cls_token = None
        # self.pos_embed = None

        if self.return_inter:
            for i_layer in self.fpn_stage:
                if i_layer != 11:
                    norm_layer = partial(nn.LayerNorm, eps=1e-6)
                    layer = norm_layer(self.embed_dim)
                    layer_name = f'norm{i_layer}'
                    self.add_module(layer_name, layer)

    def forward_features(self, z_0, z_1_feat, x, identity, seqs_input):
        # Joint pass over [template z_0 ; dynamic-template feature z_1 ;
        # search x ; coordinate-token sequence], followed by autoregressive
        # decoding of 4 coordinate tokens via the shared word embedding.
        share_weight = self.word_embeddings.embedding_table.T
        out_list = []
        # Ids of the special start/score tokens appended after the input sequence.
        begin = self.bins * self.range
        begin_2 = self.bins * self.range + 1
        begin_3 = self.bins * self.range + 2
        begin_4 = self.bins * self.range + 3
        score = self.bins * self.range + 5
        B, H, W = x.shape[0], x.shape[2], x.shape[3]

        a = ops.cat((ops.ones((B, 1),dtype=x.dtype) * begin, ops.ones((B, 1),dtype=x.dtype) * begin_2,
                     ops.ones((B, 1),dtype=x.dtype) * begin_3, ops.ones((B, 1),dtype=x.dtype) * begin_4,
                     ops.ones((B, 1),dtype=x.dtype) * score), axis=1)
        b = seqs_input
        # c = ops.cat([a], axis=1)
        c = ops.cat([b, a], axis=1)
        seqs_input_ = c.to(ms.int64)
        output_x_feat = x.copy()  # original: output_x_feat = x.clone()
        tgt = self.word_embeddings(seqs_input_).permute(1, 0, 2)
        x = self.patch_embed(x)
        z_0 = self.patch_embed(z_0)
        z_1 = z_1_feat
        s_x = x.shape[1]
        s_z = z_0.shape[1] + z_1.shape[1]
        s_s = seqs_input.shape[1]
        # Per-region identity embeddings (template0 / template1 / search).
        z_0 += identity[:, 0, :].tile((B, self.pos_embed_z.shape[1], 1))
        z_1 += identity[:, 1, :].tile((B, self.pos_embed_z.shape[1], 1))
        x += identity[:, 2, :].tile((B, self.pos_embed_x.shape[1], 1))
        query_embed_ = self.position_embeddings.embedding_table.unsqueeze(1)
        prev_embed_ = self.prev_position_embeddings.embedding_table.unsqueeze(1)
        query_embed = ops.cat([prev_embed_, query_embed_], axis=0)
        #query_embed = ops.cat([query_embed_], axis=0)
        query_embed = query_embed.tile((1, B, 1))
        tgt = tgt.swapaxes(0, 1)
        query_embed = query_embed.swapaxes(0, 1)
        # print(self.pos_embed_z0.value())
        # print(self.pos_embed_z1.value())
        # print(self.pos_embed_x.value())
        z_0 += self.pos_embed_z0
        z_1 += self.pos_embed_z1
        x += self.pos_embed_x
        s_s = seqs_input_.shape[1]
        mask = generate_square_subsequent_mask(s_z, s_x, s_s)
        tgt += query_embed[:, :tgt.shape[1]]
        z = ops.cat((z_0, z_1), axis=1)
        zx = combine_tokens(z, x, mode=self.cat_mode)
        zxs = ops.cat((zx, tgt), axis=1)

        zxs = self.pos_drop(zxs)
        m1 = zxs[:, -5:-1]
        for j, blk in enumerate(self.blocks):
            zxs = blk(zxs, padding_mask=mask)
        zxs_numpy = zxs.numpy()  # NOTE(review): debug sync/conversion, result unused
        m3 = zxs[:, -5:-1]
        for j, blk in enumerate(self.extension):
            zxs = blk(zxs, padding_mask=mask)

        lens_z = self.pos_embed_z.shape[1]
        lens_x = self.pos_embed_x.shape[1]
        z_0_feat = zxs[:, :lens_z]
        z_1_feat = zxs[:, lens_z:lens_z*2]
        x_feat = zxs[:, lens_z*2:lens_z*2+lens_x]
        m2 = zxs[:, -5:-1]
        # Last 5 tokens: 4 coordinate slots + 1 score slot.
        x_out = self.norm(zxs[:, -5:-1])
        score_feat = zxs[:, -1]
        seq_feat = x_out
        # Tied output projection: logits over the coordinate vocabulary.
        at = ops.matmul(x_out, share_weight)
        out = at + self.output_bias
        temp = out.swapaxes(0, 1)
        out_list.append(out.unsqueeze(0))
        out = ops.softmax(out,-1)
        value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1]
        # Greedy decode of the 4 coordinate tokens and their confidences.
        for i in range(4):
            value, extra_seq = out[:, i, :].topk(dim=-1, k=1)[0], out[:, i, :].topk(dim=-1, k=1)[1]
            if i == 0:
                seqs_output = extra_seq
                values = value
            else:
                seqs_output = ops.cat([seqs_output, extra_seq], axis=-1)
                values = ops.cat([values, value], axis=-1)
        output = {'seqs': seqs_output, 'class': values, 'feat': temp, "state": "val/test",
                  "x_feat": ops.stop_gradient(output_x_feat), "seq_feat": seq_feat}
        return output, z_0_feat, z_1_feat, x_feat, score_feat

    def construct(self, z_0, z_1_feat, x, identity, seqs_input, **kwargs):
        """
        Joint feature extraction and relation modeling for the basic ViT backbone.
        Args:
            z (ops.Tensor): template feature, [B, C, H_z, W_z]
            x (ops.Tensor): search region feature, [B, C, H_x, W_x]

        Returns:
            x (ops.Tensor): merged template and search region feature, [B, L_z+L_x, C]
            attn : None
        """
        output = self.forward_features(z_0, z_1_feat, x, identity, seqs_input)

        return output


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/load_parameter_test.py
================================================
# Scratch script: renames PyTorch checkpoint keys to their MindSpore
# equivalents (LayerNorm weight/bias -> gamma/beta, Embedding weight ->
# embedding_table) and optionally saves a .ckpt file.
import torch
import sys
sys.path.append("/home/djh/python-code/Artrackv2/2stage")
from mindspore import Tensor
from mindspore import save_checkpoint


def get_keymap_txt(pth_file):
    # If it is a .tar archive, the following snippet must run
    checkpoint = torch.load(pth_file,map_location="cpu")
    state_dict = checkpoint['net']
    # end

    # # Otherwise execute this snippet instead
    # map_path = pth_file.split('.')[0] + '_key_map.txt'
    # map_file = open(map_path, 'w')
    # state_dict = torch.load(pth_file, map_location=torch.device('cpu'))
    # if 'model_state' in state_dict:
    #     state_dict = state_dict['model_state']
    # elif 'module' in state_dict:
    #     state_dict = state_dict['module']
    # elif 'model' in state_dict:
    #     state_dict = state_dict['model']
    # # end

    # NOTE(review): these names shadow the builtins list/dict, and 'dict'
    # is never filled, so the second loop below iterates nothing.
    list = []
    dict = {}
    for name,value in state_dict.items():
        print(name)
        if name == "cross_2_decoder.decoder_blocks.0.norm1.weight":
            print(state_dict[name])
        new_name = name.replace("norm1.weight","norm1.gamma")
        new_name = new_name.replace("norm1.bias","norm1.beta")
        new_name = new_name.replace("norm2.weight","norm2.gamma")
        new_name = new_name.replace("norm2.bias","norm2.beta")
        new_name = new_name.replace("decoder_norm.weight","decoder_norm.gamma")
        new_name = new_name.replace("decoder_norm.bias","decoder_norm.beta")
        new_name = new_name.replace("word_embeddings.weight", "word_embeddings.embedding_table")
        new_name = new_name.replace("position_embeddings.weight", "position_embeddings.embedding_table")
        new_name = new_name.replace("prev_position_embeddings.weight", "prev_position_embeddings.embedding_table")
        new_name = new_name.replace("norm.weight","norm.gamma")
        new_name = new_name.replace("norm.bias","norm.beta")
        list.append(new_name)
    ms_params_list=[]
    for name,value in dict.items():
        param_dict={}
        param_dict['name'] = name
        param_dict['data'] = Tensor(value.numpy())
        if name=="backbone.pos_embed_z0":
            print(param_dict['data'])
        ms_params_list.append(param_dict)
    # To generate the converted checkpoint file, run the commented line below
    # save_checkpoint(ms_params_list, "/home/djh/python-code/Artrackv2/checkpoint1.ckpt")

pth_file = "/mnt/d/Download/OSTrack_ep0030.pth.tar"
get_keymap_txt(pth_file)


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/ostrack.py
================================================
"""
Basic OSTrack model.
"""
import sys
from copy import deepcopy
import math
import os
from typing import List

import mindspore as ms
from mindspore import nn

from lib.models.timm import *
from lib.models.ostrack.vit import vit_base_patch16_224, vit_large_patch16_224
from lib.models.ostrack.vit_ce import vit_large_patch16_224_ce, vit_base_patch16_224_ce
from lib.models.layers.mask_decoder import build_maskdecoder
from lib.models.layers.head import build_decoder, MLP, DropPathAllocator


class OSTrack(nn.Cell):
    """ This is the base class for OSTrack """

    def __init__(self, transformer,
                 #decoder,
                 cross_2_decoder,
                 score_mlp,
                 #cover_mlp,
                 ):
        """ Initializes the model.
        Parameters:
            transformer: torch module of the transformer architecture.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.backbone = transformer
        self.score_mlp = score_mlp
        # Learnable per-region identity embedding (template0/template1/search).
        self.identity = ms.Parameter(ops.zeros((1, 3, 768)))
        self.identity = trunc_normal_(self.identity, std=.02)
        self.cross_2_decoder = cross_2_decoder

    def construct(self, template: ms.Tensor,
                  dz_feat: ms.Tensor,
                  search: ms.Tensor,
                  ce_template_mask=None,
                  ce_keep_rate=None,
                  return_last_attn=False,
                  seq_input=None,
                  head_type=None,
                  stage=None,
                  search_feature=None,
                  target_in_search_img=None,
                  gt_bboxes=None,
                  ):
        template_0 = template[:, 0]
        out, z_0_feat, z_1_feat, x_feat, score_feat = self.backbone(z_0=template_0, z_1_feat=dz_feat, x=search,
                                                                    identity=self.identity, seqs_input=seq_input,
                                                                    ce_template_mask=ce_template_mask,
                                                                    ce_keep_rate=ce_keep_rate,
                                                                    return_last_attn=return_last_attn,)
        seq_feat = out['seq_feat'].permute(1, 0 ,2)
        share_weight = self.backbone.word_embeddings.embedding_table[:800, :].unsqueeze(0).tile((seq_feat.shape[1], 1, 1))
        pos = self.backbone.position_embeddings.embedding_table.unsqueeze(0).tile((seq_feat.shape[1], 1, 1)).permute(1, 0 ,2)
        score = self.score_mlp(score_feat)
        # NOTE(review): ops.clamp is not in-place and this result is discarded,
        # so 'score' is stored unclamped below — confirm whether clamping was intended.
        ops.clamp(score, min=0.0, max=1.0)
        out['score'] = score
        loss = ms.tensor(0.0, dtype=ms.float32)
        if target_in_search_img != None:
            # Training path: reconstruct the target appearance and return the
            # renewal loss along with the updated dynamic-template feature.
            target_in_search_gt = self.backbone.patch_embed(target_in_search_img)
            z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5),
                                        int(z_1_feat.shape[1] ** 0.5), z_1_feat.shape[2]).permute(0, 3, 1, 2)
            target_in_search_gt = self.cross_2_decoder.unpatchify(target_in_search_gt)
            update_img, loss_temp = self.cross_2_decoder(z_1_feat, target_in_search_gt)
            update_feat = self.cross_2_decoder.patchify(update_img)
            out['dz_feat'] = update_feat
            loss += loss_temp
            out['renew_loss'] = loss
        else:
            # Inference path: only produce the updated dynamic-template feature.
            z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5),
                                        int(z_1_feat.shape[1] ** 0.5), z_1_feat.shape[2]).permute(0, 3, 1, 2)
            update_feat = self.cross_2_decoder(z_1_feat, eval=True)
            update_feat = self.cross_2_decoder.patchify(update_feat)
            out['dz_feat'] = update_feat
        return out

    def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None, gt_score_map=None,
                     head_type=None, stage=None, search_feature=None):
        """
        cat_feature: output embeddings of the backbone, it can be (HW1+HW2, B, C) or (HW2, B, C)
        """
        if self.head_type == "CORNER":
            # run the corner head
            pred_box, score_map = self.box_head(opt_feat, True)
            outputs_coord = box_xyxy_to_cxcywh(pred_box)
            outputs_coord_new = outputs_coord.view(bs, Nq, 4)
            out = {'pred_boxes': outputs_coord_new,
                   'score_map': score_map,
                   }
            return out

        elif self.head_type == "CENTER":
            # run the center head
            score_map_ctr, bbox, size_map, offset_map = self.box_head(opt_feat, gt_score_map)
            # outputs_coord = box_xyxy_to_cxcywh(bbox)
            outputs_coord = bbox
            outputs_coord_new = outputs_coord.view(bs, Nq, 4)
            out = {'pred_boxes': outputs_coord_new,
                   'score_map': score_map_ctr,
                   'size_map': size_map,
                   'offset_map': offset_map}
            return out
        elif self.head_type == "PIX":
            output_dict = self.box_head(cat_feature, pos_z, pos_x, identity, seq_input, head_type, stage, search_feature)
            return output_dict
        else:
            raise NotImplementedError


class MlpScoreDecoder(nn.Cell):
    # Small MLP that maps the score token(s) to a single confidence value.
    def __init__(self, in_dim, hidden_dim, num_layers, bn=False):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        out_dim = 1  # score
        if bn:
            self.layers = nn.SequentialCell(*[nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k), nn.ReLU())
                                              if i < num_layers - 1
                                              else nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k))
                                              for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])
        else:
            self.layers = nn.SequentialCell(*[nn.SequentialCell(nn.Dense(n, k), nn.ReLU())
                                              if i < num_layers - 1
                                              else nn.Dense(n, k)
                                              for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])

    def construct(self, reg_tokens):
        """
        reg tokens shape: (b, 4, embed_dim)
        """
        x = self.layers(reg_tokens)  # (b, 4, 1)
        x = ops.mean(x,axis=1)  # (b, 1)
        return x


def build_score_decoder(cfg):
    return MlpScoreDecoder(
        in_dim=cfg.MODEL.BACKBONE.EMBEDDIM,
        hidden_dim=cfg.MODEL.BACKBONE.EMBEDDIM,
        num_layers=2,
        bn=False
    )


def build_ostrack(cfg, training=True):
    # Assemble the full OSTrack model: ViT backbone + mask decoder + score MLP,
    # then load the (already key-converted) MindSpore checkpoint.
    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root
    pretrained_path = "/home/baiyifan/code/vitrack/"
    if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:
        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)
    else:
        pretrained = ''

    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':
        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':
        print("i use vit_large")
        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    elif cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224_ce':
        backbone = vit_base_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
                                           ce_loc=cfg.MODEL.BACKBONE.CE_LOC,
                                           ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,
                                           )
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224_ce':
        backbone = vit_large_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
                                            ce_loc=cfg.MODEL.BACKBONE.CE_LOC,
                                            ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,
                                            )
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    else:
        raise NotImplementedError

    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)

    #decoder = build_maskdecoder(cfg)
    cross_2_decoder = build_maskdecoder(cfg)

    drop_path = cfg.MODEL.DROP_PATH
    drop_path_allocator = DropPathAllocator(drop_path)
    num_heads = cfg.MODEL.NUM_HEADS
    mlp_ratio = cfg.MODEL.MLP_RATIO
    qkv_bias = cfg.MODEL.QKV_BIAS
    drop_rate = cfg.MODEL.DROP_RATE
    attn_drop = cfg.MODEL.ATTN_DROP

    score_mlp = build_score_decoder(cfg)
    cover_mlp = build_score_decoder(cfg)

    model = OSTrack(
        backbone,
        #decoder,
        cross_2_decoder,
        score_mlp,
        #cover_mlp,
    )
    from mindspore.amp import auto_mixed_precision
    model = auto_mixed_precision(model, 'O0')
    load_from = cfg.MODEL.PRETRAIN_FILE
    param_dict = ms.load_checkpoint(load_from)
    param_not_load, _ = ms.load_param_into_net(model, param_dict)
    print("Parameters not loaded:",param_not_load)
    print('Load pretrained model from: ' + load_from)
    # Share the first-template positional embedding with the dynamic template.
    model.backbone.pos_embed_z0 = model.backbone.pos_embed_z1
    return model


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/ostrack_test.py
================================================
# Scratch script: builds the full OSTrack model from a tracker config.
import sys
sys.path.append("/home/djh/python-code/Artrackv2/2stage")
from lib.models.ostrack.vit import *
from lib.test.evaluation.tracker import Tracker
from lib.models.ostrack import *

tracker = Tracker('ostrack', '2stage_256_got', 'got10k_test', None)
param = tracker.get_parameters()
cfg = param.cfg
model = build_ostrack(cfg,training=False)


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/utils.py
================================================
import math

import mindspore as ms
from mindspore import ops


def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):
    # [B, HW, C] -> merged token sequence according to 'mode'.
    len_t = template_tokens.shape[1]
    len_s = search_tokens.shape[1]

    if mode == 'direct':
        merged_feature = ops.cat((template_tokens, search_tokens), axis=1)
    elif mode == 'template_central':
        # Insert the template tokens into the middle of the search tokens.
        central_pivot = len_s // 2
        first_half = search_tokens[:, :central_pivot, :]
        second_half = search_tokens[:, central_pivot:, :]
        merged_feature = ops.cat((first_half, template_tokens, second_half), axis=1)
    elif mode == 'partition':
        feat_size_s = int(math.sqrt(len_s))
        feat_size_t = int(math.sqrt(len_t))
        window_size = math.ceil(feat_size_t / 2.)
        # pad feature maps to multiples of window size
        B, _, C = template_tokens.shape
        H = W = feat_size_t
        template_tokens = template_tokens.view(B, H, W, C)
        pad_l = pad_b = pad_r = 0
        # pad_r = (window_size - W % window_size) % window_size
        pad_t = (window_size - H % window_size) % window_size
        template_tokens = ops.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))

        _, Hp, Wp, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)
        # Fold the two template half-windows side by side along the width axis.
        template_tokens = ops.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], axis=2)
        _, Hc, Wc, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, -1, C)
        merged_feature = ops.cat([template_tokens, search_tokens], axis=1)

        # calculate new h and w, which may be useful for SwinT or others
        merged_h, merged_w = feat_size_s + Hc, feat_size_s
        if return_res:
            return merged_feature, merged_h, merged_w

    else:
        raise NotImplementedError

    return merged_feature


def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):
    # Inverse of combine_tokens: restore [template ; search] ordering.
    if mode == 'direct':
        recovered_tokens = merged_tokens
    elif mode == 'template_central':
        central_pivot = len_search_token // 2
        len_remain = len_search_token - central_pivot
        len_half_and_t = central_pivot + len_template_token

        first_half = merged_tokens[:, :central_pivot, :]
        second_half = merged_tokens[:, -len_remain:, :]
        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]

        recovered_tokens = ops.cat((template_tokens, first_half, second_half), axis=1)
    elif mode == 'partition':
        recovered_tokens = merged_tokens
    else:
        raise NotImplementedError

    return recovered_tokens


def window_partition(x, window_size: int):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size: int, H: int, W: int):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


================================================
FILE: artrackv2_mindspore/lib/models/ostrack/vit.py
================================================
""" Vision Transformer (ViT) in PyTorch

A PyTorch implement of Vision Transformers as described in:

'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
    - https://arxiv.org/abs/2010.11929

`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
    - https://arxiv.org/abs/2106.10270

The official jax code is released and available at https://github.com/google-research/vision_transformer

DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877

Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ...
  check it out for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert

Hacked together by / Copyright 2021 Ross Wightman

Modified by Botao Ye
"""
import sys
sys.path.append("/home/baiyifan/code/AR2_mindspore_cp/2stage")
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy

from lib.models.timm import *
from lib.models.layers.patch_embed import PatchEmbed
from lib.models.ostrack.base_backbone import BaseBackbone
import mindspore as ms
from mindspore import nn as msnn
import mindspore.ops as ops
import mindspore.numpy as np
from mindspore.common.initializer import initializer
import numpy


class Attention(msnn.Cell):
    # Standard multi-head self-attention with an optional boolean padding mask.
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = msnn.Dense(dim, dim * 3, has_bias=qkv_bias)
        self.attn_drop = msnn.Dropout(p=attn_drop)
        self.proj = msnn.Dense(dim, dim)
        self.proj_drop = msnn.Dropout(p=proj_drop)

    def construct(self, x, return_attention=False,padding_mask=None):
        B, N, C = x.shape
        # print("x:",x)
        # weight = ms.tensor(self.qkv.weight.value(),dtype=ms.float32).numpy()
        # bias = ms.tensor(self.qkv.bias.value(),dtype=ms.float32).numpy()
        # m = ops.ones_like(x,dtype=ms.float32)
        # test = self.qkv(m).numpy()
        # with numpy.printoptions (precision=12):
        #     print("weight:",weight)
        #     print("bias:",bias)
        #     print("test:",test)
        # numpy.savez("/home/baiyifan/code/Artrackv2_mindspore/2stage/outputv1.npz",weight,bias,test)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.swapaxes(-2, -1)) * self.scale
        if padding_mask != None:
            # Masked positions are driven to -inf before the softmax.
            attn = attn.masked_fill(padding_mask, float("-inf"))
        attn = ops.softmax(attn,axis=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        if return_attention:
            return x, attn

        return x


class Block(msnn.Cell):
    # Pre-norm transformer encoder block: MHSA + MLP with residuals/drop-path.
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=msnn.GELU, norm_layer=msnn.LayerNorm):
        super().__init__()
        # MindSpore LayerNorm takes the normalized shape as a tuple.
        norm_layer_dim=[]
        norm_layer_dim.append(dim)
        norm_layer_dim = tuple(norm_layer_dim)
        self.norm1 = norm_layer(norm_layer_dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else msnn.Identity()
        self.norm2 = norm_layer(norm_layer_dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def construct(self, x, return_attention=False,padding_mask=None):
        if return_attention:
            feat, attn = self.attn(self.norm1(x), True,padding_mask)
            x = x + self.drop_path(feat)
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x, attn
        else:
            x = x + self.drop_path(self.attn(self.norm1(x),padding_mask=padding_mask))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x


class VisionTransformer(BaseBackbone):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (msnn.Cell): patch embedding layer
            norm_layer: (msnn.Cell): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(msnn.LayerNorm, epsilon=1e-6)
        act_layer = act_layer or msnn.GELU

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = ms.Parameter(ops.zeros((1, 1, embed_dim)))
        self.dist_token = ms.Parameter(ops.zeros((1, 1, embed_dim))) if distilled else None
        self.pos_embed = ms.Parameter(ops.zeros((1, num_patches + self.num_tokens, embed_dim)))
        self.pos_drop = msnn.Dropout(p=drop_rate)

        dpr = [x.item() for x in ops.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = msnn.SequentialCell(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        # Three extra blocks appended after the main trunk (ARTrackV2 extension).
        self.extension = msnn.SequentialCell(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(3)])
        norm_layer_dim=[]
        norm_layer_dim.append(embed_dim)
        self.norm = norm_layer(norm_layer_dim)

        # # Representation layer
        # if representation_size and not distilled:
        #     self.num_features = representation_size
        #     self.pre_logits = msnn.SequentialCell(OrderedDict([
        #         ('fc', msnn.Linear(embed_dim, representation_size)),
        #         ('act', msnn.Tanh())
        #     ]))
        # else:
        #     self.pre_logits = msnn.Identity()
        #
        # # Classifier head(s)
        # self.head = msnn.Linear(self.num_features, num_classes) if num_classes > 0 else msnn.Identity()
        # self.head_dist = None
        # if distilled:
        #     self.head_dist = msnn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else msnn.Identity()

        self.init_weights(weight_init)

    def init_weights(self, mode=''):
        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    # @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        _load_weights(self, checkpoint_path, prefix)

    # @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = msnn.Dense(self.embed_dim, num_classes) if num_classes > 0 else msnn.Identity()
        if self.num_tokens == 2:
            self.head_dist = msnn.Dense(self.embed_dim, self.num_classes) if num_classes > 0 else msnn.Identity()


def _init_vit_weights(Cell: msnn.Cell, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (Cell name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(Cell, msnn.Dense):
        if name.startswith('head'):
            bias_shape = Cell.bias.shape
            weight_shape = Cell.weight.shape
            Cell.weight=initializer('zeros',weight_shape)
            Cell.bias=initializer(Constant(head_bias),bias_shape)
        elif name.startswith('pre_logits'):
            bias_shape = Cell.bias.shape
            weight_shape = Cell.weight.shape
            lecun_normal_(Cell.weight)
            Cell.bias=initializer('zeros',bias_shape)
        else:
            weight_shape = Cell.weight.shape
            if jax_impl:
                # NOTE(review): msnn has no 'init' submodule — this branch looks
                # unported from torch (nn.init.xavier_uniform_); confirm it is unreachable.
                msnn.init.xavier_uniform_(Cell.weight)
                if Cell.bias is not None:
                    bias_shape = Cell.bias.shape
                    if 'mlp' in name:
                        Cell.bias=initializer('normal',bias_shape,sigma=1e-6)
                    else:
                        Cell.bias=initializer('zeros',bias_shape)
            else:
                trunc_normal_(Cell.weight, std=.02)
                if Cell.bias is not None:
                    bias_shape = Cell.bias.shape
                    Cell.bias=initializer('zeros',bias_shape)
    elif jax_impl and isinstance(Cell, msnn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        weight_shape = Cell.weight.shape
        lecun_normal_(Cell.weight)
        if Cell.bias is not None:
            bias_shape = Cell.bias.shape
            Cell.bias=initializer('zeros',bias_shape)


def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.swapaxes([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.swapaxes([2, 0, 1])
            elif w.ndim == 2:
                w = w.swapaxes([1, 0])
        return ops.from_numpy(w)

    w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' if hasattr(model.patch_embed, 'backbone'): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if isinstance(model.head, msnn.Dense) and 
model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) if isinstance(getattr(model.pre_logits, 'fc', None), msnn.Dense) and f'{prefix}pre_logits/bias' in w: model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) for i, block in enumerate(model.blocks.children()): block_prefix = f'{prefix}Transformer/encoderblock_{i}/' mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) block.attn.qkv.weight.copy_(ops.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) block.attn.qkv.bias.copy_(ops.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) for r in range(2): getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 print('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) ntok_new = posemb_new.shape[1] if num_tokens: posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] ntok_new -= num_tokens else: posemb_tok, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(math.sqrt(len(posemb_grid))) if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 assert len(gs_new) >= 2 print('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = ops.interpolate(posemb_grid, size=gs_new, mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) posemb = ops.cat([posemb_tok, posemb_grid], axis=1) return posemb def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} if 'model' in state_dict: # For deit models state_dict = state_dict['model'] for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k and len(v.shape) < 4: # For old models that I trained prior to conv based patchification O, I, H, W = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) elif k == 'pos_embed' and v.shape != model.pos_embed.shape: # To resize pos embedding when using model at different size from pretrained weights v = resize_pos_embed( v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) out_dict[k] = v return out_dict def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = VisionTransformer(**kwargs) if pretrained: if 'npz' in pretrained: model.load_pretrained(pretrained, 
prefix='') else: checkpoint = ms.load_checkpoint(pretrained, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False) print('Load pretrained model from: ' + pretrained) return model def vit_base_patch16_224(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model def vit_large_patch16_224(pretrained=False, **kwargs): model_kwargs = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model ================================================ FILE: artrackv2_mindspore/lib/models/ostrack/vit_ce.py ================================================ import math import logging from functools import partial from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import to_2tuple from lib.models.layers.patch_embed import PatchEmbed from .utils import combine_tokens, recover_tokens from .vit import VisionTransformer from ..layers.attn_blocks import CEBlock _logger = logging.getLogger(__name__) class VisionTransformerCE(VisionTransformer): """ Vision Transformer with candidate elimination (CE) module A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, 
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None, weight_init='', ce_loc=None, ce_keep_ratio=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set distilled (bool): model includes a distillation token and head as in DeiT models drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer weight_init: (str): weight init scheme """ # super().__init__() super().__init__() if isinstance(img_size, tuple): self.img_size = img_size else: self.img_size = to_2tuple(img_size) self.patch_size = patch_size self.in_chans = in_chans self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 2 if distilled else 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule blocks = 
[] ce_index = 0 self.ce_loc = ce_loc for i in range(depth): ce_keep_ratio_i = 1.0 if ce_loc is not None and i in ce_loc: ce_keep_ratio_i = ce_keep_ratio[ce_index] ce_index += 1 blocks.append( CEBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, keep_ratio_search=ce_keep_ratio_i) ) self.blocks = nn.Sequential(*blocks) self.norm = norm_layer(embed_dim) self.init_weights(weight_init) def forward_features(self, z, x, mask_z=None, mask_x=None, ce_template_mask=None, ce_keep_rate=None, return_last_attn=False ): B, H, W = x.shape[0], x.shape[2], x.shape[3] x = self.patch_embed(x) z = self.patch_embed(z) # attention mask handling # B, H, W if mask_z is not None and mask_x is not None: mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0] mask_z = mask_z.flatten(1).unsqueeze(-1) mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. 
/ self.patch_size).to(torch.bool)[0] mask_x = mask_x.flatten(1).unsqueeze(-1) mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode) mask_x = mask_x.squeeze(-1) if self.add_cls_token: cls_tokens = self.cls_token.expand(B, -1, -1) cls_tokens = cls_tokens + self.cls_pos_embed z += self.pos_embed_z x += self.pos_embed_x if self.add_sep_seg: x += self.search_segment_pos_embed z += self.template_segment_pos_embed x = combine_tokens(z, x, mode=self.cat_mode) if self.add_cls_token: x = torch.cat([cls_tokens, x], dim=1) x = self.pos_drop(x) lens_z = self.pos_embed_z.shape[1] lens_x = self.pos_embed_x.shape[1] global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device) global_index_t = global_index_t.repeat(B, 1) global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device) global_index_s = global_index_s.repeat(B, 1) removed_indexes_s = [] for i, blk in enumerate(self.blocks): x, global_index_t, global_index_s, removed_index_s, attn = \ blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate) if self.ce_loc is not None and i in self.ce_loc: removed_indexes_s.append(removed_index_s) x = self.norm(x) lens_x_new = global_index_s.shape[1] lens_z_new = global_index_t.shape[1] z = x[:, :lens_z_new] x = x[:, lens_z_new:] if removed_indexes_s and removed_indexes_s[0] is not None: removed_indexes_cat = torch.cat(removed_indexes_s, dim=1) pruned_lens_x = lens_x - lens_x_new pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device) x = torch.cat([x, pad_x], dim=1) index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1) # recover original token order C = x.shape[-1] # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1)) x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x) x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode) # re-concatenate with the template, which may be further used by other modules x = torch.cat([z, x], dim=1) aux_dict = 
{ "attn": attn, "removed_indexes_s": removed_indexes_s, # used for visualization } return x, aux_dict def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None, tnc_keep_rate=None, return_last_attn=False): x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,) return x, aux_dict def _create_vision_transformer(pretrained=False, **kwargs): model = VisionTransformerCE(**kwargs) if pretrained: if 'npz' in pretrained: model.load_pretrained(pretrained, prefix='') else: checkpoint = torch.load(pretrained, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False) print('Load pretrained model from: ' + pretrained) return model def vit_base_patch16_224_ce(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer(pretrained=pretrained, **model_kwargs) return model def vit_large_patch16_224_ce(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). 
""" model_kwargs = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer(pretrained=pretrained, **model_kwargs) return model ================================================ FILE: artrackv2_mindspore/lib/models/timm.py ================================================ import mindspore as ms import mindspore.nn as nn import mindspore.ops as ops import math import numpy as np def to_2tuple(input): if isinstance(input,tuple): if len(input)==2: return input else: raise ValueError("The tuple's length is not 2!") elif isinstance(input,list): if len(input)==2: return tuple(x for x in input) else: raise ValueError("The List's length is not 2!") else: return (input,input) class Mlp(nn.Cell): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features drop_probs = to_2tuple(drop) self.fc1 = nn.Dense(in_features, hidden_features,has_bias=True) if act_layer==nn.GELU: self.act = act_layer(approximate=False) self.drop1 = nn.Dropout(p=drop_probs[0]) self.fc2 = nn.Dense(hidden_features, out_features,has_bias=True) self.drop2 = nn.Dropout(p=drop_probs[1]) def construct(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = x.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return x * random_tensor class DropPath(nn.Cell): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): super(DropPath, self).__init__() self.drop_prob = drop_prob self.scale_by_keep = scale_by_keep def construct(self, x): return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) def extra_repr(self): return f'drop_prob={round(self.drop_prob,3):0.3f}' def _trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1. + math.erf(x / math.sqrt(2.))) / 2. if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2) # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. 
shape = tensor.shape minval = ms.Tensor(2*l-1) maxval = ms.Tensor(2*u-1) x = ops.uniform(shape,minval, maxval) tensor = x # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor = tensor.erfinv() # Transform to proper mean, std tenspr = tensor.mul(std * math.sqrt(2.)) tensor = tensor.add(mean) # Clamp to ensure it's in the proper range tensor = tensor.clamp(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are applied while sampling the normal with mean/std applied, therefore a, b args should be adjusted to match the range of mean, std args. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _trunc_normal_(tensor, mean, std, a, b) def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    # NOTE(review): `_trunc_normal_` in this MindSpore port returns a new tensor instead of
    # filling `tensor` in place, so its result is discarded here; `mul_`/`add_` are torch
    # in-place APIs. This function looks non-functional as ported — verify before relying on it.
    _trunc_normal_(tensor, 0, 1.0, a, b)
    tensor.mul_(std).add_(mean)
    return tensor


def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` with variance scaled by its fan-in/fan-out (timm-style)."""
    # NOTE(review): `_calculate_fan_in_and_fan_out` is neither defined nor imported in this
    # module, and `normal_`/`uniform_` below are torch in-place APIs — this port appears
    # unrunnable as-is; confirm whether any caller reaches it.
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    """LeCun normal init: fan-in scaled truncated normal (delegates to variance_scaling_)."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')


def adapt_input_conv(in_chans, conv_weight):
    """Adapt a pretrained RGB stem conv weight (O, I, H, W) to `in_chans` input channels.

    Grayscale (in_chans == 1) sums the RGB filters; other channel counts tile the RGB
    filters and rescale so activation magnitude is preserved.
    """
    conv_type = conv_weight.dtype
    conv_weight = conv_weight.float()  # Some weights are in torch.half, ensure it's float for sum on CPU
    O, I, J, K = conv_weight.shape
    if in_chans == 1:
        if I > 3:
            assert conv_weight.shape[1] % 3 == 0  # For models with space2depth stems
            conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        else:
            conv_weight = conv_weight.sum(dim=1, keepdim=True)
    elif in_chans != 3:
        if I != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        else:
            # NOTE this strategy should be better than
            random init, but there could be other combinations of
            # the original RGB input layer weights that'd work better for specific cases.
            repeat = int(math.ceil(in_chans / 3))
            # Tile RGB filters to cover in_chans, then truncate to the exact channel count.
            conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
            # Rescale so the summed activation magnitude matches the 3-channel original.
            conv_weight *= (3 / float(in_chans))
    conv_weight = conv_weight.to(conv_type)
    return conv_weight



================================================
FILE: artrackv2_mindspore/lib/train/__init__.py
================================================
# from .admin.multigpu import MultiGPU



================================================
FILE: artrackv2_mindspore/lib/train/_init_paths.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path as osp
import sys


def add_path(path):
    # Prepend `path` to sys.path once, so project-local packages win over installed ones.
    if path not in sys.path:
        sys.path.insert(0, path)


# NOTE: module-level side effect — importing this module puts the project root
# (two levels up from this file) on sys.path.
this_dir = osp.dirname(__file__)

prj_path = osp.join(this_dir, '../..')
add_path(prj_path)



================================================
FILE: artrackv2_mindspore/lib/train/actors/__init__.py
================================================
from .base_actor import BaseActor
from .ostrack import OSTrackActor



================================================
FILE: artrackv2_mindspore/lib/train/actors/base_actor.py
================================================
from lib.utils import TensorDict


class BaseActor:
    """ Base class for actor. The actor class handles the passing of the data through the network
    and calculation the loss"""
    def __init__(self, net, objective):
        """
        args:
            net - The network to train
            objective - The loss function
        """
        self.net = net
        self.objective = objective

    def __call__(self, data: TensorDict):
        """ Called in each training iteration. Should pass in input data through the network, calculate the loss, and
        return the training stats for the input data
        args:
            data - A TensorDict containing all the necessary data blocks.
        returns:
            loss    - loss for the input data
            stats - a dict containing detailed losses
        """
        raise NotImplementedError

    def to(self, device):
        """ Move the network to device
        args:
            device - device to use. 'cpu' or 'cuda'
        """
        self.net.to(device)

    def train(self, mode=True):
        """ Set whether the network is in train mode.
        args:
            mode (True) - Bool specifying whether in training mode.
        """
        self.net.train(mode)

    def eval(self):
        """ Set network to eval mode"""
        self.train(False)



================================================
FILE: artrackv2_mindspore/lib/train/actors/ostrack.py
================================================
from . import BaseActor
from lib.utils.misc import NestedTensor
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy
import torch
import math
import numpy as np
import numpy
import cv2
import torch.nn.functional as F
import torchvision.transforms.functional as tvisf
import lib.train.data.bounding_box_utils as bbutils
from lib.utils.merge import merge_template_search
from torch.distributions.categorical import Categorical

from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate


def IoU(rect1, rect2):
    """
    Calculate intersection over union of two corner-format boxes.
    Args:
        rect1: (x1, y1, x2, y2)
        rect2: (x1, y1, x2, y2)
    Returns:
        iou
    """
    # overlap
    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]
    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]

    xx1 = np.maximum(tx1, x1)
    yy1 = np.maximum(ty1, y1)
    xx2 = np.minimum(tx2, x2)
    yy2 = np.minimum(ty2, y2)

    # Clamp to 0 so disjoint boxes contribute no overlap.
    ww = np.maximum(0, xx2 - xx1)
    hh = np.maximum(0, yy2 - yy1)

    area = (x2-x1) * (y2-y1)
    target_a = (tx2-tx1) * (ty2 - ty1)
    inter = ww * hh
    # NOTE(review): no epsilon — two zero-area boxes divide 0/0; callers presumably
    # never pass degenerate boxes. Verify against usage.
    iou = inter / (area + target_a - inter)

    return iou


def fp16_clamp(x, min=None, max=None):
    """Clamp that also works for CPU float16 tensors (which lack a clamp kernel)."""
    if not x.is_cuda and x.dtype == torch.float16:
        # clamp for cpu float16, tensor fp16 has no clamp implementation
        return x.float().clamp(min, max).half()

    return x.clamp(min, max)


def generate_sa_simdr(joints):
    '''
    :param joints:  [num_joints,
    3]
    :param joints_vis: [num_joints, 3]
    :return: target, target_weight(1: visible, 0: invisible)
    '''
    # Hard-coded SimDR setup: 48 "joints" (box-coordinate bins), 256x256 search image,
    # 1.5625 split ratio -> 400-wide 1D classification targets, Gaussian sigma 6.
    num_joints = 48
    image_size = [256, 256]
    simdr_split_ratio = 1.5625
    sigma = 6

    target_x1 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32)
    target_y1 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32)
    target_x2 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32)
    target_y2 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32)
    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)  # NOTE(review): unused here

    tmp_size = sigma * 3  # NOTE(review): unused here

    for joint_id in range(num_joints):
        # joints[joint_id] holds the four target coordinates (x1, y1, x2, y2).
        mu_x1 = joints[joint_id][0]
        mu_y1 = joints[joint_id][1]
        mu_x2 = joints[joint_id][2]
        mu_y2 = joints[joint_id][3]

        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)
        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)
        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)
        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)

        # Normalized 1D Gaussians centered at each target coordinate.
        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (
                sigma * np.sqrt(np.pi * 2))
        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (
                sigma * np.sqrt(np.pi * 2))
        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / (
                sigma * np.sqrt(np.pi * 2))
        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / (
                sigma * np.sqrt(np.pi * 2))
    return target_x1, target_y1, target_x2, target_y2


# angle cost
def SIoU_loss(test1, test2, theta=4):
    """SIoU loss between corner-format box batches `test1` (pred) and `test2` (gt).

    Returns (SIoU, iou): the per-box loss and the plain IoU.
    SIoU = 1 - IoU + (shape_cost + distance_cost) / 2.
    """
    eps = 1e-7
    # Box centers.
    cx_pred = (test1[:, 0] + test1[:, 2]) / 2
    cy_pred = (test1[:, 1] + test1[:, 3]) / 2
    cx_gt = (test2[:, 0] + test2[:, 2]) / 2
    cy_gt = (test2[:, 1] + test2[:, 3]) / 2

    dist = ((cx_pred - cx_gt)**2 + (cy_pred - cy_gt)**2) ** 0.5
    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)
    # x = sin of the angle between the center line and the x-axis; |ch| <= dist so
    # x stays in [0, 1) thanks to the +eps in the denominator.
    x = ch / (dist + eps)

    # angle cost: 0 when centers are axis-aligned, peaks at 45 degrees.
    angle = 1 - 2*torch.sin(torch.arcsin(x)-torch.pi/4)**2
    # distance cost
    xmin = torch.min(test1[:, 0], test2[:, 0])
    xmax = torch.max(test1[:, 2], test2[:, 2])
    ymin = torch.min(test1[:, 1], test2[:, 1])
    ymax = torch.max(test1[:, 3], test2[:, 3])
    # Enclosing box extents normalize the center offsets.
    cw = xmax - xmin
    ch = ymax - ymin
    px = ((cx_gt - cx_pred) / (cw+eps))**2
    py = ((cy_gt - cy_pred) / (ch+eps))**2
    gama = 2 - angle  # angle cost modulates the distance cost
    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))

    #shape cost
    w_pred = test1[:, 2] - test1[:, 0]
    h_pred = test1[:, 3] - test1[:, 1]
    w_gt = test2[:, 2] - test2[:, 0]
    h_gt = test2[:, 3] - test2[:, 1]
    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)
    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)
    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta

    #IoU loss
    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]
    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]
    # NOTE: `wh` is deliberately reused here — the shape-cost value above has already
    # been folded into `omega`.
    wh = fp16_clamp(rb - lt, min=0)
    overlap = wh[..., 0] * wh[..., 1]
    area1 = (test1[..., 2] - test1[..., 0]) * (
            test1[..., 3] - test1[..., 1])
    area2 = (test2[..., 2] - test2[..., 0]) * (
            test2[..., 3] - test2[..., 1])
    iou = overlap / (area1 + area2 - overlap)

    SIoU = 1 - iou + (omega + dis) / 2
    return SIoU, iou


def ciou(pred, target, eps=1e-7):
    """Complete-IoU between corner-format box batches; returns (cious, ious)."""
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]

    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps

    # IoU
    ious = overlap / union

    # enclose area
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)

    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]

    # Squared diagonal of the smallest enclosing box.
    c2 = cw**2 + ch**2 + eps

    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]

    # +eps on heights guards the atan(w/h) aspect-ratio term below.
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps

    # Squared center distance (rho^2), split into x and y components.
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
    rho2 = left + right

    # Aspect-ratio consistency term v from the CIoU paper.
    factor = 4 / math.pi**2
    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)

    # CIoU
    # v**2 / (1 - ious + v) equals alpha * v with the standard trade-off
    # weight alpha = v / (1 - IoU + v).
    cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
    return cious, ious


class OSTrackActor(BaseActor):
    """ Actor for training OSTrack models """

    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):
        """
        args:
            net - the network to train
            objective - dict of loss functions
            loss_weight - dict of loss weights; NOTE: mutated in place below
                          ('KL' and 'focal' entries are zeroed), so the caller's
                          dict is changed too.
            settings - training settings (batchsize is read here)
            bins - number of coordinate quantization bins
            search_size - search region side length in pixels
            cfg - optional experiment config
        """
        super().__init__(net, objective)
        self.loss_weight = loss_weight
        self.settings = settings
        self.bs = self.settings.batchsize  # batch size
        self.cfg = cfg
        self.bins = bins
        self.search_size = search_size
        self.logsoftmax = torch.nn.LogSoftmax(dim=1)
        self.focal = None
        self.range = 2
        # Disable the KL and focal loss terms for this actor.
        self.loss_weight['KL'] = 0
        self.loss_weight['focal'] = 0
        # Number of previous frames whose boxes are kept as motion context.
        self.pre_num = 7
        self.pre_bbox = None
        self.x_feat_rem = None

    def __call__(self, data):
        """
        args:
            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
template_images: (N_t, batch, 3, H, W) search_images: (N_s, batch, 3, H, W) returns: loss - the training loss status - dict containing detailed losses """ # forward pass out_dict = self.forward_pass(data) # compute losses loss, status = self.compute_losses(out_dict, data) return loss, status def _bbox_clip(self, cx, cy, width, height, boundary): cx = max(0, min(cx, boundary[1])) cy = max(0, min(cy, boundary[0])) width = max(10, min(width, boundary[1])) height = max(10, min(height, boundary[0])) return cx, cy, width, height def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans): """ args: im: bgr based image pos: center position model_sz: exemplar size s_z: original size avg_chans: channel average """ if isinstance(pos, float): pos = [pos, pos] sz = original_sz im_sz = im.shape c = (original_sz + 1) / 2 # context_xmin = round(pos[0] - c) # py2 and py3 round context_xmin = np.floor(pos[0] - c + 0.5) context_xmax = context_xmin + sz - 1 # context_ymin = round(pos[1] - c) context_ymin = np.floor(pos[1] - c + 0.5) context_ymax = context_ymin + sz - 1 left_pad = int(max(0., -context_xmin)) top_pad = int(max(0., -context_ymin)) right_pad = int(max(0., context_xmax - im_sz[1] + 1)) bottom_pad = int(max(0., context_ymax - im_sz[0] + 1)) context_xmin = context_xmin + left_pad context_xmax = context_xmax + left_pad context_ymin = context_ymin + top_pad context_ymax = context_ymax + top_pad r, c, k = im.shape if any([top_pad, bottom_pad, left_pad, right_pad]): size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k) te_im = np.zeros(size, np.uint8) te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im if top_pad: te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans if bottom_pad: te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans if left_pad: te_im[:, 0:left_pad, :] = avg_chans if right_pad: te_im[:, c + left_pad:, :] = avg_chans im_patch = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] else: im_patch = 
im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] if not np.array_equal(model_sz, original_sz): try: im_patch = cv2.resize(im_patch, (model_sz, model_sz)) except: return None im_patch = im_patch.transpose(2, 0, 1) im_patch = im_patch[np.newaxis, :, :, :] im_patch = im_patch.astype(np.float32) im_patch = torch.from_numpy(im_patch) im_patch = im_patch.cuda() return im_patch def batch_init(self, images, template_bbox, initial_bbox) -> dict: self.frame_num = 1 self.device = 'cuda' # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h) template_bbox_1 = template_bbox[:, 0] template_bbox_2 = template_bbox[:, 1] template_bbox_1 = bbutils.batch_xywh2center2(template_bbox_1) # ndarray:(2*num_seq,4) template_bbox_2 = bbutils.batch_xywh2center2(template_bbox_2) # ndarray:(2*num_seq,4) initial_bbox = bbutils.batch_xywh2center2(initial_bbox) # ndarray:(2*num_seq,4) self.center_pos = initial_bbox[:, :2] # ndarray:(2*num_seq,2) self.size = initial_bbox[:, 2:] # ndarray:(2*num_seq,2) self.pre_bbox = initial_bbox for i in range(self.pre_num - 1): self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1) #print(self.pre_bbox.shape) template_factor = self.cfg.DATA.TEMPLATE.FACTOR w_z_1 = template_bbox_1[:, 2] * template_factor # ndarray:(2*num_seq) h_z_1 = template_bbox_1[:, 3] * template_factor # ndarray:(2*num_seq) s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1)) # ndarray:(2*num_seq) w_z_2 = template_bbox_2[:, 2] * template_factor # ndarray:(2*num_seq) h_z_2 = template_bbox_2[:, 3] * template_factor # ndarray:(2*num_seq) s_z_2 = np.ceil(np.sqrt(w_z_2 * h_z_2)) # ndarray:(2*num_seq) self.channel_average = [] for img in images: self.channel_average.append(np.mean(img[0], axis=(0, 1))) self.channel_average.append(np.mean(img[1], axis=(0, 1))) self.channel_average = np.array(self.channel_average) # ndarray:(2*num_seq,3) #get crop z_crop_list = [] z_1_list = [] z_2_list = [] for i in range(len(images)): here_crop_1 = self.get_subwindow(images[i][0], 
template_bbox_1[i, :2], self.cfg.DATA.TEMPLATE.SIZE, s_z_1[i], self.channel_average[2*i]) here_crop_2 = self.get_subwindow(images[i][1], template_bbox_2[i, :2], self.cfg.DATA.TEMPLATE.SIZE, s_z_2[i], self.channel_average[2*i+1]) z_crop_1 = here_crop_1.float().mul(1.0 / 255.0).clamp(0.0, 1.0) z_crop_2 = here_crop_2.float().mul(1.0 / 255.0).clamp(0.0, 1.0) self.mean = [0.485, 0.456, 0.406] self.std = [0.229, 0.224, 0.225] self.inplace = False z_crop_1[0] = tvisf.normalize(z_crop_1[0], self.mean, self.std, self.inplace) z_crop_2[0] = tvisf.normalize(z_crop_2[0], self.mean, self.std, self.inplace) z_1_list.append(z_crop_1.unsqueeze(1).clone()) z_2_list.append(z_crop_2.unsqueeze(1).clone()) z_crop = torch.concat([z_crop_1.unsqueeze(1), z_crop_2.unsqueeze(1)], dim=1) z_crop_list.append(z_crop.clone()) z_crop = torch.cat(z_crop_list, dim=0) # Tensor(2*num_seq,3,128,128) z_1_crop = torch.cat(z_1_list, dim=0) z_2_crop = torch.cat(z_2_list, dim=0) z_2_crop = z_2_crop.squeeze(1).to(self.net.module.backbone.word_embeddings.weight) z_2_feat = self.net.module.backbone.patch_embed(z_2_crop) out = {'template_images': z_crop, "z_1":z_1_crop, "z_2":z_2_crop, "z_2_feat":z_2_feat} return out def batch_track(self, img, gt_boxes, template, dz_feat, action_mode='max') -> dict: search_factor = self.cfg.DATA.SEARCH.FACTOR w_x = self.size[:, 0] * search_factor h_x = self.size[:, 1] * search_factor s_x = np.ceil(np.sqrt(w_x * h_x)) gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes) # ndarray:(2*num_seq,4) initial_bbox = bbutils.batch_xywh2center2(gt_boxes) x_crop_list = [] gt_in_crop_list = [] pre_seq_list = [] pre_seq_in_list = [] x_feat_list = [] target_in_search_list = [] update_feat_list = [] for i in range(len(img)): template_factor = self.cfg.DATA.TEMPLATE.FACTOR w_z_1 = initial_bbox[:, 2] * template_factor # ndarray:(2*num_seq) h_z_1 = initial_bbox[:, 3] * template_factor # ndarray:(2*num_seq) s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1)) # ndarray:(2*num_seq) channel_avg = 
np.mean(img[i], axis=(0, 1)) target_in_search = self.get_subwindow(img[i], initial_bbox[i, :2], self.cfg.DATA.TEMPLATE.SIZE, round(s_z_1[i]), channel_avg) x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE, round(s_x[i]), channel_avg) if x_crop == None: return None if target_in_search == None: return None for q in range(self.pre_num): pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0+4*q:4+4*q]) if q == 0: pre_seq = pre_seq_temp else: pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1) #pre_seq = bbutils.batch_center2corner(self.pre_bbox) #ndarray:(x1 y1 x2 y2) if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10: pre_in = np.zeros(4 * self.pre_num) for w in range(self.pre_num): #print(pre_seq[i, 0+w*4:2+w*4].shape) #print(self.center_pos[i].shape) #print(pre_in[0+w*4:2+w*4].shape) pre_in[0+w*4:2+w*4] = pre_seq[i, 0+w*4:2+w*4] - self.center_pos[i] pre_in[2+w*4:4+w*4] = pre_seq[i, 2+w*4:4+w*4] - self.center_pos[i] pre_in[0+w*4:4+w*4] = pre_in[0+w*4:4+w*4] * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 #print(pre_in) pre_in[0+w*4:4+w*4] = pre_in[0+w*4:4+w*4] / self.cfg.DATA.SEARCH.SIZE pre_seq_list.append(pre_in) gt_in_crop = np.zeros(4) gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i] gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i] gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2] # (x1,y1,x2,y2) to (x1,y1,w,h) gt_in_crop_list.append(gt_in_crop) else: pre_in = np.zeros(4 * self.pre_num) #pre_in[:2] = pre_seq[i, :2] - self.center_pos[i] #pre_in[2:] = pre_seq[i, 2:] - self.center_pos[i] #pre_in = pre_in * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 pre_seq_list.append(pre_in) gt_in_crop_list.append(np.zeros(4)) #print(gt_in_crop) pre_seq_input = torch.from_numpy(pre_in).clamp(-0.5 * self.range + 0.5, 0.5 + self.range 
* 0.5) pre_seq_input = (pre_seq_input + (0.5 * self.range - 0.5)) * (self.bins - 1) pre_seq_in_list.append(pre_seq_input.clone()) x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0) target_in_search = target_in_search.float().mul(1.0 / 255.0).clamp(0.0, 1.0) rem_x = x_crop x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace) target_in_search[0] = tvisf.normalize(target_in_search[0], self.mean, self.std, self.inplace) x_crop_list.append(x_crop.clone()) target_in_search_list.append(target_in_search.clone()) x_crop = torch.cat(x_crop_list, dim=0) target_in_search = torch.cat(target_in_search_list, dim=0) pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4*self.pre_num) pre = torch.zeros_like(pre_seq_output) #print("this is x_feat_rem") #print(self.x_feat_rem) #print("i do this") outputs = self.net(template, dz_feat.cuda(), x_crop, seq_input=pre_seq_output, head_type=None, stage="batch_track", search_feature=self.x_feat_rem, target_in_search_img=target_in_search, gt_bboxes=gt_boxes[-1]) selected_indices = outputs['seqs'].detach() x_feat = outputs['x_feat'].detach().cpu() self.x_feat_rem = x_feat.clone() x_feat_list.append(x_feat.clone()) update_feat = outputs['dz_feat'].detach().cpu() update_feat_list.append(update_feat.clone()) pred_bbox = selected_indices[:, 0:4].data.cpu().numpy() bbox = (pred_bbox / (self.bins-1) - (self.range * 0.5 - 0.5)) * s_x.reshape(-1, 1) cx = bbox[:, 0] + self.center_pos[:, 0] - s_x/2 cy = bbox[:, 1] + self.center_pos[:, 1] - s_x/2 width = bbox[:, 2] - bbox[:, 0] height = bbox[:, 3] - bbox[:, 1] cx = cx + width/2 cy = cy + height/2 for i in range(len(img)): cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i], height[i], img[i].shape[:2]) self.center_pos = np.stack([cx, cy], 1) self.size = np.stack([width, height], 1) #self.pre_bbox = np.stack([cx, cy, width, height], 1) for e in range(self.pre_num): if e != self.pre_num-1 : self.pre_bbox[:, 0+e*4:4+e*4] = self.pre_bbox[:, 
4+e*4:8+e*4] else: self.pre_bbox[:, 0+e*4:4+e*4] = numpy.stack([cx, cy, width, height], 1) #print(self.pre_bbox) #print(self.pre_bbox) #print(gt_boxes) #print(gt_in_crop) bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1) #print(pre_seq_output) #print(bbox) #print(gt_boxes) out = { 'dz_feat': update_feat, 'search_images': x_crop, 'target_in_search': target_in_search, 'pred_bboxes': bbox, 'selected_indices': selected_indices.cpu(), 'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float), 'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float), 'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float), } #print("i want to see this") #print(out['x_feat'].shape) #print(out['pre_seq'].shape) # import matplotlib.pyplot as plt # print(gt_in_crop) # rem_x_plt = x_crop[0].permute(1, 2, 0).cpu() # plt.imshow(rem_x_plt) # ax = plt.gca() # ax.add_patch(plt.Rectangle(gt_in_crop[0:2], gt_in_crop[2], gt_in_crop[3], color="blue", fill=False, linewidth=1)) # plt.show() # input() return out def explore(self, data): results = {} search_images_list = [] search_anno_list = [] action_tensor_list = [] iou_list = [] # cover_list = [] pre_seq_list = [] x_feat_list = [] target_in_search_list = [] template_all_list = [] dz_feat_udpate_list = [] num_frames = data['num_frames'] images = data['search_images'] gt_bbox = data['search_annos'] template = data['template_images'] template_bbox = data['template_annos'] visible_ratio = data['visible_ratio'] template = template template_bbox = template_bbox template_bbox = np.array(template_bbox) num_seq = len(num_frames) for idx in range(np.max(num_frames)): here_images = [img[idx] for img in images] #S, N here_gt_bbox = np.array([gt[idx] for gt in gt_bbox]) here_images = here_images here_gt_bbox = np.concatenate([here_gt_bbox], 0) if idx == 0: outputs_template = self.batch_init(template, template_bbox, here_gt_bbox) results['template_images'] = 
outputs_template['z_1'] self.template_temp = outputs_template['z_1'].clone() z_all = [outputs_template['z_1'], outputs_template['z_2']] results['z_all'] = z_all self.dz_feat_update = outputs_template['z_2_feat'] else: outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half') if outputs == None: return None template_all_list.append(self.template_temp.clone()) dz_feat_udpate_list.append(self.dz_feat_update.clone().to(outputs['dz_feat'])) x_feat = outputs['x_feat'] self.dz_feat_update = outputs['dz_feat'] #print(x_feat.shape) pred_bbox = outputs['pred_bboxes'] search_images_list.append(outputs['search_images']) target_in_search_list.append(outputs['target_in_search']) search_anno_list.append(outputs['gt_in_crop']) #action_tensor_list.append(outputs['selected_indices']) if len(outputs['pre_seq']) != 8: print(outputs['pre_seq']) print(len(outputs['pre_seq'])) print(idx) print(data['num_frames']) print(data['search_annos']) return None pre_seq_list.append(outputs['pre_seq']) pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox) gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox) here_iou = [] for i in range(num_seq): bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i]) here_iou.append(bbox_iou) iou_list.append(here_iou) x_feat_list.append(x_feat.clone()) search_images_reverse_list = [] search_anno_reverse_list = [] action_tensor_reverse_list = [] iou_reverse_list = [] pre_seq_reverse_list = [] x_feat_reverse_list = [] target_in_search_reverse_list = [] dz_feat_update_reverse_list = [] template_all_reverse_list = [] for idx in range(np.max(num_frames)): real_idx = np.max(num_frames) - 1 - idx here_images = [img[real_idx] for img in images] # S, N here_gt_bbox = np.array([gt[real_idx] for gt in gt_bbox]) here_images = here_images here_gt_bbox = np.concatenate([here_gt_bbox], 0) if idx == 0: outputs_template = self.batch_init(template, template_bbox, here_gt_bbox) results['template_images'] = 
outputs_template['z_1'] self.template_temp = outputs_template['z_1'].clone() z_all = [outputs_template['z_1'], outputs_template['z_2']] results['z_all'] = z_all self.dz_feat_update = outputs_template['z_2_feat'].clone() else: outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half') if outputs == None: return None template_all_reverse_list.append(self.template_temp.clone()) dz_feat_update_reverse_list.append(self.dz_feat_update.clone().to(outputs['dz_feat'])) x_feat = outputs['x_feat'] self.dz_feat_update = outputs['dz_feat'] # print(x_feat.shape) pred_bbox = outputs['pred_bboxes'] search_images_reverse_list.append(outputs['search_images']) target_in_search_reverse_list.append(outputs['target_in_search']) search_anno_reverse_list.append(outputs['gt_in_crop']) # action_tensor_list.append(outputs['selected_indices']) if len(outputs['pre_seq']) != 8: print(outputs['pre_seq']) print(len(outputs['pre_seq'])) print(idx) print(data['num_frames']) print(data['search_annos']) return None pre_seq_reverse_list.append(outputs['pre_seq']) pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox) gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox) here_iou = [] for i in range(num_seq): bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i]) here_iou.append(bbox_iou) iou_reverse_list.append(here_iou) x_feat_reverse_list.append(x_feat.clone()) results['x_feat'] = torch.cat([torch.stack(x_feat_list), torch.stack(x_feat_reverse_list)], dim=2) results['search_images'] = torch.cat([torch.stack(search_images_list), torch.stack(search_images_reverse_list)], dim=1) results['template_images_z0'] = torch.cat([torch.stack(template_all_list), torch.stack(template_all_reverse_list)], dim=1) results['dz_feat_update'] = torch.cat([torch.stack(dz_feat_udpate_list), torch.stack(dz_feat_update_reverse_list)], dim=1) results['search_anno'] = torch.cat([torch.stack(search_anno_list), torch.stack(search_anno_reverse_list)], dim=1) 
results['pre_seq'] = torch.cat([torch.stack(pre_seq_list), torch.stack(pre_seq_reverse_list)], dim=1) results['target_in_search'] = torch.cat([torch.stack(target_in_search_list), torch.stack(target_in_search_reverse_list)], dim=1) iou_tensor = torch.tensor(iou_list, dtype=torch.float) iou_tensor_reverse = torch.tensor(iou_reverse_list, dtype=torch.float) results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq], iou_tensor_reverse[:, :num_seq]], dim=1) #results['explore_iou'] = iou_tensor[:, num_seq:] #results['action_tensor'] = torch.stack(action_tensor_list) return results def forward_pass(self, data): # currently only support 1 template and 1 search region assert len(data['template_images']) == 1 assert len(data['search_images']) == 1 #print(data['dataset']) template_list = [] for i in range(self.settings.num_template): template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128) # template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128) template_list.append(template_img_i) search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320) # search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320) box_mask_z = None ce_keep_rate = None if self.cfg.MODEL.BACKBONE.CE_LOC: box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device, data['template_anno'][0]) ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch, total_epochs=ce_start_epoch + ce_warm_epoch, ITERS_PER_EPOCH=1, base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0]) if len(template_list) == 1: template_list = template_list[0] gt_bbox = data['search_anno'][-1] begin = self.bins end = self.bins + 1 gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2] gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 
3] gt_bbox = gt_bbox.clamp(min=0.0, max=1.0) data['real_bbox'] = gt_bbox seq_ori = gt_bbox * (self.bins - 1) seq_ori = seq_ori.int().to(search_img) B = seq_ori.shape[0] seq_ori_4_4 = seq_ori[:, 0:3] #seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori_4_4], dim=1) # seq_input = torch.cat([torch.ones(1).to(target_bounding_box_label_matrix) * begin, seq_shuffle]) #seq_output = torch.cat([seq_ori], dim=1) seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1) # seq_input = torch.cat([torch.ones(1).to(target_bounding_box_label_matrix) * begin, seq_shuffle]) seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1) data['seq_input'] = seq_input data['seq_output'] = seq_output out_dict = self.net(template=template_list, search=search_img, ce_template_mask=box_mask_z, ce_keep_rate=ce_keep_rate, return_last_attn=False, seq_input=seq_input) return out_dict def compute_sequence_losses(self, data): num_frames = data['search_images'].shape[0] #template_images = data['template_images'].repeat(num_frames,1,1,1,1) template_images_for = data['template_images_z0'].reshape(-1, *data['template_images_z0'].size()[2:]) dz_feat = data['dz_feat_update'].reshape(-1, *data['dz_feat_update'].size()[2:]) target_in_search = data['target_in_search'].reshape(-1, *data['target_in_search'].size()[2:]) #template_images = template_images.view(-1, *template_images.size()[2:]) search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:]) search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:]) # cover_truth = data['cover'].reshape(-1, *data['cover'].size()[2:]) #print(data['pre_seq'].shape) #print(data['x_feat'].shape) pre_seq = data['pre_seq'].reshape(-1, 4*self.pre_num) x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:]) # print("begin") # print(template_images.shape) # print(template_images_for.shape) # print(target_in_search.shape) # print("end") epoch = data['epoch'] 
if epoch < 11: self.loss_weight['focal'] = 2 plus = 1 rem_p = 1 else: self.loss_weight['focal'] = 0 plus = 1 rem_p = 0 #print("this is looking for") #print(x_feat.shape) #print(pre_seq.shape) pre_seq = pre_seq.clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5) pre_seq = (pre_seq+(self.range * 0.5 - 0.5)) * (self.bins - 1) #print(pre_seq) outputs = self.net(template_images_for, dz_feat, search_images, seq_input=pre_seq, stage="forward_pass", search_feature=x_feat, target_in_search_img=target_in_search) score = outputs['score'] # cover = outputs['cover'] renew_loss = outputs['renew_loss'] pred_feat = outputs["feat"] # generate labels if self.focal == None: weight = torch.ones(self.bins*self.range + 6) * 1 weight[self.bins * self.range + 4] = 0.1 weight[self.bins * self.range + 3] = 0.1 weight[self.bins * self.range + 2] = 0.1 weight[self.bins*self.range+1] = 0.1 weight[self.bins*self.range] = 0.1 weight.to(pred_feat) self.focal = torch.nn.CrossEntropyLoss(weight=weight, size_average=True).to(pred_feat) # target[:, 2] = target[:, 2] + target[:, 0] # target[:, 3] = target[:, 3] + target[:, 1] # real_target = torch.zeros_like(target) # real_target[: ,0] = target[: ,0] # real_target[:, 1] = target[:, 1] # real_target[:, 2] = target[:, 2] + target[:, 0] # real_target[:, 3] = target[:, 3] + target[:, 1] # target = target.reshape(-1).to(torch.int64) # real_target = real_target.reshape(-1).to(torch.int64) # pred = pred_feat.permute(1, 0, 2)[:, 0:4, :].reshape(-1, self.bins + 2) #print(search_anno) search_anno[:, 2] = search_anno[:, 2] + search_anno[: ,0] search_anno[:, 3] = search_anno[:, 3] + search_anno[: ,1] target = (search_anno / self.cfg.DATA.SEARCH.SIZE + (self.range * 0.5 - 0.5)) * (self.bins - 1) #target[:, 2] = target[:, 2] + target[:, 0] #target[:, 3] = target[:, 3] + target[:, 1] target = target.clamp(min=0.0, max=(self.bins * self.range-0.0001)) #print(target) target_iou = target end_flag = torch.ones((target.shape[0], 1)) * (self.bins * self.range + 1) 
end_flag = end_flag.to(target) target = torch.cat([target], dim=1) target = target.reshape(-1).to(torch.int64) pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins*self.range+6) #print(target) varifocal_loss = self.focal(pred, target) pred = pred_feat[0:4, :, 0:self.bins*self.range] target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - (self.range * 0.5 - 0.5) #print(target) #print(target) out = pred.softmax(-1).to(pred) mul = torch.range((-1*self.range * 0.5 + 0.5)+1/800, (self.range * 0.5 + 0.5)-1/800, 2/800).to(pred) #print(mul) ans = out * mul ans = ans.sum(dim=-1) ans = ans.permute(1, 0).to(pred) extra_seq = ans #print(extra_seq) extra_seq = extra_seq.to(pred) # print(extra_seq) cious, iou = SIoU_loss(extra_seq, target, 4) cious = cious.mean() score_real = score score_loss = self.objective['l1'](score_real, iou) #cover_loss = self.objective['l1'](cover, cover_truth) giou_loss = cious l1_loss = self.objective['l1'](extra_seq, target) #self.loss_weight['giou'] = 0 loss_bb = (self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight[ 'focal'] * varifocal_loss) * plus #pred = pred_feat[0:4, :, :].permute(1, 0, 2) #log_softmax_score = F.log_softmax(pred * self.cfg.DATA.TEMP, dim=-1) #log_softmax_score = log_softmax_score.reshape(-1, self.bins+2) #log = torch.argmax(log_softmax_score, dim=-1) #action_tensor = data['action_tensor'][:, :, 0:4].reshape(-1) #bs = log_softmax_score.shape[0] #selected_logprobs = log_softmax_score[range(bs), action_tensor.view(-1)] #print("this is true!") #print(selected_logprobs) #selected_logprobs = selected_logprobs.reshape(-1, 4) #print(selected_logprobs) #selected_logprobs = torch.mean(selected_logprobs, dim=1) #print(selected_logprobs) #selected_logprobs = selected_logprobs.reshape(-1) #cls_loss = - selected_logprobs * data['reward_tensor'].view(-1) #loss_sl = cls_loss.mean() #print(self.net.box_head.encoder.layers[0].z_self_attn.qkv.weight) #total_losses = loss_bb #total_losses = loss_bb + 
renew_loss * 0.3 + score_loss * 1 total_losses = loss_bb + renew_loss * 0.3 * rem_p + score_loss * 1 * rem_p# + cover_loss * 1 * rem_p #total_losses = loss_bb * 1 + renew_loss * 0.3 + score_loss * 0 mean_iou = iou.detach().mean() status = {"Loss/total": total_losses.item()/2, # "Loss/cover": cover_loss.item()/2, #"Loss/sl": loss_sl.item(), "Loss/score": score_loss.item()/2, "Loss/giou": giou_loss.item()/2, "Loss/l1": l1_loss.item()/2, "Loss/location": varifocal_loss.item()/2, "Loss/renew": renew_loss.item()/2, "IoU": mean_iou.item()/2} return total_losses, status def compute_losses(self, pred_dict, gt_dict, return_status=True): # gt gaussian map bins = self.bins gt_bbox = gt_dict['search_anno'][-1] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4) real_bbox = gt_dict['real_bbox'] seq_output = gt_dict['seq_output'] pred_feat = pred_dict["feat"] if self.focal == None: weight = torch.ones(bins + 1) * 1 weight[bins+1] = 0.1 weight[bins] = 0.1 weight.to(pred_feat) self.klloss = torch.nn.KLDivLoss(reduction='none').to(pred_feat) self.focal = torch.nn.CrossEntropyLoss(weight=weight, size_average=True).to(pred_feat) # compute varfifocal loss pred = pred_feat.permute(1, 0, 2).reshape(-1, bins+1) #print(pred) target = seq_output.reshape(-1).to(torch.int64) #print(target) varifocal_loss = self.focal(pred, target) # compute giou and L1 loss beta = 1 pred = pred_feat[0:4, :, 0:bins] * beta target = seq_output[:, 0:4].to(pred_feat) target_box = seq_output[:, 0:4].cpu().numpy() x1,y1,x2,y2 = generate_sa_simdr(target_box) # x1_ = torch.Tensor(x1).to(pred).unsqueeze(1) # y1_ = torch.Tensor(y1).to(pred).unsqueeze(1) # x2_ = torch.Tensor(x2).to(pred).unsqueeze(1) # y2_ = torch.Tensor(y2).to(pred).unsqueeze(1) # KL_target = torch.concat((x1_, y1_, x2_, y2_), dim=1) out = pred.softmax(-1).to(pred) mul = torch.arange(0, bins, 1).to(pred) ans = out * mul ans = ans.sum(dim=-1) ans = ans.permute(1, 0).to(pred) target = target / (bins - 1) extra_seq = ans / (bins - 1) extra_seq = 
extra_seq.to(pred) #cious, iou = ciou(extra_seq, target) cious, iou = SIoU_loss(extra_seq, target, 4) cious = cious.mean() #cious, iou = self.objective['giou'](extra_seq, target) giou_loss = cious #giou_loss = 1 - cious #giou_loss, iou = self.objective['giou'](extra_seq, target) l1_loss = self.objective['l1'](extra_seq, target) #print(giou_loss) #print(l1_loss) #print(varifocal_loss) # gt_gaussian_maps = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE) # gt_gaussian_maps = gt_gaussian_maps[-1].unsqueeze(1) # # # # Get boxes # pred_boxes = pred_dict['pred_boxes'] # if torch.isnan(pred_boxes).any(): # raise ValueError("Network outputs is NAN! Stop Training") # num_queries = pred_boxes.size(1) # pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4) # (B,N,4) --> (BN,4) (x1,y1,x2,y2) # gt_boxes_vec = box_xywh_to_xyxy(gt_bbox)[:, None, :].repeat((1, num_queries, 1)).view(-1, 4).clamp(min=0.0, # max=1.0) # (B,4) --> (B,1,4) --> (B,N,4) # compute giou and iou # try: # giou_loss, iou = self.objective['giou'](pred_boxes_vec, gt_boxes_vec) # (BN,4) (BN,4) # except: # giou_loss, iou = torch.tensor(0.0).cuda(), torch.tensor(0.0).cuda() # # compute l1 loss # l1_loss = self.objective['l1'](pred_boxes_vec, gt_boxes_vec) # (BN,4) (BN,4) # # compute location loss # if 'score_map' in pred_dict: # location_loss = self.objective['focal'](pred_dict['score_map'], gt_gaussian_maps) # else: # location_loss = torch.tensor(0.0, device=l1_loss.device) # weighted sum loss = self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight['focal'] * varifocal_loss if return_status: # status for log mean_iou = iou.detach().mean() status = {"Loss/total": loss.item(), "Loss/giou": giou_loss.item(), "Loss/l1": l1_loss.item(), "Loss/location": varifocal_loss.item(), "IoU": mean_iou.item()} return loss, status else: return loss ================================================ FILE: 
artrackv2_mindspore/lib/train/admin/__init__.py
================================================
from .environment import env_settings, create_default_local_file_ITP_train
from .stats import AverageMeter, StatValue
#from .tensorboard import TensorboardWriter


================================================
FILE: artrackv2_mindspore/lib/train/admin/environment.py
================================================
import importlib
import os
from collections import OrderedDict


def create_default_local_file():
    """Write a skeleton local.py with empty dataset/workspace paths.

    Generates an EnvironmentSettings class next to this module; the user is
    expected to fill in the paths afterwards.
    """
    path = os.path.join(os.path.dirname(__file__), 'local.py')

    empty_str = '\'\''
    default_settings = OrderedDict({
        'workspace_dir': empty_str,
        'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'',
        'pretrained_networks': 'self.workspace_dir + \'/pretrained_networks/\'',
        'lasot_dir': empty_str,
        'got10k_dir': empty_str,
        'trackingnet_dir': empty_str,
        'coco_dir': empty_str,
        'lvis_dir': empty_str,
        'sbd_dir': empty_str,
        'imagenet_dir': empty_str,
        'imagenetdet_dir': empty_str,
        'ecssd_dir': empty_str,
        'hkuis_dir': empty_str,
        'msra10k_dir': empty_str,
        'davis_dir': empty_str,
        'youtubevos_dir': empty_str})

    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',
               'tensorboard_dir': 'Directory for tensorboard files.'}

    with open(path, 'w') as f:
        f.write('class EnvironmentSettings:\n')
        f.write('    def __init__(self):\n')

        for attr, attr_val in default_settings.items():
            comment_str = None
            if attr in comment:
                comment_str = comment[attr]
            if comment_str is None:
                f.write('        self.{} = {}\n'.format(attr, attr_val))
            else:
                f.write('        self.{} = {}    # {}\n'.format(attr, attr_val, comment_str))


def create_default_local_file_ITP_train(workspace_dir, data_dir):
    """Write a local.py with dataset paths derived from `data_dir`.

    args:
        workspace_dir - base directory for checkpoints/tensorboard output
        data_dir - root directory containing the training datasets
    """
    path = os.path.join(os.path.dirname(__file__), 'local.py')

    empty_str = '\'\''
    default_settings = OrderedDict({
        'workspace_dir': workspace_dir,
        'tensorboard_dir': os.path.join(workspace_dir, 'tensorboard'),    # Directory for tensorboard files.
        'pretrained_networks': os.path.join(workspace_dir, 'pretrained_networks'),
        'lasot_dir': os.path.join(data_dir, 'lasot'),
        'got10k_dir': os.path.join(data_dir, 'got10k/train'),
        'got10k_val_dir': os.path.join(data_dir, 'got10k/val'),
        'lasot_lmdb_dir': os.path.join(data_dir, 'lasot_lmdb'),
        'got10k_lmdb_dir': os.path.join(data_dir, 'got10k_lmdb'),
        'trackingnet_dir': os.path.join(data_dir, 'trackingnet'),
        'trackingnet_lmdb_dir': os.path.join(data_dir, 'trackingnet_lmdb'),
        'coco_dir': os.path.join(data_dir, 'coco'),
        'coco_lmdb_dir': os.path.join(data_dir, 'coco_lmdb'),
        'lvis_dir': empty_str,
        'sbd_dir': empty_str,
        'imagenet_dir': os.path.join(data_dir, 'vid'),
        'imagenet_lmdb_dir': os.path.join(data_dir, 'vid_lmdb'),
        'imagenetdet_dir': empty_str,
        'ecssd_dir': empty_str,
        'hkuis_dir': empty_str,
        'msra10k_dir': empty_str,
        'davis_dir': empty_str,
        'youtubevos_dir': empty_str})

    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',
               'tensorboard_dir': 'Directory for tensorboard files.'}

    with open(path, 'w') as f:
        f.write('class EnvironmentSettings:\n')
        f.write('    def __init__(self):\n')

        for attr, attr_val in default_settings.items():
            comment_str = None
            if attr in comment:
                comment_str = comment[attr]
            if comment_str is None:
                # Real paths are quoted; the empty_str placeholder is written verbatim.
                if attr_val == empty_str:
                    f.write('        self.{} = {}\n'.format(attr, attr_val))
                else:
                    f.write('        self.{} = \'{}\'\n'.format(attr, attr_val))
            else:
                f.write('        self.{} = \'{}\'    # {}\n'.format(attr, attr_val, comment_str))


def env_settings():
    """Import and instantiate the user's local EnvironmentSettings.

    On failure, writes a skeleton local.py and raises RuntimeError telling the
    user to fill it in.
    """
    env_module_name = 'lib.train.admin.local'
    try:
        env_module = importlib.import_module(env_module_name)
        return env_module.EnvironmentSettings()
    except:  # NOTE(review): bare except also swallows non-import errors (e.g. KeyboardInterrupt) — consider narrowing
        env_file = os.path.join(os.path.dirname(__file__), 'local.py')
        create_default_local_file()
        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file))


================================================
FILE: artrackv2_mindspore/lib/train/admin/local.py
================================================
class EnvironmentSettings:
    """Machine-specific paths for training (generated file)."""

    def __init__(self):
        self.workspace_dir = '/home/baiyifan/code/prev_for_2stage'    # Base directory for saving network checkpoints.
        self.tensorboard_dir = '/home/baiyifan/code/detrack/tensorboard'    # Directory for tensorboard files.
        self.pretrained_networks = '/home/baiyifan/code/OSTrack/pretrained_networks'
        self.lasot_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'
        self.got10k_dir = '/home/baiyifan/GOT-10k/train'
        self.got10k_val_dir = '/home/baiyifan/GOT-10k/val'
        self.lasot_lmdb_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'
        self.got10k_lmdb_dir = ''
        self.trackingnet_dir = '/ssddata/TrackingNet/all_zip'
        self.trackingnet_lmdb_dir = '/ssddata/TrackingNet/all_zip'
        self.coco_dir = '/home/baiyifan/coco'
        self.coco_lmdb_dir = ''
        self.lvis_dir = ''
        self.sbd_dir = ''
        self.imagenet_dir = '/home/baiyifan/code/OSTrack/data/vid'
        self.imagenet_lmdb_dir = '/home/baiyifan/code/OSTrack/data/vid_lmdb'
        self.imagenetdet_dir = ''
        self.ecssd_dir = ''
        self.hkuis_dir = ''
        self.msra10k_dir = ''
        self.davis_dir = ''
        self.youtubevos_dir = ''


================================================
FILE: artrackv2_mindspore/lib/train/admin/settings.py
================================================
from lib.train.admin.environment import env_settings


class Settings:
    """ Training settings, e.g. the paths to datasets and networks."""
    def __init__(self):
        self.set_default()

    def set_default(self):
        # Pull machine-local paths from local.py via env_settings().
        self.env = env_settings()
        self.use_gpu = True


================================================
FILE: artrackv2_mindspore/lib/train/admin/stats.py
================================================
class StatValue:
    """Tracks the latest value of a statistic together with its history."""

    def __init__(self):
        self.clear()

    def reset(self):
        self.val = 0

    def clear(self):
        self.reset()
        self.history = []

    def update(self, val):
        self.val = val
        self.history.append(self.val)


class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.clear()
        self.has_new_data = False

    def reset(self):
        self.avg = 0
        self.val = 0
        self.sum = 0
        self.count = 0

    def clear(self):
        self.reset()
        self.history = []

    def update(self, val, n=1):
        # n is the number of samples `val` was averaged over.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def new_epoch(self):
        # Archive the epoch average (if any samples were seen) and reset.
        if self.count > 0:
            self.history.append(self.avg)
            self.reset()
            self.has_new_data = True
        else:
            self.has_new_data = False


def topk_accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    args:
        output: (batch, num_classes) score tensor
        target: (batch,) ground-truth class indices
        topk: int or tuple of ints; a single int returns a scalar instead of a list
    """
    single_input = not isinstance(topk, (tuple, list))
    if single_input:
        topk = (topk,)

    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # NOTE(review): .view(-1) on a sliced (possibly non-contiguous) tensor can
        # raise in newer torch; .reshape(-1) would be safe — confirm before changing.
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]
        res.append(correct_k * 100.0 / batch_size)

    if single_input:
        return res[0]

    return res


================================================
FILE: artrackv2_mindspore/lib/train/admin/tensorboard.py
================================================
#import os
#from collections import OrderedDict
#try:
#    from torch.utils.tensorboard import SummaryWriter
#except:
#    print('WARNING: You are using tensorboardX instead since you have a too old pytorch version.')
#    from tensorboardX import SummaryWriter


#class TensorboardWriter:
#    def __init__(self, directory,
loader_names): # self.directory = directory # self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names}) # def write_info(self, script_name, description): # tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info')) # tb_info_writer.add_text('Script_name', script_name) # tb_info_writer.add_text('Description', description) # tb_info_writer.close() # def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1): # for loader_name, loader_stats in stats.items(): # if loader_stats is None: # continue # for var_name, val in loader_stats.items(): # if hasattr(val, 'history') and getattr(val, 'has_new_data', True): # self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch) ================================================ FILE: artrackv2_mindspore/lib/train/base_functions.py ================================================ import torch from torch.utils.data.distributed import DistributedSampler # datasets related from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader import lib.train.data.transforms as tfm from lib.utils.misc import is_main_process def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = 
cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    """Turn a list of dataset-name strings into constructed dataset objects.

    Each name selects either the plain-image or the lmdb-backed variant of the
    dataset, depending on `settings.use_lmdb`; dataset roots come from
    `settings.env` (lib.train.admin.local).
    """
    assert isinstance(name_list, list)
    #settings.use_lmdb = True
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full",
                        "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train',
                                           image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train',
                                      image_loader=image_loader))
        elif name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain',
                                            image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain',
                                       image_loader=image_loader))
        elif name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full',
                                            image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full',
                                       image_loader=image_loader))
        elif name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval',
                                            image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval',
                                       image_loader=image_loader))
        elif name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None,
                                       image_loader=image_loader))
        elif name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017",
                                               image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017",
                                          image_loader=image_loader))
        elif name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir,
                                                 image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir,
                                            image_loader=image_loader))
        elif name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir,
                                                 image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir,
                                            image_loader=image_loader))
    return datasets


def build_dataloaders(cfg, settings):
    """Construct the train/val data-processing pipelines and LTR loaders."""
    # Joint transforms are applied identically to template and search frames;
    # per-branch transforms run afterwards.
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),
                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    # The tracking-pairs processing module (crop + jitter + transform).
    output_sz = settings.output_sz
    search_area_factor = settings.search_area_factor

    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                       output_sz=output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       transform=transform_train,
                                                       joint_transform=transform_joint,
                                                       settings=settings)

    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                     output_sz=output_sz,
                                                     center_jitter_factor=settings.center_jitter_factor,
                                                     scale_jitter_factor=settings.scale_jitter_factor,
                                                     mode='sequence',
                                                     transform=transform_val,
                                                     joint_transform=transform_joint,
                                                     settings=settings)

    # Sampler configuration (defaults mirror the single-frame case).
    settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1)
    settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1)
    sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal")
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
print("sampler_mode", sampler_mode) dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_train, frame_sample_mode=sampler_mode, train_cls=train_cls) train_sampler = DistributedSampler(dataset_train) if settings.local_rank != -1 else None shuffle = False if settings.local_rank != -1 else True loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler) # Validation samplers and loaders dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.VAL.DATASETS_RATIO, samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_val, frame_sample_mode=sampler_mode, train_cls=train_cls) val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler, epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL) return loader_train, loader_val def get_optimizer_scheduler(net, cfg): train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False) if train_cls: print("Only training classification head. 
Learnable parameters are shown below.") param_dicts = [ {"params": [p for n, p in net.named_parameters() if "cls" in n and p.requires_grad]} ] for n, p in net.named_parameters(): if "cls" not in n: p.requires_grad = False else: print(n) else: param_dicts = [ {"params": [p for n, p in net.named_parameters() if "backbone" not in n and p.requires_grad]}, { "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad], "lr": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER, }, ] if is_main_process(): print("Learnable parameters are shown below.") for n, p in net.named_parameters(): if p.requires_grad: print(n) if cfg.TRAIN.OPTIMIZER == "ADAMW": optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY) else: raise ValueError("Unsupported Optimizer") if cfg.TRAIN.SCHEDULER.TYPE == 'step': lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH) elif cfg.TRAIN.SCHEDULER.TYPE == "Mstep": lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.TRAIN.SCHEDULER.MILESTONES, gamma=cfg.TRAIN.SCHEDULER.GAMMA) else: raise ValueError("Unsupported scheduler") return optimizer, lr_scheduler ================================================ FILE: artrackv2_mindspore/lib/train/data/__init__.py ================================================ # from .loader import LTRLoader from .image_loader import jpeg4py_loader, opencv_loader, jpeg4py_loader_w_failsafe, default_image_loader ================================================ FILE: artrackv2_mindspore/lib/train/data/bounding_box_utils.py ================================================ import torch import numpy as np def batch_center2corner(boxes): xmin = boxes[:, 0] - boxes[:, 2] * 0.5 ymin = boxes[:, 1] - boxes[:, 3] * 0.5 xmax = boxes[:, 0] + boxes[:, 2] * 0.5 ymax = boxes[:, 1] + boxes[:, 3] * 0.5 if isinstance(boxes, np.ndarray): return np.stack([xmin, ymin, xmax, ymax], 1) else: return torch.stack([xmin, ymin, xmax, ymax], 1) 
def batch_corner2center(boxes): cx = (boxes[:, 0] + boxes[:, 2]) * 0.5 cy = (boxes[:, 1] + boxes[:, 3]) * 0.5 w = (boxes[:, 2] - boxes[:, 0]) h = (boxes[:, 3] - boxes[:, 1]) if isinstance(boxes, np.ndarray): return np.stack([cx, cy, w, h], 1) else: return torch.stack([cx, cy, w, h], 1) def batch_xywh2center(boxes): cx = boxes[:, 0] + (boxes[:, 2] - 1) / 2 cy = boxes[:, 1] + (boxes[:, 3] - 1) / 2 w = boxes[:, 2] h = boxes[:, 3] if isinstance(boxes, np.ndarray): return np.stack([cx, cy, w, h], 1) else: return torch.stack([cx, cy, w, h], 1) def batch_xywh2center2(boxes): cx = boxes[:, 0] + boxes[:, 2] / 2 cy = boxes[:, 1] + boxes[:, 3] / 2 w = boxes[:, 2] h = boxes[:, 3] if isinstance(boxes, np.ndarray): return np.stack([cx, cy, w, h], 1) else: return torch.stack([cx, cy, w, h], 1) def batch_xywh2corner(boxes): xmin = boxes[:, 0] ymin = boxes[:, 1] xmax = boxes[:, 0] + boxes[:, 2] ymax = boxes[:, 1] + boxes[:, 3] if isinstance(boxes, np.ndarray): return np.stack([xmin, ymin, xmax, ymax], 1) else: return torch.stack([xmin, ymin, xmax, ymax], 1) def rect_to_rel(bb, sz_norm=None): """Convert standard rectangular parametrization of the bounding box [x, y, w, h] to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate. args: bb - N x 4 tensor of boxes. sz_norm - [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given. """ c = bb[...,:2] + 0.5 * bb[...,2:] if sz_norm is None: c_rel = c / bb[...,2:] else: c_rel = c / sz_norm sz_rel = torch.log(bb[...,2:]) return torch.cat((c_rel, sz_rel), dim=-1) def rel_to_rect(bb, sz_norm=None): """Inverts the effect of rect_to_rel. See above.""" sz = torch.exp(bb[...,2:]) if sz_norm is None: c = bb[...,:2] * sz else: c = bb[...,:2] * sz_norm tl = c - 0.5 * sz return torch.cat((tl, sz), dim=-1) def masks_to_bboxes(mask, fmt='c'): """ Convert a mask tensor to one or more bounding boxes. Note: This function is a bit new, make sure it does what it says. 
/Andreas :param mask: Tensor of masks, shape = (..., H, W) :param fmt: bbox layout. 'c' => "center + size" or (x_center, y_center, width, height) 't' => "top left + size" or (x_left, y_top, width, height) 'v' => "vertices" or (x_left, y_top, x_right, y_bottom) :return: tensor containing a batch of bounding boxes, shape = (..., 4) """ batch_shape = mask.shape[:-2] mask = mask.reshape((-1, *mask.shape[-2:])) bboxes = [] for m in mask: mx = m.sum(dim=-2).nonzero() my = m.sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bboxes.append(bb) bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device) bboxes = bboxes.reshape(batch_shape + (4,)) if fmt == 'v': return bboxes x1 = bboxes[..., :2] s = bboxes[..., 2:] - x1 + 1 if fmt == 'c': return torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': return torch.cat((x1, s), dim=-1) raise ValueError("Undefined bounding box layout '%s'" % fmt) def masks_to_bboxes_multi(mask, ids, fmt='c'): assert mask.dim() == 2 bboxes = [] for id in ids: mx = (mask == id).sum(dim=-2).nonzero() my = (mask == id).float().sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bb = torch.tensor(bb, dtype=torch.float32, device=mask.device) x1 = bb[:2] s = bb[2:] - x1 + 1 if fmt == 'v': pass elif fmt == 'c': bb = torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': bb = torch.cat((x1, s), dim=-1) else: raise ValueError("Undefined bounding box layout '%s'" % fmt) bboxes.append(bb) return bboxes ================================================ FILE: artrackv2_mindspore/lib/train/data/image_loader.py ================================================ import jpeg4py import cv2 as cv from PIL import Image import numpy as np davis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8) davis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], 
[0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0], [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]] def default_image_loader(path): """The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader, but reverts to the opencv_loader if the former is not available.""" if default_image_loader.use_jpeg4py is None: # Try using jpeg4py im = jpeg4py_loader(path) if im is None: default_image_loader.use_jpeg4py = False print('Using opencv_loader instead.') else: default_image_loader.use_jpeg4py = True return im if default_image_loader.use_jpeg4py: return jpeg4py_loader(path) return opencv_loader(path) default_image_loader.use_jpeg4py = None def jpeg4py_loader(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_loader(path): """ Read image using opencv's imread function and returns it in rgb format""" try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def jpeg4py_loader_w_failsafe(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except: try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_seg_loader(path): """ Read segmentation annotation using opencv's imread function""" try: return cv.imread(path) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def imread_indexed(filename): """ Load indexed 
image with given filename. Used to read segmentation annotations.""" im = Image.open(filename) annotation = np.atleast_3d(im)[...,0] return annotation def imwrite_indexed(filename, array, color_palette=None): """ Save indexed image as png. Used to save segmentation annotation.""" if color_palette is None: color_palette = davis_palette if np.atleast_3d(array).shape[2] != 1: raise Exception("Saving indexed PNGs requires 2D array.") im = Image.fromarray(array) im.putpalette(color_palette.ravel()) im.save(filename, format='PNG') ================================================ FILE: artrackv2_mindspore/lib/train/data/loader.py ================================================ import torch import torch.utils.data.dataloader import importlib import collections # from torch import string_classes from lib.utils import TensorDict, TensorList string_classes = str if float(torch.__version__[:3]) >= 1.9 or len('.'.join((torch.__version__).split('.')[0:2])) > 3: int_classes = int else: # from torch._six import int_classes int_classes = int # 原代码是没有int_classes = int,为 from torch._six import int_classes import warnings warnings.filterwarnings("ignore") def _check_use_shared_memory(): if hasattr(torch.utils.data.dataloader, '_use_shared_memory'): return getattr(torch.utils.data.dataloader, '_use_shared_memory') collate_lib = importlib.import_module('torch.utils.data._utils.collate') if hasattr(collate_lib, '_use_shared_memory'): return getattr(collate_lib, '_use_shared_memory') return torch.utils.data.get_worker_info() is not None def ltr_collate(batch): """Puts each data field into a tensor with outer dimension batch size""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = 
batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) def ltr_collate_stack1(batch): """Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 1, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return torch.stack([torch.from_numpy(b) for b in batch], 1) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate_stack1(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate_stack1(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) class 
LTRLoader(torch.utils.data.dataloader.DataLoader): """ Data loader. Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset. Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to select along which dimension the data should be stacked to form a batch. Arguments: dataset (Dataset): dataset from which to load the data. batch_size (int, optional): how many samples per batch to load (default: 1). shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: False). sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False. batch_sampler (Sampler, optional): like sampler, but returns a batch of indices at a time. Mutually exclusive with batch_size, shuffle, sampler, and drop_last. num_workers (int, optional): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0) collate_fn (callable, optional): merges a list of samples to form a mini-batch. stack_dim (int): Dimension along which to stack to form the batch. (default: 0) pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: False) timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: 0) worker_init_fn (callable, optional): If not None, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. (default: None) .. 
note:: By default, each worker will have its PyTorch seed set to ``base_seed + worker_id``, where ``base_seed`` is a long generated by main process using its RNG. However, seeds for other libraries may be duplicated upon initializing workers (w.g., NumPy), causing each worker to return identical random numbers. (See :ref:`dataloader-workers-random-seed` section in FAQ.) You may use ``torch.initial_seed()`` to access the PyTorch seed for each worker in :attr:`worker_init_fn`, and use it to set other seeds before data loading. .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an unpicklable object, e.g., a lambda function. """ __initialized = False def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=True, timeout=0, worker_init_fn=None): print("pin_memory is", pin_memory) if collate_fn is None: if stack_dim == 0: collate_fn = ltr_collate elif stack_dim == 1: collate_fn = ltr_collate_stack1 else: raise ValueError('Stack dim no supported. Must be 0 or 1.') super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn) self.name = name self.training = training self.epoch_interval = epoch_interval self.stack_dim = stack_dim ================================================ FILE: artrackv2_mindspore/lib/train/data/processing.py ================================================ import torch import torchvision.transforms as transforms from lib.utils import TensorDict import lib.train.data.processing_utils as prutils import torch.nn.functional as F def stack_tensors(x): if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor): return torch.stack(x) return x class BaseProcessing: """ Base class for Processing. 
Processing class is used to process the data returned by a dataset, before passing it through the network. For example, it can be used to crop a search region around the object, apply various data augmentations, etc.""" def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None): """ args: transform - The set of transformations to be applied on the images. Used only if template_transform or search_transform is None. template_transform - The set of transformations to be applied on the template images. If None, the 'transform' argument is used instead. search_transform - The set of transformations to be applied on the search images. If None, the 'transform' argument is used instead. joint_transform - The set of transformations to be applied 'jointly' on the template and search images. For example, it can be used to convert both template and search images to grayscale. """ self.transform = {'template': transform if template_transform is None else template_transform, 'search': transform if search_transform is None else search_transform, 'joint': joint_transform} def __call__(self, data: TensorDict): raise NotImplementedError class STARKProcessing(BaseProcessing): """ The processing class used for training LittleBoy. The images are processed in the following way. First, the target bounding box is jittered by adding some noise. Next, a square region (called search region ) centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is always at the center of the search region. The search region is then resized to a fixed size given by the argument output_sz. 
""" def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, mode='pair', settings=None, *args, **kwargs): """ args: search_area_factor - The size of the search region relative to the target size. output_sz - An integer, denoting the size to which the search region is resized. The search region is always square. center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before extracting the search region. See _get_jittered_box for how the jittering is done. scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before extracting the search region. See _get_jittered_box for how the jittering is done. mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames """ super().__init__(*args, **kwargs) self.search_area_factor = search_area_factor self.output_sz = output_sz self.center_jitter_factor = center_jitter_factor self.scale_jitter_factor = scale_jitter_factor self.mode = mode self.settings = settings def _get_jittered_box(self, box, mode): """ Jitter the input box args: box - input bounding box mode - string 'template' or 'search' indicating template or search data returns: torch.Tensor - jittered box """ jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode]) max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float()) jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5) return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0) def __call__(self, data: TensorDict): """ args: data - The input data, should contain the following fields: 'template_images', search_images', 'template_anno', 'search_anno' returns: TensorDict - output data block with following fields: 'template_images', 'search_images', 'template_anno', 'search_anno', 'test_proposals', 'proposal_iou' """ # Apply joint transforms 
if self.transform['joint'] is not None: data['template_images'], data['template_anno'], data['template_masks'] = self.transform['joint']( image=data['template_images'], bbox=data['template_anno'], mask=data['template_masks']) data['search_images'], data['search_anno'], data['search_masks'] = self.transform['joint']( image=data['search_images'], bbox=data['search_anno'], mask=data['search_masks'], new_roll=False) data["target_in_search_images"] = data["search_images"] data["target_in_search_anno"] = data["search_anno"] data["target_in_search_masks"] = data["search_masks"] self.scale_jitter_factor["target_in_search"] = self.scale_jitter_factor["template"] self.center_jitter_factor["target_in_search"] = self.center_jitter_factor["template"] self.search_area_factor["target_in_search"] = self.search_area_factor["template"] self.output_sz["target_in_search"] = self.output_sz["template"] self.transform["target_in_search"] = self.transform["search"] for s in ['template', 'search', 'target_in_search']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']] # 2021.1.9 Check whether data is valid. Avoid too small bounding boxes w, h = torch.stack(jittered_anno, dim=0)[:, 2], torch.stack(jittered_anno, dim=0)[:, 3] crop_sz = torch.ceil(torch.sqrt(w * h) * self.search_area_factor[s]) if (crop_sz < 1).any(): data['valid'] = False # print("Too small box is found. 
Replace it with new data.") return data # Crop image region centered at jittered_anno box and get the attention mask crops, boxes, att_mask, mask_crops = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor[s], self.output_sz[s], masks=data[s + '_masks']) # Apply transforms data[s + '_images'], data[s + '_anno'], data[s + '_att'], data[s + '_masks'] = self.transform[s]( image=crops, bbox=boxes, att=att_mask, mask=mask_crops, joint=False) # 2021.1.9 Check whether elements in data[s + '_att'] is all 1 # Note that type of data[s + '_att'] is tuple, type of ele is torch.tensor for ele in data[s + '_att']: if (ele == 1).all(): data['valid'] = False # print("Values of original attention mask are all one. Replace it with new data.") return data # 2021.1.10 more strict conditions: require the donwsampled masks not to be all 1 for ele in data[s + '_att']: feat_size = self.output_sz[s] // 16 # 16 is the backbone stride # (1,1,128,128) (1,1,256,256) --> (1,1,8,8) (1,1,16,16) mask_down = F.interpolate(ele[None, None].float(), size=feat_size).to(torch.bool)[0] if (mask_down == 1).all(): data['valid'] = False # print("Values of down-sampled attention mask are all one. 
" # "Replace it with new data.") return data data['valid'] = True # if we use copy-and-paste augmentation if data["template_masks"] is None or data["search_masks"] is None: data["template_masks"] = torch.zeros((1, self.output_sz["template"], self.output_sz["template"])) data["search_masks"] = torch.zeros((1, self.output_sz["search"], self.output_sz["search"])) # Prepare output if self.mode == 'sequence': data = data.apply(stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) else x) return data ================================================ FILE: artrackv2_mindspore/lib/train/data/processing_utils.py ================================================ import mindspore as ms from mindspore import ops import math import cv2 as cv import numpy as np '''modified from the original test implementation Replace cv.BORDER_REPLICATE with cv.BORDER_CONSTANT Add a variable called att_mask for computing attention and positional encoding later''' def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None): """ Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area args: im - cv image target_bb - target box [x, y, w, h] search_area_factor - Ratio of crop size to target size output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done. 
returns: cv image - extracted crop float - the factor by which the crop has been resized to make the crop size equal output_size """ if not isinstance(target_bb, list): x, y, w, h = target_bb.tolist() else: x, y, w, h = target_bb # Crop image crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor) if crop_sz < 1: raise Exception('Too small bounding box.') x1 = round(x + 0.5 * w - crop_sz * 0.5) x2 = x1 + crop_sz y1 = round(y + 0.5 * h - crop_sz * 0.5) y2 = y1 + crop_sz x1_pad = max(0, -x1) x2_pad = max(x2 - im.shape[1] + 1, 0) y1_pad = max(0, -y1) y2_pad = max(y2 - im.shape[0] + 1, 0) # Crop target im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] if mask is not None: mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad] # Pad im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT) # deal with attention mask H, W, _ = im_crop_padded.shape att_mask = np.ones((H,W)) end_x, end_y = -x2_pad, -y2_pad if y2_pad == 0: end_y = None if x2_pad == 0: end_x = None att_mask[y1_pad:end_y, x1_pad:end_x] = 0 if mask is not None: mask_crop_padded = ops.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0) if output_sz is not None: resize_factor = output_sz / crop_sz im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz)) att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_) if mask is None: return im_crop_padded, resize_factor, att_mask mask_crop_padded = \ ops.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0] return im_crop_padded, resize_factor, att_mask, mask_crop_padded else: if mask is None: return im_crop_padded, att_mask.astype(np.bool_), 1.0 return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded def transform_image_to_crop(box_in: ms.Tensor, box_extract: ms.Tensor, resize_factor: float, crop_sz: ms.Tensor, normalize=False) -> ms.Tensor: """ Transform the box co-ordinates 
from the original image co-ordinates to the co-ordinates of the cropped image args: box_in - the box for which the co-ordinates are to be transformed box_extract - the box about which the image crop has been extracted. resize_factor - the ratio between the original image scale and the scale of the image crop crop_sz - size of the cropped image returns: torch.Tensor - transformed co-ordinates of box_in """ box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4] box_in_center = box_in[0:2] + 0.5 * box_in[2:4] box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor box_out_wh = box_in[2:4] * resize_factor box_out = ops.cat((box_out_center - 0.5 * box_out_wh, box_out_wh)) if normalize: return box_out / crop_sz[0] else: return box_out def jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None): """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2 times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box box_gt are transformed to the image crop co-ordinates args: frames - list of frames box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract box_gt - list of boxes of same length as frames. 
The co-ordinates of these boxes are transformed from image co-ordinates to the crop co-ordinates search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area output_sz - The size to which the extracted crops are resized returns: list - list of image crops list - box_gt location in the crop co-ordinates """ if masks is None: crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz) for f, a in zip(frames, box_extract)] frames_crop, resize_factors, att_mask = zip(*crops_resize_factors) masks_crop = None else: crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m) for f, a, m in zip(frames, box_extract, masks)] frames_crop, resize_factors, att_mask, masks_crop = zip(*crops_resize_factors) # frames_crop: tuple of ndarray (128,128,3), att_mask: tuple of ndarray (128,128) crop_sz = ms.Tensor([output_sz, output_sz]) # find the bb location in the crop '''Note that here we use normalized coord''' box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz, normalize=True) for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)] # (x1,y1,w,h) list of tensors return frames_crop, box_crop, att_mask, masks_crop def transform_box_to_crop(box: ms.Tensor, crop_box: ms.Tensor, crop_sz: ms.Tensor, normalize=False) -> ms.Tensor: """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image args: box - the box for which the co-ordinates are to be transformed crop_box - bounding box defining the crop in the original image crop_sz - size of the cropped image returns: torch.Tensor - transformed co-ordinates of box_in """ box_out = box.clone() box_out[:2] -= crop_box[:2] scale_factor = crop_sz / crop_box[2:] box_out[:2] *= scale_factor box_out[2:] *= scale_factor if normalize: return box_out / crop_sz[0] else: return box_out ================================================ FILE: artrackv2_mindspore/lib/train/data/sampler.py 
================================================
import random
import torch.utils.data
from lib.utils import TensorDict
import numpy as np


def no_processing(data):
    # Identity processing function, used as the default for TrackingSampler.
    return data


class TrackingSampler(torch.utils.data.Dataset):
    """ Class responsible for sampling frames from training sequences to form batches. 

    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected
    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and
    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id]  and
    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled.
    If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found.

    The sampled frames are then passed through the input 'processing' function for the necessary processing-
    """

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,
                 num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',
                 train_cls=False, pos_prob=0.5):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List containing the probabilities by which each dataset will be sampled
            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.
            samples_per_epoch - Number of training samples per epoch
            num_search_frames - Number of search frames to sample.
            num_template_frames - Number of template frames to sample.
            processing - An instance of Processing class which performs the necessary processing of the data.
            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a
                                causally, otherwise randomly within the interval.
            train_cls - whether the sampler serves the classification branch (see getitem_cls).
            pos_prob - probability of sampling a positive pair in classification mode.
        """
        self.datasets = datasets
        self.train_cls = train_cls  # whether we are training classification
        self.pos_prob = pos_prob  # probability of sampling positive class when making classification

        # If p not provided, sample uniformly from all videos
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.num_search_frames = num_search_frames
        self.num_template_frames = num_template_frames
        self.processing = processing
        self.frame_sample_mode = frame_sample_mode

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,
                            allow_invisible=False, force_invisible=False):
        """ Samples num_ids frames between min_id and max_id for which target is visible

        args:
            visible - 1d Tensor indicating whether target is visible for each frame
            num_ids - number of frames to be samples
            min_id - Minimum allowed frame number
            max_id - Maximum allowed frame number
            allow_invisible - also allow frames where the target is not visible
            force_invisible - sample only frames where the target is NOT visible

        returns:
            list - List of sampled frame numbers. None if not sufficient visible frames could be found.
""" if num_ids == 0: return [] if min_id is None or min_id < 0: min_id = 0 if max_id is None or max_id > len(visible): max_id = len(visible) # get valid ids if force_invisible: valid_ids = [i for i in range(min_id, max_id) if not visible[i]] else: if allow_invisible: valid_ids = [i for i in range(min_id, max_id)] else: valid_ids = [i for i in range(min_id, max_id) if visible[i]] # No visible ids if len(valid_ids) == 0: return None return random.choices(valid_ids, k=num_ids) def __getitem__(self, index): if self.train_cls: return self.getitem_cls() else: return self.getitem() def getitem(self): """ returns: TensorDict - dict containing all the data blocks """ valid = False while not valid: # Select a dataset dataset = random.choices(self.datasets, self.p_datasets)[0] is_video_dataset = dataset.is_video_sequence() # sample a sequence from the given dataset seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset) if is_video_dataset: template_frame_ids = None search_frame_ids = None gap_increase = 0 if self.frame_sample_mode == 'causal': # Sample test and train frames in a causal manner, i.e. 
search_frame_ids > template_frame_ids while search_frame_ids is None: base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_template_frames - 1, max_id=len(visible) - self.num_search_frames) prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_template_frames - 1, min_id=base_frame_id[0] - self.max_gap - gap_increase, max_id=base_frame_id[0]) if prev_frame_ids is None: gap_increase += 5 continue template_frame_ids = base_frame_id + prev_frame_ids search_frame_ids = self._sample_visible_ids(visible, min_id=template_frame_ids[0] + 1, max_id=template_frame_ids[0] + self.max_gap + gap_increase, num_ids=self.num_search_frames) # Increase gap until a frame is found gap_increase += 5 elif self.frame_sample_mode == "trident" or self.frame_sample_mode == "trident_pro": template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible) elif self.frame_sample_mode == "stark": template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict["valid"]) else: raise ValueError("Illegal frame sample mode") else: # In case of image dataset, just repeat the image to generate synthetic video template_frame_ids = [1] * self.num_template_frames search_frame_ids = [1] * self.num_search_frames try: template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict) search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict) H, W, _ = template_frames[0].shape template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros((H, W))] * self.num_template_frames search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros((H, W))] * self.num_search_frames data = TensorDict({'template_images': template_frames, 'template_anno': template_anno['bbox'], 'template_masks': template_masks, 'search_images': search_frames, 'search_anno': search_anno['bbox'], 'search_masks': search_masks, 'dataset': 
dataset.get_name(), 'test_class': meta_obj_test.get('object_class_name')}) # make data augmentation data = self.processing(data) # check whether data is valid valid = data['valid'] except: valid = False return data def getitem_cls(self): # get data for classification """ args: index (int): Index (Ignored since we sample randomly) aux (bool): whether the current data is for auxiliary use (e.g. copy-and-paste) returns: TensorDict - dict containing all the data blocks """ valid = False label = None while not valid: # Select a dataset dataset = random.choices(self.datasets, self.p_datasets)[0] is_video_dataset = dataset.is_video_sequence() # sample a sequence from the given dataset seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset) # sample template and search frame ids if is_video_dataset: if self.frame_sample_mode in ["trident", "trident_pro"]: template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible) elif self.frame_sample_mode == "stark": template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict["valid"]) else: raise ValueError("illegal frame sample mode") else: # In case of image dataset, just repeat the image to generate synthetic video template_frame_ids = [1] * self.num_template_frames search_frame_ids = [1] * self.num_search_frames try: # "try" is used to handle trackingnet data failure # get images and bounding boxes (for templates) template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict) H, W, _ = template_frames[0].shape template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros( (H, W))] * self.num_template_frames # get images and bounding boxes (for searches) # positive samples if random.random() < self.pos_prob: label = torch.ones(1,) search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict) search_masks = search_anno['mask'] if 'mask' in 
search_anno else [torch.zeros( (H, W))] * self.num_search_frames # negative samples else: label = torch.zeros(1,) if is_video_dataset: search_frame_ids = self._sample_visible_ids(visible, num_ids=1, force_invisible=True) if search_frame_ids is None: search_frames, search_anno, meta_obj_test = self.get_one_search() else: search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict) search_anno["bbox"] = [self.get_center_box(H, W)] else: search_frames, search_anno, meta_obj_test = self.get_one_search() H, W, _ = search_frames[0].shape search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros( (H, W))] * self.num_search_frames data = TensorDict({'template_images': template_frames, 'template_anno': template_anno['bbox'], 'template_masks': template_masks, 'search_images': search_frames, 'search_anno': search_anno['bbox'], 'search_masks': search_masks, 'dataset': dataset.get_name(), 'test_class': meta_obj_test.get('object_class_name')}) # make data augmentation data = self.processing(data) # add classification label data["label"] = label # check whether data is valid valid = data['valid'] except: valid = False return data def get_center_box(self, H, W, ratio=1/8): cx, cy, w, h = W/2, H/2, W * ratio, H * ratio return torch.tensor([int(cx-w/2), int(cy-h/2), int(w), int(h)]) def sample_seq_from_dataset(self, dataset, is_video_dataset): # Sample a sequence with enough visible frames enough_visible_frames = False while not enough_visible_frames: # Sample a sequence seq_id = random.randint(0, dataset.get_num_sequences() - 1) # Sample frames seq_info_dict = dataset.get_sequence_info(seq_id) visible = seq_info_dict['visible'] enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * ( self.num_search_frames + self.num_template_frames) and len(visible) >= 20 enough_visible_frames = enough_visible_frames or not is_video_dataset return seq_id, visible, seq_info_dict def get_one_search(self): # Select a dataset 
        dataset = random.choices(self.datasets, self.p_datasets)[0]

        is_video_dataset = dataset.is_video_sequence()

        # sample a sequence
        seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)
        # sample a frame
        if is_video_dataset:
            if self.frame_sample_mode == "stark":
                search_frame_ids = self._sample_visible_ids(seq_info_dict["valid"], num_ids=1)
            else:
                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, allow_invisible=True)
        else:
            search_frame_ids = [1]
        # get the image, bounding box and other info
        search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)

        return search_frames, search_anno, meta_obj_test

    def get_frame_ids_trident(self, visible):
        # get template and search ids in a 'trident' manner:
        # one initial template, one search frame, plus one dynamic template per
        # entry of self.max_gap (expected to be an iterable here).
        template_frame_ids_extra = []
        # Retry until every dynamic template slot was filled.
        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:
            template_frame_ids_extra = []
            # first randomly sample two frames from a video
            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id
            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id
            # get the dynamic template id
            for max_gap in self.max_gap:
                # Sample between the search frame and the initial template, within max_gap.
                if template_frame_id1[0] >= search_frame_ids[0]:
                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap
                else:
                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]
                if self.frame_sample_mode == "trident_pro":
                    # trident_pro also accepts frames where the target is invisible.
                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id,
                                                    allow_invisible=True)
                else:
                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id)
                if f_id is None:
                    template_frame_ids_extra += [None]
                else:
                    template_frame_ids_extra += f_id

        template_frame_ids = template_frame_id1 + template_frame_ids_extra
        return template_frame_ids, search_frame_ids

    def get_frame_ids_stark(self, visible, valid):
        # get template and search ids in a 'stark' manner (same scheme as trident,
        # but dynamic templates are drawn from the `valid` flags instead of `visible`).
        template_frame_ids_extra = []
        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:
            template_frame_ids_extra = []
            # first randomly sample two frames from a video
            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id
            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id
            # get the dynamic template id
            for max_gap in self.max_gap:
                if template_frame_id1[0] >= search_frame_ids[0]:
                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap
                else:
                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]
                """we require the frame to be valid but not necessary visible"""
                f_id = self._sample_visible_ids(valid, num_ids=1, min_id=min_id, max_id=max_id)
                if f_id is None:
                    template_frame_ids_extra += [None]
                else:
                    template_frame_ids_extra += f_id

        template_frame_ids = template_frame_id1 + template_frame_ids_extra
        return template_frame_ids, search_frame_ids


================================================
FILE: artrackv2_mindspore/lib/train/data/sequence_sampler.py
================================================
import random
import torch.utils.data
import numpy as np

from lib.utils import TensorDict


class SequenceSampler(torch.utils.data.Dataset):
    """ Sample sequence for sequence-level training
    """

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,
                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential', max_interval=10, prob=0.7):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List containing the probabilities by which each dataset will be sampled
            samples_per_epoch - Number of training samples per epoch
            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames.
            max_interval - Maximum interval between sampled frames
            num_search_frames - Number of search frames to sample.
            num_template_frames - Number of template frames to sample.
            processing - An instance of Processing class which performs the necessary processing of the data.
            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the search frames are sampled in a
                                causally, otherwise randomly within the interval.
            prob - sequential sampling by prob / interval sampling by 1-prob
        """
        self.datasets = datasets

        # If p not provided, sample uniformly from all videos
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.max_interval = max_interval
        self.num_search_frames = num_search_frames
        self.num_template_frames = num_template_frames
        self.frame_sample_mode = frame_sample_mode
        self.prob = prob
        # Multiplier temporarily applied to max_gap/max_interval in __getitem__.
        self.extra = 1

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):
        """ Samples num_ids frames between min_id and max_id for which target is visible

        args:
            visible - 1d Tensor indicating whether target is visible for each frame
            num_ids - number of frames to be samples
            min_id - Minimum allowed frame number
            max_id - Maximum allowed frame number

        returns:
            list - List of sampled frame numbers. None if not sufficient visible frames could be found.
        """
        if num_ids == 0:
            return []
        # Clamp the requested range to [0, len(visible)].
        if min_id is None or min_id < 0:
            min_id = 0
        if max_id is None or max_id > len(visible):
            max_id = len(visible)

        valid_ids = [i for i in range(min_id, max_id) if visible[i]]

        # No visible ids
        if len(valid_ids) == 0:
            return None

        # Sampling WITH replacement, so duplicates are possible.
        return random.choices(valid_ids, k=num_ids)

    def _sequential_sample(self, visible):
        # Sample frames in sequential manner: two sorted template frames followed
        # by a consecutive run of num_search_frames visible search frames.
        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,
                                                      max_id=len(visible) - self.num_search_frames)
        template_another = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],
                                                    max_id=min(len(visible) - self.num_search_frames,
                                                               template_frame_ids[0] + self.max_gap))
        template_frame_ids.append(template_another[0])
        template_frame_ids.sort()
        if self.max_gap == -1:
            # Search frames start right at the later template frame.
            left = template_frame_ids[1]
        else:
            # template frame (1) ->(max_gap) -> search frame (num_search_frames)
            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[1] + self.max_gap)
            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[1],
                                            max_id=left_max)[0]

        valid_ids = [i for i in range(left, len(visible)) if visible[i]]
        search_frame_ids = valid_ids[:self.num_search_frames]

        # if length is not enough: extend with later visible frames, or repeat the last id.
        last = search_frame_ids[-1]
        while len(search_frame_ids) < self.num_search_frames:
            if last >= len(visible) - 1:
                search_frame_ids.append(last)
            else:
                last += 1
                if visible[last]:
                    search_frame_ids.append(last)

        return template_frame_ids, search_frame_ids

    def _random_interval_sample(self, visible):
        # Sample a template frame, then search frames separated by random
        # intervals of at most self.max_interval.
        # Get valid ids
        valid_ids = [i for i in range(len(visible)) if visible[i]]

        # Sample template frame
        avg_interval = self.max_interval
        # Shrink the interval until num_search_frames spaced frames fit into the sequence.
        while avg_interval * (self.num_search_frames - 1) > len(visible):
            avg_interval = max(avg_interval - 1, 1)

        while True:
            template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,
                                                          max_id=len(visible) - avg_interval * (self.num_search_frames - 1))
            if template_frame_ids == None:
                avg_interval = avg_interval - 1
            else:
                break

            # Fallback: give up on the interval constraint, take the first visible frame.
            if avg_interval ==
0:
                template_frame_ids = [valid_ids[0]]
                break

        # Sample first search frame
        if self.max_gap == -1:
            search_frame_ids = template_frame_ids
        else:
            avg_interval = self.max_interval
            # Shrink the interval until the spaced frames fit into the sequence.
            while avg_interval * (self.num_search_frames - 1) > len(visible):
                avg_interval = max(avg_interval - 1, 1)

            while True:
                left_max = min(max(len(visible) - avg_interval * (self.num_search_frames - 1),
                                   template_frame_ids[0] + 1),
                               template_frame_ids[0] + self.max_gap)
                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],
                                                            max_id=left_max)
                if search_frame_ids == None:
                    avg_interval = avg_interval - 1
                else:
                    break

                # Fallback: reuse the template frame as the first search frame.
                if avg_interval == -1:
                    search_frame_ids = template_frame_ids
                    break

        # Sample rest of the search frames with random interval
        last = search_frame_ids[0]

        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:
            # sample id with interval
            max_id = min(last + self.max_interval + 1, len(visible))
            id = self._sample_visible_ids(visible, num_ids=1, min_id=last,
                                          max_id=max_id)

            if id is None:
                # If not found in current range, find from previous range
                last = last + self.max_interval
            else:
                search_frame_ids.append(id[0])
                last = search_frame_ids[-1]

        # if length is not enough, randomly sample new ids
        if len(search_frame_ids) < self.num_search_frames:
            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]

            if len(valid_ids) > 0:
                new_ids = random.choices(valid_ids, k=min(len(valid_ids),
                                                          self.num_search_frames - len(search_frame_ids)))
                search_frame_ids = search_frame_ids + new_ids
                search_frame_ids = sorted(search_frame_ids, key=int)

        # if length is still not enough, duplicate last frame
        while len(search_frame_ids) < self.num_search_frames:
            search_frame_ids.append(search_frame_ids[-1])

        # Debug guard: report any gap that exceeds max_interval (can happen via the fallbacks).
        for i in range(1, self.num_search_frames):
            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:
                print(search_frame_ids[i] - search_frame_ids[i - 1])

        return template_frame_ids, search_frame_ids

    def __getitem__(self,
                    index):
        """
        args:
            index (int): Index (Ignored since we sample randomly)

        returns:
            TensorDict - dict containing all the data blocks
        """

        # Select a dataset
        dataset = random.choices(self.datasets, self.p_datasets)[0]

        # NOTE(review): both branches assign identical values, so the got10k
        # special case is currently dead code — presumably a leftover from a
        # per-dataset gap tweak; confirm before removing.
        if dataset.get_name() == 'got10k':
            max_gap = self.max_gap
            max_interval = self.max_interval
        else:
            max_gap = self.max_gap
            max_interval = self.max_interval

        # Temporarily scale the gaps by self.extra; restored before returning.
        self.max_gap = max_gap * self.extra
        self.max_interval = max_interval * self.extra

        is_video_dataset = dataset.is_video_sequence()

        # Sample a sequence with enough visible frames
        enough_visible_frames = False
        while not enough_visible_frames:
            # Sample a sequence
            seq_id = random.randint(0, dataset.get_num_sequences() - 1)

            # Sample frames
            seq_info_dict = dataset.get_sequence_info(seq_id)
            visible = seq_info_dict['visible']

            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (
                    self.num_search_frames + self.num_template_frames) and len(visible) >= (
                    self.num_search_frames + self.num_template_frames)

            enough_visible_frames = enough_visible_frames or not is_video_dataset

        if is_video_dataset:
            if self.frame_sample_mode == 'sequential':
                template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            elif self.frame_sample_mode == 'random_interval':
                # Mix: interval sampling with probability self.prob, else sequential.
                if random.random() < self.prob:
                    template_frame_ids, search_frame_ids = self._random_interval_sample(visible)
                else:
                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            else:
                # Restore the gaps before raising.
                self.max_gap = max_gap
                self.max_interval = max_interval
                raise NotImplementedError
        else:
            # In case of image dataset, just repeat the image to generate synthetic video
            template_frame_ids = [1] * self.num_template_frames
            search_frame_ids = [1] * self.num_search_frames

        # Restore the unscaled gaps.
        self.max_gap = max_gap
        self.max_interval = max_interval

        # print("this is template_frame_ids", template_frame_ids)
        # print("this is search_frame_ids", search_frame_ids)
        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids,
                                                                               seq_info_dict)
        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)

        visible_ratio = search_anno['visible_ratio']

        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']]  # tensor -> numpy array
        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']]  # tensor -> numpy array

        return TensorDict({'template_images': np.array(template_frames).squeeze(),  # 1 template images
                           'template_annos': np.array(template_bbox).squeeze(),
                           'search_images': np.array(search_frames),  # (num_frames) search images
                           'search_annos': np.array(search_bbox),
                           'seq_id': seq_id,
                           'dataset': dataset.get_name(),
                           'search_class': meta_obj_search.get('object_class_name'),
                           'num_frames': len(search_frames),
                           'visible_ratio': visible_ratio
                           })


================================================
FILE: artrackv2_mindspore/lib/train/data/transforms.py
================================================
import random

import numpy as np
import math
import cv2 as cv
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as tvisf


class Transform:
    """A set of transformations, used for e.g. data augmentation.
    Args of constructor:
        transforms: An arbitrary number of transformations, derived from the TransformBase class.
                    They are applied in the order they are given.

    The Transform object can jointly transform images, bounding boxes and segmentation masks.
    This is done by calling the object with the following key-word arguments (all are optional).

    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list
    of instances.
        image  -  Image
        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]
        bbox  -  Bounding box on the form [x, y, w, h]
        mask  -  Segmentation mask with discrete classes

    The following parameters can be supplied with calling the transform object:
        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.
                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using
                         different random rolls. Default: True.
        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll
                            is used instead. Default: True.

    Check the DiMPProcessing class for examples.
    """

    def __init__(self, *transforms):
        # Accept either Transform(t1, t2, ...) or Transform([t1, t2, ...]).
        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):
            transforms = transforms[0]
        self.transforms = transforms
        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']
        self._valid_args = ['joint', 'new_roll']
        self._valid_all = self._valid_inputs + self._valid_args

    def __call__(self, **inputs):
        """Apply every transform in order; returns the transformed inputs
        (a single value when exactly one input kind was given, else a tuple
        in the order the inputs were supplied)."""
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        for v in inputs.keys():
            if v not in self._valid_all:
                raise ValueError('Incorrect input \"{}\" to transform. Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))

        joint_mode = inputs.get('joint', True)
        new_roll = inputs.get('new_roll', True)

        if not joint_mode:
            # Transform each (image, coords, bbox, mask, ...) tuple independently,
            # then regroup the per-tuple results into per-kind lists.
            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])
            return tuple(list(o) for o in out)

        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}

        for t in self.transforms:
            out = t(**out, joint=joint_mode, new_roll=new_roll)
        if len(var_names) == 1:
            return out[var_names[0]]
        # Make sure order is correct
        return tuple(out[v] for v in var_names)

    def _split_inputs(self, inputs):
        # Turn per-kind lists into a list of per-item input dicts, replicating
        # scalar arguments (e.g. new_roll) and distributing list-valued ones.
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]
        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):
            if isinstance(arg_val, list):
                for inp, av in zip(split_inputs, arg_val):
                    inp[arg_name] = av
            else:
                for inp in split_inputs:
                    inp[arg_name] = arg_val
        return split_inputs

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class TransformBase:
    """Base class for transformation objects. See the Transform class for details."""
    def __init__(self):
        """2020.12.24 Add 'att' to valid inputs"""
        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']
        self._valid_args = ['new_roll']
        self._valid_all = self._valid_inputs + self._valid_args
        # Last rolled random parameters, reused when new_roll=False.
        self._rand_params = None

    def __call__(self, **inputs):
        # Split input
        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}
        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}

        # Roll random parameters for the transform
        if input_args.get('new_roll', True):
            rand_params = self.roll()
            if rand_params is None:
                rand_params = ()
            elif not isinstance(rand_params, tuple):
                rand_params = (rand_params,)
            self._rand_params = rand_params

        outputs = dict()
        for var_name, var in input_vars.items():
            if var is not None:
                # Dispatch to transform_image / transform_bbox / ... by input name.
                transform_func = getattr(self, 'transform_' + var_name)
                if var_name in ['coords', 'bbox']:
                    # Coordinate-like inputs additionally receive the image size.
                    params = (self._get_image_size(input_vars),) + self._rand_params
                else:
                    params = self._rand_params
                if isinstance(var, (list, tuple)):
                    outputs[var_name] = [transform_func(x, *params) for x in var]
                else:
                    outputs[var_name] = transform_func(var, *params)
        return outputs

    def _get_image_size(self, inputs):
        # Infer (height, width) from the first available image or mask input.
        im = None
        for var_name in ['image', 'mask']:
            if inputs.get(var_name) is not None:
                im = inputs[var_name]
                break
        if im is None:
            return None
        if isinstance(im, (list, tuple)):
            im = im[0]
        if isinstance(im, np.ndarray):
            return im.shape[:2]
        if torch.is_tensor(im):
            return (im.shape[-2], im.shape[-1])
        raise Exception('Unknown image type')

    def roll(self):
        # Roll random parameters for this transform; None means "no parameters".
        return None

    def transform_image(self, image, *rand_params):
        """Must be deterministic"""
        return image

    def transform_coords(self, coords, image_shape, *rand_params):
        """Must be deterministic"""
        return coords

    def transform_bbox(self, bbox, image_shape, *rand_params):
        """Assumes [x, y, w, h]"""
        # Check if not overloaded
        if
self.transform_coords.__code__ == TransformBase.transform_coords.__code__: return bbox coord = bbox.clone().view(-1,2).t().flip(0) x1 = coord[1, 0] x2 = coord[1, 0] + coord[1, 1] y1 = coord[0, 0] y2 = coord[0, 0] + coord[0, 1] coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]]) coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0) tl = torch.min(coord_transf, dim=1)[0] sz = torch.max(coord_transf, dim=1)[0] - tl bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape) return bbox_out def transform_mask(self, mask, *rand_params): """Must be deterministic""" return mask def transform_att(self, att, *rand_params): """2020.12.24 Added to deal with attention masks""" return att class ToTensor(TransformBase): """Convert to a Tensor""" def transform_image(self, image): # handle numpy array if image.ndim == 2: image = image[:, :, None] image = torch.from_numpy(image.transpose((2, 0, 1))) # backward compatibility if isinstance(image, torch.ByteTensor): return image.float().div(255) else: return image def transfrom_mask(self, mask): if isinstance(mask, np.ndarray): return torch.from_numpy(mask) def transform_att(self, att): if isinstance(att, np.ndarray): return torch.from_numpy(att).to(torch.bool) elif isinstance(att, torch.Tensor): return att.to(torch.bool) else: raise ValueError ("dtype must be np.ndarray or torch.Tensor") class ToTensorAndJitter(TransformBase): """Convert to a Tensor and jitter brightness""" def __init__(self, brightness_jitter=0.0, normalize=True): super().__init__() self.brightness_jitter = brightness_jitter self.normalize = normalize def roll(self): return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter) def transform_image(self, image, brightness_factor): # handle numpy array image = torch.from_numpy(image.transpose((2, 0, 1))) # backward compatibility if self.normalize: return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0) else: return 
image.float().mul(brightness_factor).clamp(0.0, 255.0) def transform_mask(self, mask, brightness_factor): if isinstance(mask, np.ndarray): return torch.from_numpy(mask) else: return mask def transform_att(self, att, brightness_factor): if isinstance(att, np.ndarray): return torch.from_numpy(att).to(torch.bool) elif isinstance(att, torch.Tensor): return att.to(torch.bool) else: raise ValueError ("dtype must be np.ndarray or torch.Tensor") class Normalize(TransformBase): """Normalize image""" def __init__(self, mean, std, inplace=False): super().__init__() self.mean = mean self.std = std self.inplace = inplace def transform_image(self, image): return tvisf.normalize(image, self.mean, self.std, self.inplace) class ToGrayscale(TransformBase): """Converts image to grayscale with probability""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32) def roll(self): return random.random() < self.probability def transform_image(self, image, do_grayscale): if do_grayscale: if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY) return np.stack([img_gray, img_gray, img_gray], axis=2) # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2) return image class ToBGR(TransformBase): """Converts image to BGR""" def transform_image(self, image): if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR) return img_bgr class RandomHorizontalFlip(TransformBase): """Horizontally flip image randomly with a probability p.""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def roll(self): return random.random() < self.probability def transform_image(self, image, do_flip): if do_flip: if torch.is_tensor(image): return image.flip((2,)) return 
np.fliplr(image).copy() return image def transform_coords(self, coords, image_shape, do_flip): if do_flip: coords_flip = coords.clone() coords_flip[1,:] = (image_shape[1] - 1) - coords[1,:] return coords_flip return coords def transform_mask(self, mask, do_flip): if do_flip: if torch.is_tensor(mask): return mask.flip((-1,)) return np.fliplr(mask).copy() return mask def transform_att(self, att, do_flip): if do_flip: if torch.is_tensor(att): return att.flip((-1,)) return np.fliplr(att).copy() return att class RandomHorizontalFlip_Norm(RandomHorizontalFlip): """Horizontally flip image randomly with a probability p. The difference is that the coord is normalized to [0,1]""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def transform_coords(self, coords, image_shape, do_flip): """we should use 1 rather than image_shape""" if do_flip: coords_flip = coords.clone() coords_flip[1,:] = 1 - coords[1,:] return coords_flip return coords ================================================ FILE: artrackv2_mindspore/lib/train/data/wandb_logger.py ================================================ from collections import OrderedDict try: import wandb except ImportError: raise ImportError( 'Please run "pip install wandb" to install wandb') class WandbWriter: def __init__(self, exp_name, cfg, output_dir, cur_step=0, step_interval=0): self.wandb = wandb self.step = cur_step self.interval = step_interval wandb.init(project="tracking", name=exp_name, config=cfg, dir=output_dir) def write_log(self, stats: OrderedDict, epoch=-1): self.step += 1 for loader_name, loader_stats in stats.items(): if loader_stats is None: continue log_dict = {} for var_name, val in loader_stats.items(): if hasattr(val, 'avg'): log_dict.update({loader_name + '/' + var_name: val.avg}) else: log_dict.update({loader_name + '/' + var_name: val.val}) if epoch >= 0: log_dict.update({loader_name + '/epoch': epoch}) self.wandb.log(log_dict, step=self.step*self.interval) 
================================================ FILE: artrackv2_mindspore/lib/train/data_specs/README.md ================================================ # README ## Description for different text files GOT10K - got10k_train_full_split.txt: the complete GOT-10K training set. (9335 videos) - got10k_train_split.txt: part of videos from the GOT-10K training set - got10k_val_split.txt: another part of videos from the GOT-10K training set - got10k_vot_exclude.txt: 1k videos that are forbidden from "using to train models then testing on VOT" (as required by [VOT Challenge](https://www.votchallenge.net/vot2020/participation.html)) - got10k_vot_train_split.txt: part of videos from the "VOT-permitted" GOT-10K training set - got10k_vot_val_split.txt: another part of videos from the "VOT-permitted" GOT-10K training set LaSOT - lasot_train_split.txt: the complete LaSOT training set TrackingNnet - trackingnet_classmap.txt: The map from the sequence name to the target class for the TrackingNet ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/got10k_train_full_split.txt ================================================ 3784 8998 3906 1631 8277 8358 2338 7938 2988 8302 2662 2663 2825 7447 4781 2218 6348 5860 4517 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 701 683 2081 1831 2404 1459 2741 5972 3618 7462 2654 103 2174 6224 2989 2506 2766 5912 2699 3295 3986 609 4895 6673 801 1098 1602 2490 3129 8476 3186 7355 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 731 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8139 8760 8173 2332 4131 5207 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 1551 998 6565 8127 8927 2544 4365 510 768 3535 3875 6808 2931 487 1088 4451 368 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 3756 7840 6315 7827 3300 6261 
4163 2217 6549 94 7236 9136 1857 6691 3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 4295 1889 1908 4868 4498 1968 9103 3273 8723 7413 4114 5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 7294 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4217 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 7032 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 5523 7403 3379 2323 4833 5750 3178 6548 8891 7501 3280 7404 343 2171 8397 1367 8611 6118 6603 3729 7182 9048 7733 5642 7141 3335 4845 5449 3467 6250 163 5168 2040 5339 3609 8352 3426 8567 769 187 6151 6437 7028 8507 3970 9146 2068 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 9115 4132 1211 5459 4814 545 4556 238 4296 2724 1260 2581 6087 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 7643 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 1732 6702 7027 6388 7012 328 2130 452 306 7669 3134 5761 3703 44 4189 695 7672 5224 9215 5644 3143 3704 5443 2348 7177 2328 4725 354 1418 7810 7746 9002 5759 7226 4535 9160 4385 5397 7249 2936 3204 6287 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 4652 1964 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 6789 5636 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 7821 9224 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 
7310 1413 1932 4787 2579 3325 508 5610 6480 4290 479 3792 6628 2545 6717 6972 2665 6730 3547 6845 5929 3540 4356 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 941 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 8892 8162 1212 6277 3725 7806 6149 7874 718 6888 7118 277 656 8763 8289 4759 5854 8659 7710 3145 5981 1881 5799 6947 1609 6396 2631 2887 318 2550 6132 1736 2907 7816 48 4304 8133 6698 2760 7779 7732 7642 1154 7242 711 9262 539 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 8241 1856 8641 3981 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 6968 2755 1924 2098 2972 6006 5865 8740 2418 3401 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6300 6909 853 7301 3279 123 7186 3194 5553 5133 1931 4622 6075 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 3146 2594 2315 3389 3885 2621 4116 5389 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4757 4502 4703 9034 9108 5451 2619 5022 9158 490 6540 1466 2962 8771 3036 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 2511 1311 6560 7519 8027 9217 6464 6364 3779 4822 3563 3982 5896 5510 6655 1524 2846 3137 621 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 3643 712 9044 2230 499 7166 96 3172 8431 8401 1470 6356 8817 927 4212 2152 1795 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 3950 7420 4878 7406 7046 7808 4911 8804 6927 8820 3264 300 8670 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 5280 2791 641 5454 4581 5420 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 1471 6279 5806 
6867 5890 2396 3416 1981 6073 5872 3045 4182 7607 3318 4414 2998 6553 7139 5624 2123 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 6759 5393 1107 2597 878 9309 7576 5250 1759 3142 2015 571 3921 1255 7080 893 2160 1355 82 1562 9153 8583 4085 4644 7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 4478 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 4054 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 4 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 3034 7887 3425 1118 926 3430 1544 5902 2282 1124 2334 129 1372 4842 6473 4382 1028 415 8269 8073 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 928 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 4973 382 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3004 3847 3676 1184 1705 6745 1263 5020 746 1888 7036 1033 3914 5433 3905 4641 8909 228 4801 3766 8085 643 6914 9280 3013 5657 3696 1590 2920 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 3943 1799 8577 5577 4969 9163 2025 6061 4026 5732 588 7017 1415 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2359 2912 6109 100 8149 5470 2807 3384 6413 3362 5621 6019 9241 9268 7703 4111 7967 5458 7181 5492 1112 6729 4577 106 8853 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 767 4796 8147 2658 5769 6985 7065 421 7990 3289 1540 9316 2251 6896 5947 4965 2652 4480 963 9047 7168 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 4078 2351 2322 3881 4945 560 6837 6094 6475 7901 
3 771 8029 3135 8044 7127 3741 5156 7030 4906 113 3747 7042 5232 5225 3002 4747 6879 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 7987 828 3003 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 4631 509 211 6230 7241 8250 2219 2582 8353 7790 7583 4462 3904 9004 6942 1704 5686 8051 2981 5511 6182 7088 1699 1222 3455 6189 1528 5197 6221 7893 3283 2837 7773 8766 2942 8021 614 4102 7362 1786 400 133 556 3127 5237 3727 1440 3873 6322 8448 6285 8696 8800 4009 3386 454 4847 5685 9093 246 1314 5895 6863 4302 4260 8405 8417 7116 255 3223 4737 7852 6337 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6215 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 3788 6138 7831 4442 757 4464 1170 2568 19 323 6584 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 8236 2897 4004 1526 5345 4423 6246 8578 1057 3711 4986 4785 3997 7311 4788 107 8387 2041 2608 8628 5830 6031 783 6817 3293 541 773 8473 2501 7247 5667 804 483 1639 696 6060 5429 5762 1527 7342 1329 6225 7895 381 8030 8520 8362 4734 3526 9273 2039 4142 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 8101 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 1225 6371 624 2209 1428 1158 7648 466 8765 802 153 4639 3657 6482 9320 2693 6591 3294 2617 5052 6305 3227 8784 7170 93 5868 6716 1671 178 2703 954 3254 2262 5046 5743 8647 6393 7706 6604 3728 6978 7489 7474 8754 2740 2233 6038 1491 8814 2080 2358 5944 5653 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 1836 1863 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 259 7999 8122 910 224 6152 7142 6070 7523 8411 2408 
6766 9214 9312 8325 6192 626 6025 6240 8708 4630 6777 1075 8906 408 9269 6236 9067 2514 8568 2324 156 3136 3530 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 8291 1133 8474 7035 8653 989 4569 9092 8347 3102 1743 9086 5140 7438 1530 4342 2460 7646 5047 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 2112 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 8226 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 1878 4926 6637 3197 7757 8249 4055 6502 1248 990 3928 2770 2751 1020 6426 4190 6839 2671 884 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 4395 6835 7799 7890 2785 5468 7302 5862 1803 6376 3171 8591 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 5813 8982 69 4421 7150 550 8829 8685 3147 8956 3166 7023 8633 3308 2014 3573 3880 4045 2069 6051 4950 702 6664 8418 2454 6181 4853 4166 7022 7418 3605 9181 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 7461 8120 4582 6373 2805 4872 4869 5493 5867 2670 7099 30 8933 930 7919 501 7261 5289 7449 7772 3613 7848 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 2345 8359 4061 1474 3229 270 4245 1979 5995 1517 8652 4006 4880 6137 4693 2528 6996 2926 5798 2477 2549 1128 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1514 1058 4586 6608 7985 3044 1822 3628 6851 549 1811 2184 2601 4608 8922 2540 6659 3859 307 3650 3767 8167 505 4366 4824 5520 461 1933 2401 8106 2055 7844 8544 8838 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 6573 4729 4080 1034 2961 534 8194 5598 
9218 2424 329 4154 1597 922 109 8823 3578 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 3056 468 3808 3064 8798 7052 7767 9231 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 8377 817 8714 6066 4008 3640 6015 1021 7601 4855 6017 87 7071 2730 7268 3614 6084 6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 3876 5824 8020 426 4982 4172 1157 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 8606 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 4199 8736 8991 5241 1698 1294 1753 196 2987 8680 4658 4144 8639 6441 8255 8156 3677 6385 6520 7700 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 867 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 417 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 3626 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5712 5533 9222 2821 1897 819 766 4060 4902 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 3466 665 1047 5881 3328 4664 315 1315 1462 8616 7725 2756 5749 1730 8184 4567 5065 7499 8867 1304 3669 9192 410 8177 6710 1210 2329 8443 3911 1899 7686 3315 7190 6180 3116 5341 4394 8337 9182 6969 5715 2172 1742 2782 3715 9195 7960 2517 4890 8294 2337 8014 3353 7475 2193 4843 8831 4200 4653 6196 6957 3063 2996 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 3155 4707 4331 1494 8756 3174 1074 8116 8295 8311 3048 3752 6050 6483 8003 9175 4674 1642 2556 6166 7165 8441 5413 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 249 2430 7426 8131 411 6267 2045 6606 899 8065 9052 7507 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 1700 5313 6736 79 8212 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 1069 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 5582 493 4972 445 8286 555 320 8300 322 617 3413 
4459 525 5631 6314 5157 5300 8545 182 1031 4429 2495 7586 1534 3099 3916 3738 1919 535 2119 1299 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 1673 3087 3488 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 436 4214 4536 3693 5426 6767 5903 4368 2170 5051 7490 7882 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 2355 1619 3030 1975 5623 2415 1957 6141 9278 3226 3062 5670 7326 8759 8496 6619 8187 8262 6199 951 7183 668 2388 4698 5681 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 2831 776 5010 5770 5282 3574 5102 1278 2281 5455 305 4628 4663 9119 7487 8746 4889 6569 1175 102 2386 8940 2479 5566 53 8833 1918 8001 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 6477 7364 7328 183 4761 7543 304 1196 4623 7839 2139 5519 1953 533 5989 7590 7428 6346 6162 1091 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 4194 213 789 4991 3675 7456 5752 175 7556 4195 907 2248 9057 8467 4594 1017 7968 880 7446 3304 1666 4942 3867 4802 9156 6357 4621 887 6213 5261 1336 521 8928 1818 7864 4792 6742 157 1593 823 7235 5303 5633 1100 1692 8047 5993 1460 6714 1630 6440 6307 3608 292 212 401 5974 7107 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 287 8484 2380 8119 7167 737 5076 6598 3596 5382 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 1407 4410 4579 1061 7113 486 862 3435 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 8180 3347 3459 4162 2690 7478 5856 5240 2389 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 7741 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 120 
367 8839 8749 5353 4158 3148 9114 1233 9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5097 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 4862 7004 1186 8824 1651 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 8475 7554 4031 6297 6059 5329 115 2058 7650 7634 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 6474 5124 5201 6556 6617 9091 3945 8402 5648 5257 2192 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 8068 5655 5997 1029 7506 6740 2575 2990 4898 583 7402 3290 5388 6715 8235 5361 4970 1363 3338 5731 9014 5358 2216 2856 635 1193 3705 6334 7666 5270 1384 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 5385 319 2357 8761 8910 8693 1204 489 4827 8024 7832 6427 3895 89 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 77 5365 1840 485 4456 2841 1169 3271 7144 6886 9140 7173 6003 1659 1807 8371 2439 274 4660 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 2909 3839 4063 1944 6509 1296 7770 1880 6610 4075 9331 4484 302 418 4219 1333 2350 6498 8424 4694 4883 5269 6580 5007 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 4268 2320 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 8440 851 657 2446 4292 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 6161 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 2884 3474 3802 3851 4224 7237 5415 7998 7207 4106 9036 1046 8731 5070 6818 4592 6056 693 1328 3309 5791 2629 2736 202 388 7886 4417 8786 8822 4035 7718 8492 5505 1192 4388 8941 5019 7538 6732 7296 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 6402 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 3782 8252 4706 2675 3790 7459 6164 7316 1149 6687 582 3139 5040 7645 3882 7322 4034 1861 4701 8757 3208 8801 6349 8907 
1823 4528 4789 143 4746 9234 3866 9245 1911 1366 4393 2061 859 1959 6967 3138 7382 9031 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6796 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 223 8615 7417 5771 3170 8061 2935 8263 8257 6883 1276 1239 812 6258 3922 7525 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 1433 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 3442 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 6046 4285 8852 7409 8971 8278 7534 7792 2444 7496 8063 1665 248 3894 4585 1982 66 6651 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 8239 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 3843 491 7153 6217 1148 4741 1761 5484 3423 5474 6916 5876 7252 1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 2211 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 3722 7437 3716 7025 4000 6897 1408 7154 5013 2204 9233 4225 3817 1877 9161 2197 6991 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 6849 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 5099 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 4324 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 4533 6917 4167 4618 2115 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 2484 7188 6028 7530 2828 1977 3238 6496 2340 110 3247 7532 7541 924 1632 484 4487 4439 6447 1319 4944 6347 1791 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8525 8207 5845 9011 5525 4269 4700 1824 
8186 8872 8299 3957 8242 4558 6439 2666 5943 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 9137 4337 2809 2445 809 8298 8643 8316 4951 6853 1572 3215 3938 2249 6515 1337 8328 7712 1429 4117 5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 8056 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4952 4427 4876 9166 3107 2772 6295 5001 5296 3371 6518 6327 854 1615 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 6926 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 1298 7758 7176 9205 2276 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 3663 6363 2971 431 9285 2513 1116 3656 4529 6366 5758 6339 8398 816 4153 648 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 8273 3350 3117 4685 9219 4334 5165 2035 7224 4066 4253 4447 3815 5038 253 3658 2252 330 3967 6443 2143 7336 6135 593 2734 8390 4655 7800 1399 1173 5618 2822 7905 7503 4431 2443 1568 3909 1974 2496 4772 5164 4105 2138 2864 3799 3924 4882 8245 1585 5528 5692 5730 5832 137 3175 2894 2062 3899 2752 4028 2113 5411 293 2647 730 3758 1667 8879 9303 6653 3698 3968 3053 503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 4881 1182 6650 7290 3487 2814 5668 7567 5333 3724 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5575 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 6193 5297 6572 8877 6874 430 5041 5267 1145 7448 620 9112 4294 1432 72 130 2393 7920 4597 6614 8889 3697 1895 3462 2616 3978 4791 7846 7780 8372 428 6559 8326 9211 2363 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 
720 8893 3021 1978 4782 1816 2083 4051 1446 5870 971 9097 8006 4222 8287 686 1377 611 8153 4920 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 108 2793 5151 6977 2587 8188 8752 6318 5815 5116 263 3311 5191 5689 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4993 4141 5407 1865 520 7305 7208 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7873 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 4826 7674 1600 5078 2949 6624 6541 8986 5740 4679 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 6955 3599 2168 1420 1721 1794 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 7660 3098 8773 4076 1576 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 966 2052 8344 1752 7199 4412 8895 8882 2463 339 56 5390 4821 7555 6558 1905 5258 8880 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 872 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 5364 7480 9313 716 3395 6843 2292 918 4329 1035 6344 8593 3404 5212 837 480 8524 1342 3690 6797 7414 288 8863 3352 1628 24 135 3314 2181 8650 5915 8078 6812 1375 6040 906 5635 7126 1387 7458 6119 5591 3795 1531 95 1960 7522 3033 898 4607 4921 3913 2623 4430 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 7427 4267 9094 7050 6048 8455 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 7918 2022 9066 630 2500 5111 6561 5127 8095 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 1489 5954 6662 2207 973 3361 960 6350 4170 7431 8076 1129 750 7559 7194 2261 2300 6590 5893 6889 3125 8788 334 7286 3472 8164 7693 1469 1181 669 7515 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 
4069 7198 7560 3306 6100 2932 4473 1741 14 4672 7564 8748 8874 3804 3678 2240 2610 2862 1358 5716 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 448 152 7299 5431 6178 793 3444 9120 8410 4963 772 5457 6954 3014 6881 286 553 1948 6398 6255 3057 8646 6176 2700 7106 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 4013 3623 7950 1971 1966 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 2639 1177 4282 6492 8128 5859 5029 5123 2877 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 1394 3262 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 7293 4619 4058 2781 8715 1272 5734 4474 4863 4367 49 8844 5605 8671 6743 4281 7077 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 2527 4661 6819 3835 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 6115 3520 1232 5811 317 8976 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 2589 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 857 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 8702 5386 6095 6335 1561 8805 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 7269 362 4178 4145 6127 5162 2399 9225 7068 1650 794 3007 1348 7736 444 6081 5298 2026 2543 9087 3593 7425 3730 8468 2641 7529 1720 6377 8732 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 6505 4118 1617 8837 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 7443 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 2073 
4271 1043 2922 8211 2196 5260 3789 7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 8386 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 5538 850 2669 7612 104 9290 2526 1287 4160 4633 7125 742 744 4534 2407 7714 4555 8764 7661 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 9301 127 7069 4513 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 5639 4888 1584 1969 4846 2915 6804 2775 7306 6506 9306 5231 7740 4283 953 6725 458 8290 1504 1539 8885 138 3764 1256 257 335 1011 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 2964 9207 4910 8709 4411 1672 457 5852 8037 4932 3679 8794 2362 8592 495 8432 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 8935 6681 2070 176 9062 972 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 2863 672 4995 3807 447 1656 2005 5113 3297 8858 2118 6309 1926 481 1156 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 4709 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 9329 5494 3721 629 3621 7371 59 1999 6704 3734 2698 4691 6938 9117 8415 6353 6750 9077 2679 7623 2478 7321 6611 4007 2076 5772 6416 2264 8348 2672 6546 754 6934 7908 8546 4404 592 4748 6625 2129 7944 2377 6 8929 8275 3515 4524 3660 8710 419 6878 170 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 7825 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 1560 5276 8089 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 6644 1683 8141 7754 5744 2952 7568 654 7457 5368 3310 1510 4440 1513 3072 8034 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 6672 8531 5404 6550 8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 8741 3969 2956 4565 920 1830 
8558 1930 6677 6825 8256 7454 7521 4710 1768 3753 6459 5606 5292 1397 240 2733 946 6711 3242 2627 4929 5006 3202 132 2295 2746 1293 2124 5405 4065 818 7464 1820 4398 1312 6994 6920 261 987 6120 3109 331 2986 4338 7774 5122 8396 1364 8969 6712 8161 7083 7595 5940 1566 6419 8634 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 3173 4979 9288 6860 1662 6171 225 5143 313 8327 3275 3385 7626 3103 4401 6794 5600 5043 7664 933 6830 4452 3980 1604 5875 6633 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 9334 51 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 3652 7396 1769 1171 6563 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 7803 6809 3055 188 8031 3124 3683 4537 988 2297 4893 6499 3396 839 4467 5195 4041 6457 4441 6378 6472 6195 4912 6884 5922 7014 1660 38 1595 6752 4554 1292 2709 3800 6057 1980 8775 6587 6392 6263 7214 5219 282 309 6685 2253 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 4983 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6754 6950 5281 3028 8321 3877 7614 8939 4188 2223 239 4745 6875 7096 5571 4403 2640 5556 1845 6690 1825 4157 314 4682 8825 1003 6206 8093 7215 6465 99 8077 6631 4206 2523 366 1208 6043 4640 1457 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 4476 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 6533 4113 2423 2425 4343 5800 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 5701 5724 3366 8050 4984 5023 9203 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 1143 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 3821 2349 2259 6545 8100 8395 2258 2911 5108 3946 1406 8683 8296 5579 2177 8264 1425 3940 957 3647 515 5342 8363 2449 3108 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 1852 8856 
392 2018 8878 369 5711 9230 5304 7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 7359 6431 1290 5918 8705 3438 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 3428 6487 6595 45 5792 333 762 2383 3388 666 2166 460 943 364 6980 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 5119 241 5707 9227 544 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 7965 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 5146 1939 6369 8548 6163 5526 4068 9030 5349 8433 748 1477 4265 9200 3878 462 6846 9040 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 3972 5544 3337 6855 1546 2824 1718 6009 2042 251 9076 3330 5004 192 4717 3797 1146 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 6126 1227 1226 781 937 6343 2578 2892 4124 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 8319 827 4175 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 1955 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 4255 203 4239 5976 6200 6435 4425 787 1121 6034 13 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 3786 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 4003 1318 739 3296 8202 1552 6204 5236 3576 4699 9238 1879 488 2274 433 5587 1678 9282 7914 8552 6445 7971 8331 6880 7476 7282 1570 7271 3827 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 3590 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1618 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 7273 5265 
6605 7917 1607 6074 4668 7061 1580 8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 7929 6971 7854 86 9128 4298 622 790 9155 6579 2203 7716 1265 8645 3834 1174 7380 623 8936 4306 8082 4312 8661 5753 7243 2768 8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 2465 4422 5248 7867 1078 5015 6660 1676 5354 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 2466 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 625 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 6129 384 1097 2084 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 2716 3735 3012 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5488 5145 5098 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 7582 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 512 2754 2687 1596 5376 1512 566 6382 7360 1757 8035 2296 4264 3551 1053 4716 1537 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 3974 891 1323 5958 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 5126 6395 4150 547 4120 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 8369 5994 341 81 4083 1685 5152 3380 8739 6615 3829 164 7927 4779 829 4216 8528 3641 4606 2769 6970 1545 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 4293 5166 5805 4126 2451 6906 7234 9243 3778 2940 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 968 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 3464 4823 8588 2938 3060 6406 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 4990 7370 7131 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 6900 5726 2731 2020 4503 3313 6727 8793 2304 523 
6036 58 7993 5512 5049 2721 8482 673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 638 6749 7677 981 7160 4726 1886 7845 7911 6975 568 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 8888 2945 2091 1873 6303 729 6728 2156 3267 1860 6597 1374 4930 5253 938 580 5825 4839 166 8198 6892 8701 74 7094 7284 8954 3156 6140 4279 5594 2229 7535 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 7973 1258 8828 4036 5838 9263 8529 2788 4202 237 3838 1291 2305 4056 5628 7281 1430 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 8439 830 1942 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 4399 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 1275 8230 2302 1114 4960 8860 3900 6468 5058 1505 8868 5588 3858 1947 2565 1472 8499 243 8442 6583 7085 5374 2250 4291 4426 492 2311 8305 3662 5338 8780 7488 3890 5005 2442 4680 7358 9116 4397 5999 587 7902 83 3566 2134 8942 4767 6601 2456 1745 5736 5254 8017 4015 7690 3798 8947 1067 2116 7945 590 2547 2535 64 2053 5359 2493 6669 4351 6412 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 2440 5189 2959 2044 8917 2046 6313 6333 5318 2763 4301 2555 2213 2933 4121 1340 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 1473 5008 3585 3627 2914 5356 2997 2347 881 5652 4849 8808 8351 4017 2010 6836 7616 4391 3630 3712 6099 2969 5238 4333 2301 4406 1236 1050 1864 1104 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 1740 8428 3105 5117 1095 1480 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 6979 7084 5091 2413 8170 5775 1817 529 7220 813 2916 5130 8972 126 1243 2370 4831 9122 3010 5104 2613 6761 7482 909 2146 4595 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 
8992 632 1093 4543 645 2352 4115 373 1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7596 7134 6636 5804 1858 6214 4730 8536 1203 3118 9202 1875 5885 8975 168 5898 4014 4186 3346 3041 5558 9296 8157 4339 3234 1738 2604 6803 5387 5590 125 2173 8012 8005 4858 3069 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 2176 6494 7637 8406 3856 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7138 7432 7602 8333 1582 1378 519 482 9279 8015 6592 4514 3542 2612 628 5053 6699 6227 2094 1621 847 3598 2728 8490 7276 6620 8345 9216 4278 4059 9058 5063 5816 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 8243 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 46 3952 6418 2510 5783 20 3816 2715 3930 2548 5204 4122 4103 708 7756 3825 777 3550 8502 3929 5440 6751 7764 4070 7331 3743 9131 9206 3828 23 41 4197 234 5723 7622 8832 4626 2169 5599 2976 5266 1967 1150 5334 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 4040 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 4234 1653 4485 4019 3370 919 1330 6085 2078 3768 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 6247 6912 4681 1300 7407 8612 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 2727 4336 9090 6403 7255 8919 6522 6760 8898 4803 1938 374 8686 9150 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 15 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 7392 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 9179 785 5325 4770 4250 52 4634 5072 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 7894 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 2866 4011 8751 4776 7954 7102 
5697 2032 5729 5017 6962 2051 1092 764 9019 2759 8581 1484 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 7064 709 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 6179 9300 1142 7947 6456 6291 5789 6538 9134 3049 5075 5399 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 3854 1081 2827 1496 2574 6167 3183 4287 5482 1722 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 4230 2567 6241 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2374 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 5255 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 2474 4196 3495 7217 5206 4836 7759 4376 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3245 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 8171 7978 3059 9196 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 5853 8121 1764 3046 2400 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 2534 7717 6785 471 8214 231 4241 5310 3844 5746 2011 7209 336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 8747 676 3964 6362 3510 7510 2308 1806 5917 1189 4012 3387 1331 5319 5423 8900 147 3780 1696 9111 6783 6497 4104 1898 3987 260 4616 2121 9283 1400 2437 4670 2735 1163 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 5611 404 2691 1015 7092 7562 8624 2291 4193 5934 5503 2326 4408 2960 842 1963 3354 5568 9050 3806 439 9154 6055 6451 2190 7633 688 4354 8890 2813 2872 8102 8317 6609 1497 8389 6449 1682 3594 5103 5812 863 268 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 4865 1077 6507 1658 3811 1774 7897 2238 2943 191 3869 3246 4057 3188 414 8072 7838 1382 4962 6010 5363 4042 1983 4077 7429 1833 3583 4044 1109 1295 386 5481 3927 311 1349 5651 5878 562 2202 8904 765 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 
3579 869 2888 8657 6599 741 4288 2244 7357 5704 8791 208 8587 7969 4805 8526 4887 8871 7468 3343 886 7794 5764 2646 6454 6101 7885 7744 1297 4119 4856 122 2286 2925 5131 3570 5843 3027 5320 5626 540 1862 5401 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 9242 8096 8066 4325 6666 7193 7114 8060 2376 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 4249 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 1923 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7877 7876 897 5985 7346 7537 111 3700 1126 7896 1288 3419 4673 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 1453 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 1200 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 2247 5200 6341 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 9235 1137 9272 775 788 5786 5186 6746 2667 9145 7630 3953 1828 8827 6471 4702 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5396 5060 6486 3937 8023 824 5398 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 9265 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 6654 4330 995 3181 3422 456 1782 3408 6530 719 7587 5910 3058 740 2009 4207 5336 2798 9229 8668 2473 4221 1493 3281 171 9157 9139 7766 6220 9127 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 2953 4257 4935 2705 2572 3436 8513 5884 1385 4852 2637 7091 2761 6007 8332 6694 2422 4917 2186 6898 1390 6965 3132 7698 475 2002 2692 5024 7365 7373 4091 
1731 947 3962 8692 1788 8734 8656 6862 6856 1950 1914 5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 5900 9043 4958 1935 1821 1187 1153 7737 7223 3820 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 34 3560 2179 5462 1402 3654 1376 7936 4246 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 7089 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 2886 3541 8132 2063 8201 5129 2818 7949 6936 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7763 7608 8584 2071 7868 2804 3655 7048 6847 3276 4082 4272 3910 3709 1574 4559 7580 7081 5014 7769 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 8623 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 316 3009 3691 763 4875 3572 4642 3128 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 8589 3915 3095 8310 3180 7043 4458 2889 57 4483 7667 8375 1434 7493 6986 4733 8471 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 3096 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 222 4609 7291 8542 6501 7548 4557 6274 1010 5226 7309 1317 9056 6275 1624 1099 4191 4030 7270 5392 2316 3819 1670 8154 8045 4807 8864 2391 5908 8338 8218 6400 9193 3165 843 6613 6941 4380 9332 5629 7557 4321 3702 681 734 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 3079 1680 6841 703 684 8293 3682 5733 4818 3231 3078 5562 9001 3889 7024 2519 1713 3287 219 6021 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 4181 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 1929 7533 5223 7711 915 1844 5751 3008 8055 961 6142 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 1841 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 8193 6582 5578 
1231 9274 7260 7751 8052 6799 2089 2342 8451 3260 5550 7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 6514 5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3502 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 4768 5641 1884 2330 1804 344 698 3089 1532 4454 761 7289 8094 3432 1747 6811 8722 8826 4646 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 6002 7913 1508 5317 7755 2784 4964 3431 6209 3755 6022 6399 6232 3954 455 5416 6448 1558 7591 245 140 9210 6585 4084 967 7798 6795 7095 6733 3861 9264 361 1045 755 8042 7074 7778 6415 4724 6450 2049 1563 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 1079 5009 3955 7517 5128 3417 3019 2725 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 1019 4669 1785 3533 5877 2704 8603 3726 6668 497 1085 6815 6157 6646 6964 186 8097 5645 8481 8215 3775 2542 7514 5699 4072 3518 5767 3239 3740 1404 8981 4086 6397 6984 4204 6899 682 6589 3317 2944 3456 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 4481 3377 266 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 7653 5467 6234 691 8758 2122 1213 2908 1492 1437 2187 1266 2395 7278 8491 5256 1554 8163 5966 7128 7904 1691 6272 1264 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 675 2125 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 7566 148 8151 8789 7907 8596 715 6018 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 8438 3504 6739 
4180 5835 4243 25 4002 1976 3482 8392 158 5181 4885 8985 11 6872 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 5957 149 6748 1663 3559 7647 2566 1359 8787 5259 7010 554 8231 4229 6005 8172 8125 1350 3571 9051 1973 1386 1781 5788 159 7007 3220 1846 3093 4445 2056 8370 3211 1113 4384 2231 273 4276 642 7663 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3791 3521 210 7388 1039 6269 4052 8570 3285 564 8039 3546 6203 1183 6107 4147 6216 2234 7185 3192 7155 2001 7777 876 944 908 7791 5465 6784 65 9172 5675 7075 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 5741 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/got10k_train_split.txt ================================================ 3784 8998 3906 1631 8277 8358 2338 7938 2988 8302 2662 2663 2825 7447 4781 2218 6348 5860 4517 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 701 683 2081 1831 2404 1459 2741 5972 3618 7462 2654 103 2174 6224 2989 2506 2766 5912 2699 3295 3986 609 4895 6673 801 1098 1602 2490 3129 8476 3186 7355 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 731 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8139 8760 8173 2332 4131 5207 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 1551 998 6565 8127 8927 2544 4365 510 768 3535 3875 6808 2931 487 1088 4451 368 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 3756 7840 6315 7827 3300 6261 4163 2217 6549 94 7236 9136 1857 6691 3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 4295 
1889 1908 4868 4498 1968 9103 3273 8723 7413 4114 5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 7294 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4217 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 7032 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 5523 7403 3379 2323 4833 5750 3178 6548 8891 7501 3280 7404 343 2171 8397 1367 8611 6118 6603 3729 7182 9048 7733 5642 7141 3335 4845 5449 3467 6250 163 5168 2040 5339 3609 8352 3426 8567 769 187 6151 6437 7028 8507 3970 9146 2068 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 9115 4132 1211 5459 4814 545 4556 238 4296 2724 1260 2581 6087 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 7643 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 1732 6702 7027 6388 7012 328 2130 452 306 7669 3134 5761 3703 44 4189 695 7672 5224 9215 5644 3143 3704 5443 2348 7177 2328 4725 354 1418 7810 7746 9002 5759 7226 4535 9160 4385 5397 7249 2936 3204 6287 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 4652 1964 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 6789 5636 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 7821 9224 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 7310 1413 1932 4787 2579 3325 508 5610 6480 4290 479 3792 6628 2545 6717 6972 2665 6730 3547 6845 5929 3540 4356 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 941 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 
8892 8162 1212 6277 3725 7806 6149 7874 718 6888 7118 277 656 8763 8289 4759 5854 8659 7710 3145 5981 1881 5799 6947 1609 6396 2631 2887 318 2550 6132 1736 2907 7816 48 4304 8133 6698 2760 7779 7732 7642 1154 7242 711 9262 539 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 8241 1856 8641 3981 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 6968 2755 1924 2098 2972 6006 5865 8740 2418 3401 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6300 6909 853 7301 3279 123 7186 3194 5553 5133 1931 4622 6075 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 3146 2594 2315 3389 3885 2621 4116 5389 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4757 4502 4703 9034 9108 5451 2619 5022 9158 490 6540 1466 2962 8771 3036 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 2511 1311 6560 7519 8027 9217 6464 6364 3779 4822 3563 3982 5896 5510 6655 1524 2846 3137 621 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 3643 712 9044 2230 499 7166 96 3172 8431 8401 1470 6356 8817 927 4212 2152 1795 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 3950 7420 4878 7406 7046 7808 4911 8804 6927 8820 3264 300 8670 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 5280 2791 641 5454 4581 5420 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 1471 6279 5806 6867 5890 2396 3416 1981 6073 5872 3045 4182 7607 3318 4414 2998 6553 7139 5624 2123 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 6759 5393 1107 2597 878 9309 7576 5250 1759 3142 2015 571 3921 1255 
7080 893 2160 1355 82 1562 9153 8583 4085 4644 7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 4478 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 4054 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 4 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 3034 7887 3425 1118 926 3430 1544 5902 2282 1124 2334 129 1372 4842 6473 4382 1028 415 8269 8073 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 928 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 4973 382 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3004 3847 3676 1184 1705 6745 1263 5020 746 1888 7036 1033 3914 5433 3905 4641 8909 228 4801 3766 8085 643 6914 9280 3013 5657 3696 1590 2920 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 3943 1799 8577 5577 4969 9163 2025 6061 4026 5732 588 7017 1415 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2359 2912 6109 100 8149 5470 2807 3384 6413 3362 5621 6019 9241 9268 7703 4111 7967 5458 7181 5492 1112 6729 4577 106 8853 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 767 4796 8147 2658 5769 6985 7065 421 7990 3289 1540 9316 2251 6896 5947 4965 2652 4480 963 9047 7168 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 4078 2351 2322 3881 4945 560 6837 6094 6475 7901 3 771 8029 3135 8044 7127 3741 5156 7030 4906 113 3747 7042 5232 5225 3002 4747 6879 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 7987 828 3003 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 
4631 509 211 6230 7241 8250 2219 2582 8353 7790 7583 4462 3904 9004 6942 1704 5686 8051 2981 5511 6182 7088 1699 1222 3455 6189 1528 5197 6221 7893 3283 2837 7773 8766 2942 8021 614 4102 7362 1786 400 133 556 3127 5237 3727 1440 3873 6322 8448 6285 8696 8800 4009 3386 454 4847 5685 9093 246 1314 5895 6863 4302 4260 8405 8417 7116 255 3223 4737 7852 6337 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6215 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 3788 6138 7831 4442 757 4464 1170 2568 19 323 6584 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 8236 2897 4004 1526 5345 4423 6246 8578 1057 3711 4986 4785 3997 7311 4788 107 8387 2041 2608 8628 5830 6031 783 6817 3293 541 773 8473 2501 7247 5667 804 483 1639 696 6060 5429 5762 1527 7342 1329 6225 7895 381 8030 8520 8362 4734 3526 9273 2039 4142 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 8101 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 1225 6371 624 2209 1428 1158 7648 466 8765 802 153 4639 3657 6482 9320 2693 6591 3294 2617 5052 6305 3227 8784 7170 93 5868 6716 1671 178 2703 954 3254 2262 5046 5743 8647 6393 7706 6604 3728 6978 7489 7474 8754 2740 2233 6038 1491 8814 2080 2358 5944 5653 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 1836 1863 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 259 7999 8122 910 224 6152 7142 6070 7523 8411 2408 6766 9214 9312 8325 6192 626 6025 6240 8708 4630 6777 1075 8906 408 9269 6236 9067 2514 8568 2324 156 3136 3530 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 8291 1133 8474 7035 
8653 989 4569 9092 8347 3102 1743 9086 5140 7438 1530 4342 2460 7646 5047 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 2112 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 8226 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 1878 4926 6637 3197 7757 8249 4055 6502 1248 990 3928 2770 2751 1020 6426 4190 6839 2671 884 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 4395 6835 7799 7890 2785 5468 7302 5862 1803 6376 3171 8591 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 5813 8982 69 4421 7150 550 8829 8685 3147 8956 3166 7023 8633 3308 2014 3573 3880 4045 2069 6051 4950 702 6664 8418 2454 6181 4853 4166 7022 7418 3605 9181 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 7461 8120 4582 6373 2805 4872 4869 5493 5867 2670 7099 30 8933 930 7919 501 7261 5289 7449 7772 3613 7848 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 2345 8359 4061 1474 3229 270 4245 1979 5995 1517 8652 4006 4880 6137 4693 2528 6996 2926 5798 2477 2549 1128 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1514 1058 4586 6608 7985 3044 1822 3628 6851 549 1811 2184 2601 4608 8922 2540 6659 3859 307 3650 3767 8167 505 4366 4824 5520 461 1933 2401 8106 2055 7844 8544 8838 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 6573 4729 4080 1034 2961 534 8194 5598 9218 2424 329 4154 1597 922 109 8823 3578 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 3056 468 3808 3064 8798 7052 7767 9231 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 8377 817 8714 6066 4008 3640 6015 1021 
7601 4855 6017 87 7071 2730 7268 3614 6084 6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 3876 5824 8020 426 4982 4172 1157 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 8606 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 4199 8736 8991 5241 1698 1294 1753 196 2987 8680 4658 4144 8639 6441 8255 8156 3677 6385 6520 7700 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 867 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 417 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 3626 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5712 5533 9222 2821 1897 819 766 4060 4902 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 3466 665 1047 5881 3328 4664 315 1315 1462 8616 7725 2756 5749 1730 8184 4567 5065 7499 8867 1304 3669 9192 410 8177 6710 1210 2329 8443 3911 1899 7686 3315 7190 6180 3116 5341 4394 8337 9182 6969 5715 2172 1742 2782 3715 9195 7960 2517 4890 8294 2337 8014 3353 7475 2193 4843 8831 4200 4653 6196 6957 3063 2996 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 3155 4707 4331 1494 8756 3174 1074 8116 8295 8311 3048 3752 6050 6483 8003 9175 4674 1642 2556 6166 7165 8441 5413 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 249 2430 7426 8131 411 6267 2045 6606 899 8065 9052 7507 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 1700 5313 6736 79 8212 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 1069 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 5582 493 4972 445 8286 555 320 8300 322 617 3413 4459 525 5631 6314 5157 5300 8545 182 1031 4429 2495 7586 1534 3099 3916 3738 1919 535 2119 1299 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 1673 3087 3488 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 
436 4214 4536 3693 5426 6767 5903 4368 2170 5051 7490 7882 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 2355 1619 3030 1975 5623 2415 1957 6141 9278 3226 3062 5670 7326 8759 8496 6619 8187 8262 6199 951 7183 668 2388 4698 5681 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 2831 776 5010 5770 5282 3574 5102 1278 2281 5455 305 4628 4663 9119 7487 8746 4889 6569 1175 102 2386 8940 2479 5566 53 8833 1918 8001 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 6477 7364 7328 183 4761 7543 304 1196 4623 7839 2139 5519 1953 533 5989 7590 7428 6346 6162 1091 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 4194 213 789 4991 3675 7456 5752 175 7556 4195 907 2248 9057 8467 4594 1017 7968 880 7446 3304 1666 4942 3867 4802 9156 6357 4621 887 6213 5261 1336 521 8928 1818 7864 4792 6742 157 1593 823 7235 5303 5633 1100 1692 8047 5993 1460 6714 1630 6440 6307 3608 292 212 401 5974 7107 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 287 8484 2380 8119 7167 737 5076 6598 3596 5382 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 1407 4410 4579 1061 7113 486 862 3435 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 8180 3347 3459 4162 2690 7478 5856 5240 2389 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 7741 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 120 367 8839 8749 5353 4158 3148 9114 1233 9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5097 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 4862 7004 1186 8824 1651 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 
8475 7554 4031 6297 6059 5329 115 2058 7650 7634 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 6474 5124 5201 6556 6617 9091 3945 8402 5648 5257 2192 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 8068 5655 5997 1029 7506 6740 2575 2990 4898 583 7402 3290 5388 6715 8235 5361 4970 1363 3338 5731 9014 5358 2216 2856 635 1193 3705 6334 7666 5270 1384 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 5385 319 2357 8761 8910 8693 1204 489 4827 8024 7832 6427 3895 89 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 77 5365 1840 485 4456 2841 1169 3271 7144 6886 9140 7173 6003 1659 1807 8371 2439 274 4660 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 2909 3839 4063 1944 6509 1296 7770 1880 6610 4075 9331 4484 302 418 4219 1333 2350 6498 8424 4694 4883 5269 6580 5007 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 4268 2320 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 8440 851 657 2446 4292 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 6161 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 2884 3474 3802 3851 4224 7237 5415 7998 7207 4106 9036 1046 8731 5070 6818 4592 6056 693 1328 3309 5791 2629 2736 202 388 7886 4417 8786 8822 4035 7718 8492 5505 1192 4388 8941 5019 7538 6732 7296 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 6402 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 3782 8252 4706 2675 3790 7459 6164 7316 1149 6687 582 3139 5040 7645 3882 7322 4034 1861 4701 8757 3208 8801 6349 8907 1823 4528 4789 143 4746 9234 3866 9245 1911 1366 4393 2061 859 1959 6967 3138 7382 9031 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6796 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 223 8615 
7417 5771 3170 8061 2935 8263 8257 6883 1276 1239 812 6258 3922 7525 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 1433 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 3442 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 6046 4285 8852 7409 8971 8278 7534 7792 2444 7496 8063 1665 248 3894 4585 1982 66 6651 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 8239 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 3843 491 7153 6217 1148 4741 1761 5484 3423 5474 6916 5876 7252 1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 2211 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 3722 7437 3716 7025 4000 6897 1408 7154 5013 2204 9233 4225 3817 1877 9161 2197 6991 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 6849 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 5099 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 4324 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 4533 6917 4167 4618 2115 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 2484 7188 6028 7530 2828 1977 3238 6496 2340 110 3247 7532 7541 924 1632 484 4487 4439 6447 1319 4944 6347 1791 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8525 8207 5845 9011 5525 4269 4700 1824 8186 8872 8299 3957 8242 4558 6439 2666 5943 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 9137 4337 2809 2445 809 8298 8643 8316 4951 6853 1572 
3215 3938 2249 6515 1337 8328 7712 1429 4117 5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 8056 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4952 4427 4876 9166 3107 2772 6295 5001 5296 3371 6518 6327 854 1615 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 6926 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 1298 7758 7176 9205 2276 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 3663 6363 2971 431 9285 2513 1116 3656 4529 6366 5758 6339 8398 816 4153 648 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 8273 3350 3117 4685 9219 4334 5165 2035 7224 4066 4253 4447 3815 5038 253 3658 2252 330 3967 6443 2143 7336 6135 593 2734 8390 4655 7800 1399 1173 5618 2822 7905 7503 4431 2443 1568 3909 1974 2496 4772 5164 4105 2138 2864 3799 3924 4882 8245 1585 5528 5692 5730 5832 137 3175 2894 2062 3899 2752 4028 2113 5411 293 2647 730 3758 1667 8879 9303 6653 3698 3968 3053 503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 4881 1182 6650 7290 3487 2814 5668 7567 5333 3724 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5575 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 6193 5297 6572 8877 6874 430 5041 5267 1145 7448 620 9112 4294 1432 72 130 2393 7920 4597 6614 8889 3697 1895 3462 2616 3978 4791 7846 7780 8372 428 6559 8326 9211 2363 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 720 8893 3021 1978 4782 1816 2083 4051 1446 5870 971 9097 8006 4222 8287 686 1377 611 8153 4920 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 108 2793 
5151 6977 2587 8188 8752 6318 5815 5116 263 3311 5191 5689 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4993 4141 5407 1865 520 7305 7208 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7873 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 4826 7674 1600 5078 2949 6624 6541 8986 5740 4679 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 6955 3599 2168 1420 1721 1794 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 7660 3098 8773 4076 1576 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 966 2052 8344 1752 7199 4412 8895 8882 2463 339 56 5390 4821 7555 6558 1905 5258 8880 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 872 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 5364 7480 9313 716 3395 6843 2292 918 4329 1035 6344 8593 3404 5212 837 480 8524 1342 3690 6797 7414 288 8863 3352 1628 24 135 3314 2181 8650 5915 8078 6812 1375 6040 906 5635 7126 1387 7458 6119 5591 3795 1531 95 1960 7522 3033 898 4607 4921 3913 2623 4430 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 7427 4267 9094 7050 6048 8455 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 7918 2022 9066 630 2500 5111 6561 5127 8095 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 1489 5954 6662 2207 973 3361 960 6350 4170 7431 8076 1129 750 7559 7194 2261 2300 6590 5893 6889 3125 8788 334 7286 3472 8164 7693 1469 1181 669 7515 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 4069 7198 7560 3306 6100 2932 4473 1741 14 4672 7564 8748 8874 3804 3678 2240 2610 2862 1358 5716 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 448 152 7299 5431 6178 793 3444 9120 8410 4963 772 5457 6954 
3014 6881 286 553 1948 6398 6255 3057 8646 6176 2700 7106 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 4013 3623 7950 1971 1966 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 2639 1177 4282 6492 8128 5859 5029 5123 2877 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 1394 3262 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 7293 4619 4058 2781 8715 1272 5734 4474 4863 4367 49 8844 5605 8671 6743 4281 7077 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 2527 4661 6819 3835 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 6115 3520 1232 5811 317 8976 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 2589 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 857 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 8702 5386 6095 6335 1561 8805 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 7269 362 4178 4145 6127 5162 2399 9225 7068 1650 794 3007 1348 7736 444 6081 5298 2026 2543 9087 3593 7425 3730 8468 2641 7529 1720 6377 8732 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 6505 4118 1617 8837 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 7443 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 2073 4271 1043 2922 8211 2196 5260 3789 7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 8386 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 5538 850 2669 
7612 104 9290 2526 1287 4160 4633 7125 742 744 4534 2407 7714 4555 8764 7661 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 9301 127 7069 4513 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 5639 4888 1584 1969 4846 2915 6804 2775 7306 6506 9306 5231 7740 4283 953 6725 458 8290 1504 1539 8885 138 3764 1256 257 335 1011 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 2964 9207 4910 8709 4411 1672 457 5852 8037 4932 3679 8794 2362 8592 495 8432 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 8935 6681 2070 176 9062 972 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 2863 672 4995 3807 447 1656 2005 5113 3297 8858 2118 6309 1926 481 1156 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 4709 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 9329 5494 3721 629 3621 7371 59 1999 6704 3734 2698 4691 6938 9117 8415 6353 6750 9077 2679 7623 2478 7321 6611 4007 2076 5772 6416 2264 8348 2672 6546 754 6934 7908 8546 4404 592 4748 6625 2129 7944 2377 6 8929 8275 3515 4524 3660 8710 419 6878 170 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 7825 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 1560 5276 8089 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 6644 1683 8141 7754 5744 2952 7568 654 7457 5368 3310 1510 4440 1513 3072 8034 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 6672 8531 5404 6550 8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 8741 3969 2956 4565 920 1830 8558 1930 6677 6825 8256 7454 7521 4710 1768 3753 6459 5606 5292 1397 240 2733 946 6711 3242 2627 4929 5006 3202 132 2295 2746 1293 2124 5405 4065 818 7464 1820 4398 1312 6994 6920 261 987 6120 3109 331 2986 4338 7774 5122 
8396 1364 8969 6712 8161 7083 7595 5940 1566 6419 8634 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 3173 4979 9288 6860 1662 6171 225 5143 313 8327 3275 3385 7626 3103 4401 6794 5600 5043 7664 933 6830 4452 3980 1604 5875 6633 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 9334 51 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 3652 7396 1769 1171 6563 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 7803 6809 3055 188 8031 3124 3683 4537 988 2297 4893 6499 3396 839 4467 5195 4041 6457 4441 6378 6472 6195 4912 6884 5922 7014 1660 38 1595 6752 4554 1292 2709 3800 6057 1980 8775 6587 6392 6263 7214 5219 282 309 6685 2253 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 4983 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6754 6950 5281 3028 8321 3877 7614 8939 4188 2223 239 4745 6875 7096 5571 4403 2640 5556 1845 6690 1825 4157 314 4682 8825 1003 6206 8093 7215 6465 99 8077 6631 4206 2523 366 1208 6043 4640 1457 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 4476 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 6533 4113 2423 2425 4343 5800 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 5701 5724 3366 8050 4984 5023 9203 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 1143 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 3821 2349 2259 6545 8100 8395 2258 2911 5108 3946 1406 8683 8296 5579 2177 8264 1425 3940 957 3647 515 5342 8363 2449 3108 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 1852 8856 392 2018 8878 369 5711 9230 5304 7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 7359 6431 1290 5918 8705 3438 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 
3428 6487 6595 45 5792 333 762 2383 3388 666 2166 460 943 364 6980 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 5119 241 5707 9227 544 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 7965 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 5146 1939 6369 8548 6163 5526 4068 9030 5349 8433 748 1477 4265 9200 3878 462 6846 9040 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 3972 5544 3337 6855 1546 2824 1718 6009 2042 251 9076 3330 5004 192 4717 3797 1146 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 6126 1227 1226 781 937 6343 2578 2892 4124 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 8319 827 4175 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 1955 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 4255 203 4239 5976 6200 6435 4425 787 1121 6034 13 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 3786 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 4003 1318 739 3296 8202 1552 6204 5236 3576 4699 9238 1879 488 2274 433 5587 1678 9282 7914 8552 6445 7971 8331 6880 7476 7282 1570 7271 3827 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 3590 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1618 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 7273 5265 6605 7917 1607 6074 4668 7061 1580 8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 7929 6971 7854 86 9128 4298 622 790 9155 6579 2203 7716 1265 8645 3834 1174 
7380 623 8936 4306 8082 4312 8661 5753 7243 2768 8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 2465 4422 5248 7867 1078 5015 6660 1676 5354 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 2466 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 625 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 6129 384 1097 2084 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 2716 3735 3012 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5488 5145 5098 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 7582 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 512 2754 2687 1596 5376 1512 566 6382 7360 1757 8035 2296 4264 3551 1053 4716 1537 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 3974 891 1323 5958 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 5126 6395 4150 547 4120 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 8369 5994 341 81 4083 1685 5152 3380 8739 6615 3829 164 7927 4779 829 4216 8528 3641 4606 2769 6970 1545 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 4293 5166 5805 4126 2451 6906 7234 9243 3778 2940 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 968 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 3464 4823 8588 2938 3060 6406 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 4990 7370 7131 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 6900 5726 2731 2020 4503 3313 6727 8793 2304 523 6036 58 7993 5512 5049 2721 8482 673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 638 6749 7677 981 7160 4726 1886 7845 7911 6975 568 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 
8888 2945 2091 1873 6303 729 6728 2156 3267 1860 6597 1374 4930 5253 938 580 5825 4839 166 8198 6892 8701 74 7094 7284 8954 3156 6140 4279 5594 2229 7535 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 7973 1258 8828 4036 5838 9263 8529 2788 4202 237 3838 1291 2305 4056 5628 7281 1430 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 8439 830 1942 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 4399 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 1275 8230 2302 1114 4960 8860 3900 6468 5058 1505 8868 5588 3858 1947 2565 1472 8499 243 8442 6583 7085 5374 2250 4291 4426 492 2311 8305 3662 5338 8780 7488 3890 5005 2442 4680 7358 9116 4397 5999 587 7902 83 3566 2134 8942 4767 6601 2456 1745 5736 5254 8017 4015 7690 3798 8947 1067 2116 7945 590 2547 2535 64 2053 5359 2493 6669 4351 6412 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 2440 5189 2959 2044 8917 2046 6313 6333 5318 2763 4301 2555 2213 2933 4121 1340 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 1473 5008 3585 3627 2914 5356 2997 2347 881 5652 4849 8808 8351 4017 2010 6836 7616 4391 3630 3712 6099 2969 5238 4333 2301 4406 1236 1050 1864 1104 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 1740 8428 3105 5117 1095 1480 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 6979 7084 5091 2413 8170 5775 1817 529 7220 813 2916 5130 8972 126 1243 2370 4831 9122 3010 5104 2613 6761 7482 909 2146 4595 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 8992 632 1093 4543 645 2352 4115 373 1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7596 7134 6636 5804 1858 6214 
4730 8536 1203 3118 9202 1875 5885 8975 168 5898 4014 4186 3346 3041 5558 9296 8157 4339 3234 1738 2604 6803 5387 5590 125 2173 8012 8005 4858 3069 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 2176 6494 7637 8406 3856 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7138 7432 7602 8333 1582 1378 519 482 9279 8015 6592 4514 3542 2612 628 5053 6699 6227 2094 1621 847 3598 2728 8490 7276 6620 8345 9216 4278 4059 9058 5063 5816 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 8243 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 46 3952 6418 2510 5783 20 3816 2715 3930 2548 5204 4122 4103 708 7756 3825 777 3550 8502 3929 5440 6751 7764 4070 7331 3743 9131 9206 3828 23 41 4197 234 5723 7622 8832 4626 2169 5599 2976 5266 1967 1150 5334 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 4040 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 4234 1653 4485 4019 3370 919 1330 6085 2078 3768 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 6247 6912 4681 1300 7407 8612 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 2727 4336 9090 6403 7255 8919 6522 6760 8898 4803 1938 374 8686 9150 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 15 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 7392 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 9179 785 5325 4770 4250 52 4634 5072 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 7894 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 2866 4011 8751 4776 7954 7102 5697 2032 5729 5017 6962 2051 1092 764 9019 2759 8581 1484 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 7064 709 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 
6179 9300 1142 7947 6456 6291 5789 6538 9134 3049 5075 5399 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 3854 1081 2827 1496 2574 6167 3183 4287 5482 1722 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 4230 2567 6241 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2374 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 5255 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 2474 4196 3495 7217 5206 4836 7759 4376 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3245 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 8171 7978 3059 9196 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 5853 8121 1764 3046 2400 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 2534 7717 6785 471 8214 231 4241 5310 3844 5746 2011 7209 336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 8747 676 3964 6362 3510 7510 2308 1806 5917 1189 4012 3387 1331 5319 5423 8900 147 3780 1696 9111 6783 6497 4104 1898 3987 260 4616 2121 9283 1400 2437 4670 2735 1163 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 5611 404 2691 1015 7092 7562 8624 2291 4193 5934 5503 2326 4408 2960 842 1963 3354 5568 9050 3806 439 9154 6055 6451 2190 7633 688 4354 8890 2813 2872 8102 8317 6609 1497 8389 6449 1682 3594 5103 5812 863 268 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 4865 1077 6507 1658 3811 1774 7897 2238 2943 191 3869 3246 4057 3188 414 8072 7838 1382 4962 6010 5363 4042 1983 4077 7429 1833 3583 4044 1109 1295 386 5481 3927 311 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/got10k_val_split.txt ================================================ 1349 5651 5878 562 2202 8904 765 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 3579 869 2888 8657 6599 741 4288 2244 7357 5704 8791 208 
8587 7969 4805 8526 4887 8871 7468 3343 886 7794 5764 2646 6454 6101 7885 7744 1297 4119 4856 122 2286 2925 5131 3570 5843 3027 5320 5626 540 1862 5401 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 9242 8096 8066 4325 6666 7193 7114 8060 2376 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 4249 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 1923 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7877 7876 897 5985 7346 7537 111 3700 1126 7896 1288 3419 4673 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 1453 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 1200 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 2247 5200 6341 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 9235 1137 9272 775 788 5786 5186 6746 2667 9145 7630 3953 1828 8827 6471 4702 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5396 5060 6486 3937 8023 824 5398 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 9265 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 6654 4330 995 3181 3422 456 1782 3408 6530 719 7587 5910 3058 740 2009 4207 5336 2798 9229 8668 2473 4221 1493 3281 171 9157 9139 7766 6220 9127 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 2953 4257 4935 2705 2572 3436 8513 5884 1385 4852 2637 7091 2761 6007 8332 6694 2422 4917 2186 6898 1390 6965 3132 7698 475 2002 2692 5024 7365 7373 4091 1731 947 3962 8692 1788 8734 8656 6862 6856 1950 1914 
5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 5900 9043 4958 1935 1821 1187 1153 7737 7223 3820 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 34 3560 2179 5462 1402 3654 1376 7936 4246 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 7089 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 2886 3541 8132 2063 8201 5129 2818 7949 6936 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7763 7608 8584 2071 7868 2804 3655 7048 6847 3276 4082 4272 3910 3709 1574 4559 7580 7081 5014 7769 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 8623 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 316 3009 3691 763 4875 3572 4642 3128 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 8589 3915 3095 8310 3180 7043 4458 2889 57 4483 7667 8375 1434 7493 6986 4733 8471 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 3096 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 222 4609 7291 8542 6501 7548 4557 6274 1010 5226 7309 1317 9056 6275 1624 1099 4191 4030 7270 5392 2316 3819 1670 8154 8045 4807 8864 2391 5908 8338 8218 6400 9193 3165 843 6613 6941 4380 9332 5629 7557 4321 3702 681 734 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 3079 1680 6841 703 684 8293 3682 5733 4818 3231 3078 5562 9001 3889 7024 2519 1713 3287 219 6021 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 4181 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 1929 7533 5223 7711 915 1844 5751 3008 8055 961 6142 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 1841 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 8193 6582 5578 1231 9274 7260 7751 8052 6799 2089 2342 8451 3260 5550 
7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 6514 5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3502 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 4768 5641 1884 2330 1804 344 698 3089 1532 4454 761 7289 8094 3432 1747 6811 8722 8826 4646 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 6002 7913 1508 5317 7755 2784 4964 3431 6209 3755 6022 6399 6232 3954 455 5416 6448 1558 7591 245 140 9210 6585 4084 967 7798 6795 7095 6733 3861 9264 361 1045 755 8042 7074 7778 6415 4724 6450 2049 1563 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 1079 5009 3955 7517 5128 3417 3019 2725 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 1019 4669 1785 3533 5877 2704 8603 3726 6668 497 1085 6815 6157 6646 6964 186 8097 5645 8481 8215 3775 2542 7514 5699 4072 3518 5767 3239 3740 1404 8981 4086 6397 6984 4204 6899 682 6589 3317 2944 3456 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 4481 3377 266 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 7653 5467 6234 691 8758 2122 1213 2908 1492 1437 2187 1266 2395 7278 8491 5256 1554 8163 5966 7128 7904 1691 6272 1264 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 675 2125 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 7566 148 8151 8789 7907 8596 715 6018 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 8438 3504 6739 4180 5835 4243 25 4002 1976 3482 8392 158 5181 4885 8985 
11 6872 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 5957 149 6748 1663 3559 7647 2566 1359 8787 5259 7010 554 8231 4229 6005 8172 8125 1350 3571 9051 1973 1386 1781 5788 159 7007 3220 1846 3093 4445 2056 8370 3211 1113 4384 2231 273 4276 642 7663 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3791 3521 210 7388 1039 6269 4052 8570 3285 564 8039 3546 6203 1183 6107 4147 6216 2234 7185 3192 7155 2001 7777 876 944 908 7791 5465 6784 65 9172 5675 7075 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 5741 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/got10k_vot_exclude.txt ================================================ GOT-10k_Train_000004 GOT-10k_Train_000013 GOT-10k_Train_000015 GOT-10k_Train_000020 GOT-10k_Train_000024 GOT-10k_Train_000034 GOT-10k_Train_000038 GOT-10k_Train_000048 GOT-10k_Train_000051 GOT-10k_Train_000059 GOT-10k_Train_000077 GOT-10k_Train_000081 GOT-10k_Train_000089 GOT-10k_Train_000093 GOT-10k_Train_000094 GOT-10k_Train_000096 GOT-10k_Train_000104 GOT-10k_Train_000107 GOT-10k_Train_000108 GOT-10k_Train_000120 GOT-10k_Train_000132 GOT-10k_Train_000170 GOT-10k_Train_000186 GOT-10k_Train_000212 GOT-10k_Train_000213 GOT-10k_Train_000222 GOT-10k_Train_000223 GOT-10k_Train_000240 GOT-10k_Train_000246 GOT-10k_Train_000249 GOT-10k_Train_000266 GOT-10k_Train_000268 GOT-10k_Train_000287 GOT-10k_Train_000293 GOT-10k_Train_000305 GOT-10k_Train_000316 GOT-10k_Train_000319 GOT-10k_Train_000322 GOT-10k_Train_000331 GOT-10k_Train_000334 GOT-10k_Train_000354 GOT-10k_Train_000361 GOT-10k_Train_000368 GOT-10k_Train_000382 GOT-10k_Train_000401 GOT-10k_Train_000417 GOT-10k_Train_000448 GOT-10k_Train_000454 GOT-10k_Train_000458 GOT-10k_Train_000466 GOT-10k_Train_000475 GOT-10k_Train_000484 GOT-10k_Train_000488 
GOT-10k_Train_000501 GOT-10k_Train_000510 GOT-10k_Train_000512 GOT-10k_Train_000519 GOT-10k_Train_000539 GOT-10k_Train_000544 GOT-10k_Train_000555 GOT-10k_Train_000564 GOT-10k_Train_000568 GOT-10k_Train_000583 GOT-10k_Train_000587 GOT-10k_Train_000593 GOT-10k_Train_000621 GOT-10k_Train_000624 GOT-10k_Train_000625 GOT-10k_Train_000638 GOT-10k_Train_000648 GOT-10k_Train_000654 GOT-10k_Train_000669 GOT-10k_Train_000701 GOT-10k_Train_000709 GOT-10k_Train_000712 GOT-10k_Train_000731 GOT-10k_Train_000734 GOT-10k_Train_000737 GOT-10k_Train_000744 GOT-10k_Train_000746 GOT-10k_Train_000748 GOT-10k_Train_000762 GOT-10k_Train_000764 GOT-10k_Train_000765 GOT-10k_Train_000766 GOT-10k_Train_000767 GOT-10k_Train_000775 GOT-10k_Train_000783 GOT-10k_Train_000790 GOT-10k_Train_000829 GOT-10k_Train_000857 GOT-10k_Train_000859 GOT-10k_Train_000867 GOT-10k_Train_000872 GOT-10k_Train_000880 GOT-10k_Train_000884 GOT-10k_Train_000909 GOT-10k_Train_000915 GOT-10k_Train_000922 GOT-10k_Train_000928 GOT-10k_Train_000933 GOT-10k_Train_000941 GOT-10k_Train_000961 GOT-10k_Train_000966 GOT-10k_Train_000968 GOT-10k_Train_000971 GOT-10k_Train_000972 GOT-10k_Train_000995 GOT-10k_Train_001003 GOT-10k_Train_001010 GOT-10k_Train_001011 GOT-10k_Train_001019 GOT-10k_Train_001021 GOT-10k_Train_001035 GOT-10k_Train_001039 GOT-10k_Train_001047 GOT-10k_Train_001057 GOT-10k_Train_001069 GOT-10k_Train_001077 GOT-10k_Train_001079 GOT-10k_Train_001085 GOT-10k_Train_001088 GOT-10k_Train_001091 GOT-10k_Train_001104 GOT-10k_Train_001112 GOT-10k_Train_001113 GOT-10k_Train_001124 GOT-10k_Train_001128 GOT-10k_Train_001143 GOT-10k_Train_001145 GOT-10k_Train_001146 GOT-10k_Train_001148 GOT-10k_Train_001150 GOT-10k_Train_001154 GOT-10k_Train_001156 GOT-10k_Train_001157 GOT-10k_Train_001163 GOT-10k_Train_001181 GOT-10k_Train_001184 GOT-10k_Train_001189 GOT-10k_Train_001200 GOT-10k_Train_001225 GOT-10k_Train_001264 GOT-10k_Train_001288 GOT-10k_Train_001296 GOT-10k_Train_001298 GOT-10k_Train_001299 GOT-10k_Train_001314 
GOT-10k_Train_001319 GOT-10k_Train_001329 GOT-10k_Train_001331 GOT-10k_Train_001340 GOT-10k_Train_001374 GOT-10k_Train_001384 GOT-10k_Train_001394 GOT-10k_Train_001407 GOT-10k_Train_001415 GOT-10k_Train_001430 GOT-10k_Train_001433 GOT-10k_Train_001453 GOT-10k_Train_001457 GOT-10k_Train_001471 GOT-10k_Train_001473 GOT-10k_Train_001480 GOT-10k_Train_001484 GOT-10k_Train_001489 GOT-10k_Train_001514 GOT-10k_Train_001537 GOT-10k_Train_001544 GOT-10k_Train_001545 GOT-10k_Train_001551 GOT-10k_Train_001558 GOT-10k_Train_001560 GOT-10k_Train_001562 GOT-10k_Train_001563 GOT-10k_Train_001570 GOT-10k_Train_001576 GOT-10k_Train_001604 GOT-10k_Train_001615 GOT-10k_Train_001617 GOT-10k_Train_001618 GOT-10k_Train_001619 GOT-10k_Train_001624 GOT-10k_Train_001650 GOT-10k_Train_001651 GOT-10k_Train_001663 GOT-10k_Train_001673 GOT-10k_Train_001685 GOT-10k_Train_001692 GOT-10k_Train_001700 GOT-10k_Train_001722 GOT-10k_Train_001731 GOT-10k_Train_001732 GOT-10k_Train_001738 GOT-10k_Train_001740 GOT-10k_Train_001742 GOT-10k_Train_001747 GOT-10k_Train_001759 GOT-10k_Train_001769 GOT-10k_Train_001781 GOT-10k_Train_001791 GOT-10k_Train_001794 GOT-10k_Train_001795 GOT-10k_Train_001818 GOT-10k_Train_001833 GOT-10k_Train_001836 GOT-10k_Train_001841 GOT-10k_Train_001852 GOT-10k_Train_001863 GOT-10k_Train_001865 GOT-10k_Train_001878 GOT-10k_Train_001898 GOT-10k_Train_001919 GOT-10k_Train_001923 GOT-10k_Train_001929 GOT-10k_Train_001935 GOT-10k_Train_001938 GOT-10k_Train_001942 GOT-10k_Train_001955 GOT-10k_Train_001964 GOT-10k_Train_001966 GOT-10k_Train_001982 GOT-10k_Train_002005 GOT-10k_Train_002009 GOT-10k_Train_002035 GOT-10k_Train_002068 GOT-10k_Train_002073 GOT-10k_Train_002076 GOT-10k_Train_002084 GOT-10k_Train_002112 GOT-10k_Train_002115 GOT-10k_Train_002116 GOT-10k_Train_002123 GOT-10k_Train_002125 GOT-10k_Train_002129 GOT-10k_Train_002139 GOT-10k_Train_002146 GOT-10k_Train_002166 GOT-10k_Train_002168 GOT-10k_Train_002176 GOT-10k_Train_002184 GOT-10k_Train_002190 GOT-10k_Train_002192 
GOT-10k_Train_002211 GOT-10k_Train_002216 GOT-10k_Train_002233 GOT-10k_Train_002240 GOT-10k_Train_002247 GOT-10k_Train_002250 GOT-10k_Train_002252 GOT-10k_Train_002253 GOT-10k_Train_002261 GOT-10k_Train_002274 GOT-10k_Train_002276 GOT-10k_Train_002292 GOT-10k_Train_002302 GOT-10k_Train_002304 GOT-10k_Train_002305 GOT-10k_Train_002320 GOT-10k_Train_002345 GOT-10k_Train_002355 GOT-10k_Train_002359 GOT-10k_Train_002363 GOT-10k_Train_002374 GOT-10k_Train_002376 GOT-10k_Train_002389 GOT-10k_Train_002393 GOT-10k_Train_002400 GOT-10k_Train_002408 GOT-10k_Train_002418 GOT-10k_Train_002437 GOT-10k_Train_002440 GOT-10k_Train_002442 GOT-10k_Train_002454 GOT-10k_Train_002456 GOT-10k_Train_002465 GOT-10k_Train_002466 GOT-10k_Train_002474 GOT-10k_Train_002479 GOT-10k_Train_002484 GOT-10k_Train_002511 GOT-10k_Train_002514 GOT-10k_Train_002517 GOT-10k_Train_002523 GOT-10k_Train_002527 GOT-10k_Train_002534 GOT-10k_Train_002555 GOT-10k_Train_002587 GOT-10k_Train_002589 GOT-10k_Train_002612 GOT-10k_Train_002627 GOT-10k_Train_002639 GOT-10k_Train_002652 GOT-10k_Train_002693 GOT-10k_Train_002699 GOT-10k_Train_002716 GOT-10k_Train_002725 GOT-10k_Train_002727 GOT-10k_Train_002730 GOT-10k_Train_002755 GOT-10k_Train_002756 GOT-10k_Train_002760 GOT-10k_Train_002763 GOT-10k_Train_002837 GOT-10k_Train_002841 GOT-10k_Train_002856 GOT-10k_Train_002862 GOT-10k_Train_002863 GOT-10k_Train_002866 GOT-10k_Train_002877 GOT-10k_Train_002884 GOT-10k_Train_002886 GOT-10k_Train_002887 GOT-10k_Train_002907 GOT-10k_Train_002908 GOT-10k_Train_002909 GOT-10k_Train_002914 GOT-10k_Train_002920 GOT-10k_Train_002922 GOT-10k_Train_002936 GOT-10k_Train_002940 GOT-10k_Train_002944 GOT-10k_Train_002953 GOT-10k_Train_002961 GOT-10k_Train_002964 GOT-10k_Train_002996 GOT-10k_Train_003003 GOT-10k_Train_003004 GOT-10k_Train_003007 GOT-10k_Train_003012 GOT-10k_Train_003027 GOT-10k_Train_003028 GOT-10k_Train_003033 GOT-10k_Train_003034 GOT-10k_Train_003036 GOT-10k_Train_003044 GOT-10k_Train_003056 GOT-10k_Train_003069 
GOT-10k_Train_003078 GOT-10k_Train_003079 GOT-10k_Train_003095 GOT-10k_Train_003096 GOT-10k_Train_003107 GOT-10k_Train_003108 GOT-10k_Train_003127 GOT-10k_Train_003128 GOT-10k_Train_003129 GOT-10k_Train_003132 GOT-10k_Train_003146 GOT-10k_Train_003155 GOT-10k_Train_003173 GOT-10k_Train_003208 GOT-10k_Train_003239 GOT-10k_Train_003245 GOT-10k_Train_003246 GOT-10k_Train_003262 GOT-10k_Train_003275 GOT-10k_Train_003283 GOT-10k_Train_003296 GOT-10k_Train_003308 GOT-10k_Train_003310 GOT-10k_Train_003313 GOT-10k_Train_003317 GOT-10k_Train_003318 GOT-10k_Train_003354 GOT-10k_Train_003379 GOT-10k_Train_003384 GOT-10k_Train_003396 GOT-10k_Train_003401 GOT-10k_Train_003423 GOT-10k_Train_003435 GOT-10k_Train_003438 GOT-10k_Train_003442 GOT-10k_Train_003444 GOT-10k_Train_003455 GOT-10k_Train_003456 GOT-10k_Train_003464 GOT-10k_Train_003466 GOT-10k_Train_003474 GOT-10k_Train_003482 GOT-10k_Train_003488 GOT-10k_Train_003502 GOT-10k_Train_003515 GOT-10k_Train_003520 GOT-10k_Train_003530 GOT-10k_Train_003551 GOT-10k_Train_003570 GOT-10k_Train_003571 GOT-10k_Train_003578 GOT-10k_Train_003583 GOT-10k_Train_003590 GOT-10k_Train_003593 GOT-10k_Train_003618 GOT-10k_Train_003626 GOT-10k_Train_003650 GOT-10k_Train_003652 GOT-10k_Train_003663 GOT-10k_Train_003690 GOT-10k_Train_003704 GOT-10k_Train_003709 GOT-10k_Train_003716 GOT-10k_Train_003721 GOT-10k_Train_003722 GOT-10k_Train_003724 GOT-10k_Train_003729 GOT-10k_Train_003756 GOT-10k_Train_003768 GOT-10k_Train_003782 GOT-10k_Train_003786 GOT-10k_Train_003788 GOT-10k_Train_003791 GOT-10k_Train_003820 GOT-10k_Train_003821 GOT-10k_Train_003827 GOT-10k_Train_003834 GOT-10k_Train_003835 GOT-10k_Train_003839 GOT-10k_Train_003843 GOT-10k_Train_003854 GOT-10k_Train_003856 GOT-10k_Train_003881 GOT-10k_Train_003899 GOT-10k_Train_003904 GOT-10k_Train_003906 GOT-10k_Train_003913 GOT-10k_Train_003937 GOT-10k_Train_003940 GOT-10k_Train_003943 GOT-10k_Train_003950 GOT-10k_Train_003972 GOT-10k_Train_003974 GOT-10k_Train_003978 GOT-10k_Train_003981 
GOT-10k_Train_003982 GOT-10k_Train_004003 GOT-10k_Train_004004 GOT-10k_Train_004008 GOT-10k_Train_004012 GOT-10k_Train_004013 GOT-10k_Train_004030 GOT-10k_Train_004036 GOT-10k_Train_004040 GOT-10k_Train_004052 GOT-10k_Train_004054 GOT-10k_Train_004055 GOT-10k_Train_004057 GOT-10k_Train_004063 GOT-10k_Train_004068 GOT-10k_Train_004072 GOT-10k_Train_004075 GOT-10k_Train_004078 GOT-10k_Train_004082 GOT-10k_Train_004102 GOT-10k_Train_004103 GOT-10k_Train_004105 GOT-10k_Train_004111 GOT-10k_Train_004120 GOT-10k_Train_004122 GOT-10k_Train_004124 GOT-10k_Train_004142 GOT-10k_Train_004158 GOT-10k_Train_004170 GOT-10k_Train_004175 GOT-10k_Train_004181 GOT-10k_Train_004190 GOT-10k_Train_004193 GOT-10k_Train_004194 GOT-10k_Train_004199 GOT-10k_Train_004202 GOT-10k_Train_004217 GOT-10k_Train_004225 GOT-10k_Train_004229 GOT-10k_Train_004230 GOT-10k_Train_004234 GOT-10k_Train_004241 GOT-10k_Train_004246 GOT-10k_Train_004249 GOT-10k_Train_004255 GOT-10k_Train_004268 GOT-10k_Train_004276 GOT-10k_Train_004292 GOT-10k_Train_004293 GOT-10k_Train_004295 GOT-10k_Train_004296 GOT-10k_Train_004302 GOT-10k_Train_004324 GOT-10k_Train_004337 GOT-10k_Train_004342 GOT-10k_Train_004351 GOT-10k_Train_004356 GOT-10k_Train_004376 GOT-10k_Train_004380 GOT-10k_Train_004395 GOT-10k_Train_004398 GOT-10k_Train_004399 GOT-10k_Train_004408 GOT-10k_Train_004430 GOT-10k_Train_004439 GOT-10k_Train_004440 GOT-10k_Train_004462 GOT-10k_Train_004473 GOT-10k_Train_004476 GOT-10k_Train_004478 GOT-10k_Train_004481 GOT-10k_Train_004483 GOT-10k_Train_004484 GOT-10k_Train_004503 GOT-10k_Train_004513 GOT-10k_Train_004517 GOT-10k_Train_004533 GOT-10k_Train_004536 GOT-10k_Train_004594 GOT-10k_Train_004595 GOT-10k_Train_004607 GOT-10k_Train_004619 GOT-10k_Train_004626 GOT-10k_Train_004642 GOT-10k_Train_004646 GOT-10k_Train_004652 GOT-10k_Train_004658 GOT-10k_Train_004660 GOT-10k_Train_004661 GOT-10k_Train_004668 GOT-10k_Train_004673 GOT-10k_Train_004679 GOT-10k_Train_004694 GOT-10k_Train_004702 GOT-10k_Train_004709 
GOT-10k_Train_004717 GOT-10k_Train_004757 GOT-10k_Train_004768 GOT-10k_Train_004824 GOT-10k_Train_004826 GOT-10k_Train_004833 GOT-10k_Train_004839 GOT-10k_Train_004843 GOT-10k_Train_004852 GOT-10k_Train_004862 GOT-10k_Train_004865 GOT-10k_Train_004878 GOT-10k_Train_004880 GOT-10k_Train_004881 GOT-10k_Train_004902 GOT-10k_Train_004906 GOT-10k_Train_004920 GOT-10k_Train_004950 GOT-10k_Train_004951 GOT-10k_Train_004952 GOT-10k_Train_004973 GOT-10k_Train_004983 GOT-10k_Train_004984 GOT-10k_Train_004990 GOT-10k_Train_004993 GOT-10k_Train_004995 GOT-10k_Train_005004 GOT-10k_Train_005007 GOT-10k_Train_005022 GOT-10k_Train_005024 GOT-10k_Train_005040 GOT-10k_Train_005046 GOT-10k_Train_005047 GOT-10k_Train_005058 GOT-10k_Train_005063 GOT-10k_Train_005072 GOT-10k_Train_005097 GOT-10k_Train_005098 GOT-10k_Train_005099 GOT-10k_Train_005108 GOT-10k_Train_005113 GOT-10k_Train_005119 GOT-10k_Train_005126 GOT-10k_Train_005146 GOT-10k_Train_005166 GOT-10k_Train_005191 GOT-10k_Train_005207 GOT-10k_Train_005255 GOT-10k_Train_005269 GOT-10k_Train_005280 GOT-10k_Train_005310 GOT-10k_Train_005317 GOT-10k_Train_005319 GOT-10k_Train_005334 GOT-10k_Train_005338 GOT-10k_Train_005339 GOT-10k_Train_005354 GOT-10k_Train_005364 GOT-10k_Train_005382 GOT-10k_Train_005385 GOT-10k_Train_005389 GOT-10k_Train_005390 GOT-10k_Train_005396 GOT-10k_Train_005398 GOT-10k_Train_005399 GOT-10k_Train_005401 GOT-10k_Train_005413 GOT-10k_Train_005415 GOT-10k_Train_005420 GOT-10k_Train_005457 GOT-10k_Train_005465 GOT-10k_Train_005488 GOT-10k_Train_005493 GOT-10k_Train_005510 GOT-10k_Train_005523 GOT-10k_Train_005538 GOT-10k_Train_005553 GOT-10k_Train_005556 GOT-10k_Train_005575 GOT-10k_Train_005577 GOT-10k_Train_005582 GOT-10k_Train_005594 GOT-10k_Train_005606 GOT-10k_Train_005611 GOT-10k_Train_005636 GOT-10k_Train_005639 GOT-10k_Train_005642 GOT-10k_Train_005651 GOT-10k_Train_005652 GOT-10k_Train_005653 GOT-10k_Train_005681 GOT-10k_Train_005686 GOT-10k_Train_005689 GOT-10k_Train_005701 GOT-10k_Train_005712 
GOT-10k_Train_005716 GOT-10k_Train_005724 GOT-10k_Train_005731 GOT-10k_Train_005732 GOT-10k_Train_005734 GOT-10k_Train_005741 GOT-10k_Train_005764 GOT-10k_Train_005767 GOT-10k_Train_005788 GOT-10k_Train_005791 GOT-10k_Train_005800 GOT-10k_Train_005813 GOT-10k_Train_005816 GOT-10k_Train_005830 GOT-10k_Train_005852 GOT-10k_Train_005876 GOT-10k_Train_005877 GOT-10k_Train_005884 GOT-10k_Train_005910 GOT-10k_Train_005929 GOT-10k_Train_005943 GOT-10k_Train_005958 GOT-10k_Train_005995 GOT-10k_Train_006002 GOT-10k_Train_006010 GOT-10k_Train_006018 GOT-10k_Train_006021 GOT-10k_Train_006022 GOT-10k_Train_006040 GOT-10k_Train_006046 GOT-10k_Train_006057 GOT-10k_Train_006075 GOT-10k_Train_006087 GOT-10k_Train_006099 GOT-10k_Train_006115 GOT-10k_Train_006126 GOT-10k_Train_006129 GOT-10k_Train_006142 GOT-10k_Train_006161 GOT-10k_Train_006163 GOT-10k_Train_006193 GOT-10k_Train_006195 GOT-10k_Train_006204 GOT-10k_Train_006206 GOT-10k_Train_006215 GOT-10k_Train_006216 GOT-10k_Train_006220 GOT-10k_Train_006224 GOT-10k_Train_006232 GOT-10k_Train_006241 GOT-10k_Train_006247 GOT-10k_Train_006287 GOT-10k_Train_006300 GOT-10k_Train_006315 GOT-10k_Train_006318 GOT-10k_Train_006322 GOT-10k_Train_006337 GOT-10k_Train_006341 GOT-10k_Train_006344 GOT-10k_Train_006348 GOT-10k_Train_006349 GOT-10k_Train_006363 GOT-10k_Train_006366 GOT-10k_Train_006376 GOT-10k_Train_006378 GOT-10k_Train_006395 GOT-10k_Train_006402 GOT-10k_Train_006406 GOT-10k_Train_006412 GOT-10k_Train_006413 GOT-10k_Train_006427 GOT-10k_Train_006448 GOT-10k_Train_006459 GOT-10k_Train_006464 GOT-10k_Train_006474 GOT-10k_Train_006477 GOT-10k_Train_006482 GOT-10k_Train_006483 GOT-10k_Train_006496 GOT-10k_Train_006498 GOT-10k_Train_006499 GOT-10k_Train_006505 GOT-10k_Train_006506 GOT-10k_Train_006514 GOT-10k_Train_006533 GOT-10k_Train_006563 GOT-10k_Train_006569 GOT-10k_Train_006573 GOT-10k_Train_006584 GOT-10k_Train_006585 GOT-10k_Train_006587 GOT-10k_Train_006591 GOT-10k_Train_006592 GOT-10k_Train_006598 GOT-10k_Train_006605 
GOT-10k_Train_006631 GOT-10k_Train_006633 GOT-10k_Train_006644 GOT-10k_Train_006651 GOT-10k_Train_006654 GOT-10k_Train_006672 GOT-10k_Train_006717 GOT-10k_Train_006728 GOT-10k_Train_006736 GOT-10k_Train_006740 GOT-10k_Train_006746 GOT-10k_Train_006754 GOT-10k_Train_006759 GOT-10k_Train_006766 GOT-10k_Train_006789 GOT-10k_Train_006796 GOT-10k_Train_006797 GOT-10k_Train_006817 GOT-10k_Train_006818 GOT-10k_Train_006849 GOT-10k_Train_006851 GOT-10k_Train_006855 GOT-10k_Train_006872 GOT-10k_Train_006879 GOT-10k_Train_006900 GOT-10k_Train_006912 GOT-10k_Train_006926 GOT-10k_Train_006936 GOT-10k_Train_006955 GOT-10k_Train_006968 GOT-10k_Train_006969 GOT-10k_Train_006979 GOT-10k_Train_006980 GOT-10k_Train_006984 GOT-10k_Train_006986 GOT-10k_Train_006991 GOT-10k_Train_007017 GOT-10k_Train_007032 GOT-10k_Train_007035 GOT-10k_Train_007048 GOT-10k_Train_007064 GOT-10k_Train_007065 GOT-10k_Train_007075 GOT-10k_Train_007077 GOT-10k_Train_007081 GOT-10k_Train_007083 GOT-10k_Train_007089 GOT-10k_Train_007106 GOT-10k_Train_007107 GOT-10k_Train_007131 GOT-10k_Train_007138 GOT-10k_Train_007144 GOT-10k_Train_007150 GOT-10k_Train_007168 GOT-10k_Train_007170 GOT-10k_Train_007177 GOT-10k_Train_007181 GOT-10k_Train_007183 GOT-10k_Train_007190 GOT-10k_Train_007208 GOT-10k_Train_007220 GOT-10k_Train_007223 GOT-10k_Train_007247 GOT-10k_Train_007273 GOT-10k_Train_007284 GOT-10k_Train_007289 GOT-10k_Train_007293 GOT-10k_Train_007294 GOT-10k_Train_007296 GOT-10k_Train_007316 GOT-10k_Train_007322 GOT-10k_Train_007355 GOT-10k_Train_007360 GOT-10k_Train_007362 GOT-10k_Train_007364 GOT-10k_Train_007388 GOT-10k_Train_007392 GOT-10k_Train_007403 GOT-10k_Train_007404 GOT-10k_Train_007426 GOT-10k_Train_007427 GOT-10k_Train_007443 GOT-10k_Train_007446 GOT-10k_Train_007461 GOT-10k_Train_007482 GOT-10k_Train_007489 GOT-10k_Train_007499 GOT-10k_Train_007503 GOT-10k_Train_007507 GOT-10k_Train_007515 GOT-10k_Train_007521 GOT-10k_Train_007523 GOT-10k_Train_007525 GOT-10k_Train_007535 GOT-10k_Train_007559 
GOT-10k_Train_007566 GOT-10k_Train_007582 GOT-10k_Train_007586 GOT-10k_Train_007596 GOT-10k_Train_007616 GOT-10k_Train_007623 GOT-10k_Train_007634 GOT-10k_Train_007637 GOT-10k_Train_007643 GOT-10k_Train_007645 GOT-10k_Train_007653 GOT-10k_Train_007660 GOT-10k_Train_007661 GOT-10k_Train_007663 GOT-10k_Train_007672 GOT-10k_Train_007700 GOT-10k_Train_007710 GOT-10k_Train_007714 GOT-10k_Train_007717 GOT-10k_Train_007718 GOT-10k_Train_007737 GOT-10k_Train_007741 GOT-10k_Train_007746 GOT-10k_Train_007763 GOT-10k_Train_007769 GOT-10k_Train_007780 GOT-10k_Train_007803 GOT-10k_Train_007821 GOT-10k_Train_007825 GOT-10k_Train_007839 GOT-10k_Train_007848 GOT-10k_Train_007873 GOT-10k_Train_007877 GOT-10k_Train_007882 GOT-10k_Train_007894 GOT-10k_Train_007905 GOT-10k_Train_007908 GOT-10k_Train_007911 GOT-10k_Train_007914 GOT-10k_Train_007918 GOT-10k_Train_007929 GOT-10k_Train_007936 GOT-10k_Train_007938 GOT-10k_Train_007965 GOT-10k_Train_007969 GOT-10k_Train_007973 GOT-10k_Train_007987 GOT-10k_Train_007999 GOT-10k_Train_008001 GOT-10k_Train_008034 GOT-10k_Train_008050 GOT-10k_Train_008056 GOT-10k_Train_008068 GOT-10k_Train_008073 GOT-10k_Train_008089 GOT-10k_Train_008095 GOT-10k_Train_008101 GOT-10k_Train_008128 GOT-10k_Train_008139 GOT-10k_Train_008147 GOT-10k_Train_008154 GOT-10k_Train_008171 GOT-10k_Train_008180 GOT-10k_Train_008193 GOT-10k_Train_008194 GOT-10k_Train_008201 GOT-10k_Train_008212 GOT-10k_Train_008226 GOT-10k_Train_008230 GOT-10k_Train_008231 GOT-10k_Train_008236 GOT-10k_Train_008239 GOT-10k_Train_008241 GOT-10k_Train_008243 GOT-10k_Train_008249 GOT-10k_Train_008250 GOT-10k_Train_008273 GOT-10k_Train_008278 GOT-10k_Train_008291 GOT-10k_Train_008310 GOT-10k_Train_008311 GOT-10k_Train_008317 GOT-10k_Train_008319 GOT-10k_Train_008331 GOT-10k_Train_008332 GOT-10k_Train_008344 GOT-10k_Train_008369 GOT-10k_Train_008377 GOT-10k_Train_008386 GOT-10k_Train_008392 GOT-10k_Train_008396 GOT-10k_Train_008432 GOT-10k_Train_008438 GOT-10k_Train_008439 GOT-10k_Train_008440 
GOT-10k_Train_008442 GOT-10k_Train_008443 GOT-10k_Train_008455 GOT-10k_Train_008471 GOT-10k_Train_008484 GOT-10k_Train_008490 GOT-10k_Train_008492 GOT-10k_Train_008499 GOT-10k_Train_008502 GOT-10k_Train_008507 GOT-10k_Train_008520 GOT-10k_Train_008525 GOT-10k_Train_008568 GOT-10k_Train_008587 GOT-10k_Train_008589 GOT-10k_Train_008591 GOT-10k_Train_008606 GOT-10k_Train_008612 GOT-10k_Train_008623 GOT-10k_Train_008628 GOT-10k_Train_008633 GOT-10k_Train_008634 GOT-10k_Train_008645 GOT-10k_Train_008656 GOT-10k_Train_008668 GOT-10k_Train_008670 GOT-10k_Train_008702 GOT-10k_Train_008714 GOT-10k_Train_008723 GOT-10k_Train_008731 GOT-10k_Train_008732 GOT-10k_Train_008734 GOT-10k_Train_008747 GOT-10k_Train_008787 GOT-10k_Train_008794 GOT-10k_Train_008805 GOT-10k_Train_008829 GOT-10k_Train_008837 GOT-10k_Train_008838 GOT-10k_Train_008853 GOT-10k_Train_008878 GOT-10k_Train_008879 GOT-10k_Train_008880 GOT-10k_Train_008891 GOT-10k_Train_008895 GOT-10k_Train_008907 GOT-10k_Train_008909 GOT-10k_Train_008922 GOT-10k_Train_008935 GOT-10k_Train_008939 GOT-10k_Train_008972 GOT-10k_Train_008975 GOT-10k_Train_008976 GOT-10k_Train_009002 GOT-10k_Train_009031 GOT-10k_Train_009040 GOT-10k_Train_009052 GOT-10k_Train_009056 GOT-10k_Train_009057 GOT-10k_Train_009066 GOT-10k_Train_009076 GOT-10k_Train_009103 GOT-10k_Train_009115 GOT-10k_Train_009117 GOT-10k_Train_009127 GOT-10k_Train_009137 GOT-10k_Train_009145 GOT-10k_Train_009150 GOT-10k_Train_009155 GOT-10k_Train_009156 GOT-10k_Train_009160 GOT-10k_Train_009179 GOT-10k_Train_009181 GOT-10k_Train_009196 GOT-10k_Train_009203 GOT-10k_Train_009216 GOT-10k_Train_009219 GOT-10k_Train_009222 GOT-10k_Train_009224 GOT-10k_Train_009229 GOT-10k_Train_009231 GOT-10k_Train_009235 GOT-10k_Train_009242 GOT-10k_Train_009263 GOT-10k_Train_009265 GOT-10k_Train_009280 GOT-10k_Train_009282 GOT-10k_Train_009300 GOT-10k_Train_009301 GOT-10k_Train_009329 GOT-10k_Train_009332 GOT-10k_Train_009334 ================================================ FILE: 
artrackv2_mindspore/lib/train/data_specs/got10k_vot_train_split.txt ================================================ 3784 8998 1631 8277 8358 2338 2988 8302 2662 2663 2825 7447 4781 2218 5860 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 683 2081 1831 2404 1459 2741 5972 7462 2654 103 2174 2989 2506 2766 5912 3295 3986 609 4895 6673 801 1098 1602 2490 8476 3186 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8760 8173 2332 4131 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 998 6565 8127 8927 2544 4365 768 3535 3875 6808 2931 487 4451 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 7840 7827 3300 6261 4163 2217 6549 7236 9136 1857 6691 3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 1889 1908 4868 4498 1968 3273 7413 4114 5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 2323 5750 3178 6548 7501 3280 343 2171 8397 1367 8611 6118 6603 7182 9048 7733 7141 3335 4845 5449 3467 6250 163 5168 2040 3609 8352 3426 8567 769 187 6151 6437 7028 3970 9146 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 4132 1211 5459 4814 545 4556 238 2724 1260 2581 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 6702 7027 6388 7012 328 2130 
452 306 7669 3134 5761 3703 44 4189 695 5224 9215 5644 3143 5443 2348 2328 4725 1418 7810 5759 7226 4535 4385 5397 7249 3204 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 7310 1413 1932 4787 2579 3325 508 5610 6480 4290 479 3792 6628 2545 6972 2665 6730 3547 6845 3540 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 8892 8162 1212 6277 3725 7806 6149 7874 718 6888 7118 277 656 8763 8289 4759 5854 8659 3145 5981 1881 5799 6947 1609 6396 2631 318 2550 6132 1736 7816 4304 8133 6698 7779 7732 7642 7242 711 9262 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 1856 8641 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 1924 2098 2972 6006 5865 8740 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6909 853 7301 3279 123 7186 3194 5133 1931 4622 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 2594 2315 3389 3885 2621 4116 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4502 4703 9034 9108 5451 2619 9158 490 6540 1466 2962 8771 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 1311 6560 7519 8027 9217 6364 3779 4822 3563 5896 6655 1524 2846 3137 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 
5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 3643 9044 2230 499 7166 3172 8431 8401 1470 6356 8817 927 4212 2152 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 7420 7406 7046 7808 4911 8804 6927 8820 3264 300 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 2791 641 5454 4581 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 6279 5806 6867 5890 2396 3416 1981 6073 5872 3045 4182 7607 4414 2998 6553 7139 5624 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 5393 1107 2597 878 9309 7576 5250 3142 2015 571 3921 1255 7080 893 2160 1355 82 9153 8583 4085 4644 7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 7887 3425 1118 926 3430 5902 2282 2334 129 1372 4842 6473 4382 1028 415 8269 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3847 3676 1705 6745 1263 5020 1888 7036 1033 3914 5433 3905 4641 228 4801 3766 8085 643 6914 3013 5657 3696 1590 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 1799 8577 4969 9163 2025 6061 4026 588 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2912 6109 100 8149 5470 2807 3362 5621 6019 9241 9268 7703 7967 
5458 5492 6729 4577 106 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 4796 2658 5769 6985 421 7990 3289 1540 9316 2251 6896 5947 4965 4480 963 9047 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 2351 2322 4945 560 6837 6094 6475 7901 3 771 8029 3135 8044 7127 3741 5156 7030 113 3747 7042 5232 5225 3002 4747 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 828 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 4631 509 211 6230 7241 2219 2582 8353 7790 7583 9004 6942 1704 8051 2981 5511 6182 7088 1699 1222 6189 1528 5197 6221 7893 7773 8766 2942 8021 614 1786 400 133 556 5237 3727 1440 3873 8448 6285 8696 8800 4009 3386 4847 5685 9093 5895 6863 4260 8405 8417 7116 255 3223 4737 7852 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 6138 7831 4442 757 4464 1170 2568 19 323 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 2897 1526 5345 4423 6246 8578 3711 4986 4785 3997 7311 4788 8387 2041 2608 6031 3293 541 773 8473 2501 5667 804 483 1639 696 6060 5429 5762 1527 7342 6225 7895 381 8030 8362 4734 3526 9273 2039 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 6371 2209 1428 1158 7648 8765 802 153 4639 3657 9320 3294 2617 5052 6305 3227 8784 5868 6716 1671 178 2703 954 3254 2262 5743 8647 6393 7706 6604 3728 6978 7474 8754 2740 6038 1491 8814 2080 2358 5944 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 
259 8122 910 224 6152 7142 6070 8411 9214 9312 8325 6192 626 6025 6240 8708 4630 6777 1075 8906 408 9269 6236 9067 2324 156 3136 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 1133 8474 8653 989 4569 9092 8347 3102 1743 9086 5140 7438 1530 2460 7646 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 4926 6637 3197 7757 6502 1248 990 3928 2770 2751 1020 6426 6839 2671 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 6835 7799 7890 2785 5468 7302 5862 1803 3171 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 8982 69 4421 550 8685 3147 8956 3166 7023 2014 3573 3880 4045 2069 6051 702 6664 8418 6181 4853 4166 7022 7418 3605 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 8120 4582 6373 2805 4872 4869 5867 2670 7099 30 8933 930 7919 7261 5289 7449 7772 3613 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 8359 4061 1474 3229 270 4245 1979 1517 8652 4006 6137 4693 2528 6996 2926 5798 2477 2549 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1058 4586 6608 7985 1822 3628 549 1811 2601 4608 2540 6659 3859 307 3767 8167 505 4366 5520 461 1933 2401 8106 2055 7844 8544 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 4729 4080 1034 534 5598 9218 2424 329 4154 1597 109 8823 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 468 3808 3064 8798 7052 7767 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 817 6066 3640 
6015 7601 4855 6017 87 7071 7268 3614 6084 6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 3876 5824 8020 426 4982 4172 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 8736 8991 5241 1698 1294 1753 196 2987 8680 4144 8639 6441 8255 8156 3677 6385 6520 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5533 2821 1897 819 4060 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 665 5881 3328 4664 315 1315 1462 8616 7725 5749 1730 8184 4567 5065 8867 1304 3669 9192 410 8177 6710 1210 2329 3911 1899 7686 3315 6180 3116 5341 4394 8337 9182 5715 2172 2782 3715 9195 7960 4890 8294 2337 8014 3353 7475 2193 8831 4200 4653 6196 6957 3063 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 4707 4331 1494 8756 3174 1074 8116 8295 3048 3752 6050 8003 9175 4674 1642 2556 6166 7165 8441 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 2430 8131 411 6267 2045 6606 899 8065 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 5313 79 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 493 4972 445 8286 320 8300 617 3413 4459 525 5631 6314 5157 5300 8545 182 1031 4429 2495 1534 3099 3916 3738 535 2119 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 3087 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 436 4214 3693 5426 6767 5903 4368 2170 5051 7490 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 3030 1975 5623 2415 1957 6141 9278 3226 3062 
5670 7326 8759 8496 6619 8187 8262 6199 951 668 2388 4698 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 2831 776 5010 5770 5282 3574 5102 1278 2281 5455 4628 4663 9119 7487 8746 4889 1175 102 2386 8940 5566 53 8833 1918 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 7328 183 4761 7543 304 1196 4623 5519 1953 533 5989 7590 7428 6346 6162 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 789 4991 3675 7456 5752 175 7556 4195 907 2248 8467 1017 7968 3304 1666 4942 3867 4802 6357 4621 887 6213 5261 1336 521 8928 7864 4792 6742 157 1593 823 7235 5303 5633 1100 8047 5993 1460 6714 1630 6440 6307 3608 292 5974 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 2380 8119 7167 5076 3596 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 4410 4579 1061 7113 486 862 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 3347 3459 4162 2690 7478 5856 5240 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 367 8839 8749 5353 3148 9114 1233 9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 7004 1186 8824 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 8475 7554 4031 6297 6059 5329 115 2058 7650 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 5124 5201 6556 6617 9091 3945 8402 5648 5257 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 5655 5997 1029 7506 2575 2990 4898 7402 3290 5388 6715 8235 5361 4970 1363 3338 9014 5358 635 1193 3705 6334 7666 5270 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 2357 
8761 8910 8693 1204 489 4827 8024 7832 3895 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 5365 1840 485 4456 1169 3271 6886 9140 7173 6003 1659 1807 8371 2439 274 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 1944 6509 7770 1880 6610 9331 302 418 4219 1333 2350 8424 4883 6580 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 851 657 2446 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 3802 3851 4224 7237 7998 7207 4106 9036 1046 5070 4592 6056 693 1328 3309 2629 2736 202 388 7886 4417 8786 8822 4035 5505 1192 4388 8941 5019 7538 6732 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 8252 4706 2675 3790 7459 6164 1149 6687 582 3139 3882 4034 1861 4701 8757 8801 1823 4528 4789 143 4746 9234 3866 9245 1911 1366 4393 2061 1959 6967 3138 7382 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 8615 7417 5771 3170 8061 2935 8263 8257 6883 1276 1239 812 6258 3922 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 4285 8852 7409 8971 7534 7792 2444 7496 8063 1665 248 3894 4585 66 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 
3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 491 7153 6217 4741 1761 5484 5474 6916 7252 1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 7437 7025 4000 6897 1408 7154 5013 2204 9233 3817 1877 9161 2197 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 6917 4167 4618 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 7188 6028 7530 2828 1977 3238 2340 110 3247 7532 7541 924 1632 4487 6447 4944 6347 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8207 5845 9011 5525 4269 4700 1824 8186 8872 8299 3957 8242 4558 6439 2666 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 2809 2445 809 8298 8643 8316 6853 1572 3215 3938 2249 6515 1337 8328 7712 1429 4117 5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4427 4876 9166 2772 6295 5001 5296 3371 6518 6327 854 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 7758 7176 9205 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 2971 431 9285 2513 1116 3656 4529 5758 6339 8398 816 4153 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 3350 3117 4685 4334 5165 7224 4066 4253 4447 3815 5038 253 3658 330 3967 6443 2143 7336 6135 2734 8390 4655 7800 1399 1173 5618 2822 4431 2443 1568 3909 1974 2496 4772 5164 2138 2864 3799 3924 4882 
8245 1585 5528 5692 5730 5832 137 3175 2894 2062 2752 4028 2113 5411 2647 730 3758 1667 9303 6653 3698 3968 3053 503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 1182 6650 7290 3487 2814 5668 7567 5333 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 5297 6572 8877 6874 430 5041 5267 7448 620 9112 4294 1432 72 130 7920 4597 6614 8889 3697 1895 3462 2616 4791 7846 8372 428 6559 8326 9211 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 720 8893 3021 1978 4782 1816 2083 4051 1446 5870 9097 8006 4222 8287 686 1377 611 8153 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 2793 5151 6977 8188 8752 5815 5116 263 3311 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4141 5407 520 7305 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 7674 1600 5078 2949 6624 6541 8986 5740 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 3599 1420 1721 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 3098 8773 4076 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 2052 1752 7199 4412 8882 2463 339 56 4821 7555 6558 1905 5258 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 7480 9313 716 3395 6843 918 4329 8593 3404 5212 837 480 8524 1342 7414 288 8863 3352 1628 135 3314 2181 8650 5915 8078 6812 1375 906 5635 7126 
1387 7458 6119 5591 3795 1531 95 1960 7522 898 4921 2623 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 4267 9094 7050 6048 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 2022 630 2500 5111 6561 5127 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 5954 6662 2207 973 3361 960 6350 7431 8076 1129 750 7194 2300 6590 5893 6889 3125 8788 7286 3472 8164 7693 1469 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 4069 7198 7560 3306 6100 2932 1741 14 4672 7564 8748 8874 3804 3678 2610 1358 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 152 7299 5431 6178 793 9120 8410 4963 772 6954 3014 6881 286 553 1948 6398 6255 3057 8646 6176 2700 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 3623 7950 1971 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 1177 4282 6492 5859 5029 5123 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 4058 2781 8715 1272 4474 4863 4367 49 8844 5605 8671 6743 4281 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 6819 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 1232 5811 317 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 5386 6095 6335 1561 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 
7269 362 4178 4145 6127 5162 2399 9225 7068 794 1348 7736 444 6081 5298 2026 2543 9087 7425 3730 8468 2641 7529 1720 6377 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 4118 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 4271 1043 8211 2196 5260 3789 7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 850 2669 7612 9290 2526 1287 4160 4633 7125 742 4534 2407 4555 8764 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 127 7069 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 4888 1584 1969 4846 2915 6804 2775 7306 9306 5231 7740 4283 953 6725 8290 1504 1539 8885 138 3764 1256 257 335 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 9207 4910 8709 4411 1672 457 8037 4932 3679 2362 8592 495 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 6681 2070 176 9062 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 672 3807 447 1656 3297 8858 2118 6309 1926 481 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 5494 629 3621 7371 1999 6704 3734 2698 4691 6938 8415 6353 6750 9077 2679 2478 7321 6611 4007 5772 6416 2264 8348 2672 6546 754 6934 8546 4404 592 4748 6625 7944 2377 6 8929 8275 4524 3660 8710 419 6878 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 5276 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 1683 8141 7754 5744 2952 7568 7457 5368 1510 1513 3072 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 8531 5404 6550 
8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 8741 3969 2956 4565 920 1830 8558 1930 6677 6825 8256 7454 4710 1768 3753 5292 1397 2733 946 6711 3242 4929 5006 3202 2295 2746 1293 2124 5405 4065 818 7464 1820 1312 6994 6920 261 987 6120 3109 2986 4338 7774 5122 1364 8969 6712 8161 7595 5940 1566 6419 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 4979 9288 6860 1662 6171 225 5143 313 8327 3385 7626 3103 4401 6794 5600 5043 7664 6830 4452 3980 5875 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 7396 1171 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 6809 3055 188 8031 3124 3683 4537 988 2297 4893 839 4467 5195 4041 6457 4441 6472 4912 6884 5922 7014 1660 1595 6752 4554 1292 2709 3800 1980 8775 6392 6263 7214 5219 282 309 6685 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6950 5281 8321 3877 7614 4188 2223 239 4745 6875 7096 5571 4403 2640 1845 6690 1825 4157 314 4682 8825 8093 7215 6465 99 8077 4206 366 1208 6043 4640 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 4113 2423 2425 4343 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 3366 5023 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 2349 2259 6545 8100 8395 2258 2911 3946 1406 8683 8296 5579 2177 8264 1425 957 3647 515 5342 8363 2449 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 8856 392 2018 369 5711 9230 5304 7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 
7359 6431 1290 5918 8705 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 3428 6487 6595 45 5792 333 2383 3388 666 460 943 364 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 241 5707 9227 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 1939 6369 8548 5526 9030 5349 8433 1477 4265 9200 3878 462 6846 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 5544 3337 1546 2824 1718 6009 2042 251 3330 192 3797 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 1227 1226 781 937 6343 2578 2892 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 827 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 203 4239 5976 6200 6435 4425 787 1121 6034 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 1318 739 8202 1552 5236 3576 4699 9238 1879 433 5587 1678 8552 6445 7971 6880 7476 7282 7271 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 5265 7917 1607 6074 7061 1580 8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 6971 7854 86 9128 4298 622 6579 2203 7716 1265 1174 7380 623 8936 4306 8082 4312 8661 5753 7243 2768 8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 
5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 4422 5248 7867 1078 5015 6660 1676 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 384 1097 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 3735 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5145 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 2754 2687 1596 5376 1512 566 6382 1757 8035 2296 4264 1053 4716 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 891 1323 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 4150 547 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 5994 341 4083 5152 3380 8739 6615 3829 164 7927 4779 4216 8528 3641 4606 2769 6970 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 5805 4126 2451 6906 7234 9243 3778 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 4823 8588 2938 3060 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 7370 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 5726 2731 2020 6727 8793 523 6036 58 7993 5512 5049 2721 8482 673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 6749 7677 981 7160 4726 1886 7845 6975 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 8888 2945 2091 1873 6303 729 2156 3267 1860 6597 4930 5253 938 580 5825 166 8198 6892 8701 74 7094 8954 3156 6140 4279 2229 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 1258 8828 5838 8529 2788 237 3838 1291 4056 5628 7281 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 
830 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 1275 1114 4960 8860 3900 6468 1505 8868 5588 3858 1947 2565 1472 243 6583 7085 5374 4291 4426 492 2311 8305 3662 8780 7488 3890 5005 4680 7358 9116 4397 5999 7902 83 3566 2134 8942 4767 6601 1745 5736 5254 8017 4015 7690 3798 8947 1067 7945 590 2547 2535 64 2053 5359 2493 6669 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 5189 2959 2044 8917 2046 6313 6333 5318 4301 2213 2933 4121 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 5008 3585 3627 5356 2997 2347 881 4849 8808 8351 4017 2010 6836 4391 3630 3712 2969 5238 4333 2301 4406 1236 1050 1864 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 8428 3105 5117 1095 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 7084 5091 2413 8170 5775 1817 529 813 2916 5130 126 1243 2370 4831 9122 3010 5104 2613 6761 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 8992 632 1093 4543 645 2352 4115 373 1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7134 6636 5804 1858 6214 4730 8536 1203 3118 9202 1875 5885 168 5898 4014 4186 3346 3041 5558 9296 8157 4339 3234 2604 6803 5387 5590 125 2173 8012 8005 4858 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 6494 8406 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7432 7602 8333 1582 1378 482 9279 8015 4514 3542 628 5053 6699 6227 2094 1621 847 3598 2728 7276 6620 8345 4278 4059 9058 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 
46 3952 6418 2510 5783 3816 2715 3930 2548 5204 708 7756 3825 777 3550 3929 5440 6751 7764 4070 7331 3743 9131 9206 3828 23 41 4197 234 5723 7622 8832 2169 5599 2976 5266 1967 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 1653 4485 4019 3370 919 1330 6085 2078 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 4681 1300 7407 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 4336 9090 6403 7255 8919 6522 6760 8898 4803 374 8686 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 785 5325 4770 4250 52 4634 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 4011 8751 4776 7954 7102 5697 2032 5729 5017 6962 2051 1092 9019 2759 8581 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 6179 1142 7947 6456 6291 5789 6538 9134 3049 5075 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 1081 2827 1496 2574 6167 3183 4287 5482 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 2567 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 4196 3495 7217 5206 4836 7759 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 7978 3059 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 
5853 8121 1764 3046 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 6785 471 8214 231 3844 5746 2011 7209 336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 676 3964 6362 3510 7510 2308 1806 5917 3387 5423 8900 147 3780 1696 9111 6783 6497 4104 3987 260 4616 2121 9283 1400 4670 2735 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 404 2691 1015 7092 7562 8624 2291 5934 5503 2326 2960 842 1963 5568 9050 3806 439 9154 6055 6451 7633 688 4354 8890 2813 2872 8102 6609 1497 8389 6449 1682 3594 5103 5812 863 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 6507 1658 3811 1774 7897 2238 2943 191 3869 3188 414 8072 7838 1382 4962 5363 4042 1983 4077 7429 4044 1109 1295 386 5481 3927 311 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/got10k_vot_val_split.txt ================================================ 1349 5878 562 2202 8904 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 3579 869 2888 8657 6599 741 4288 2244 7357 5704 8791 208 4805 8526 4887 8871 7468 3343 886 7794 2646 6454 6101 7885 7744 1297 4119 4856 122 2286 2925 5131 5843 5320 5626 540 1862 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 8096 8066 4325 6666 7193 7114 8060 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7876 897 5985 7346 7537 111 3700 1126 7896 3419 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 5200 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 1137 9272 788 5786 5186 2667 7630 3953 
1828 8827 6471 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5060 6486 8023 824 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 4330 3181 3422 456 1782 3408 6530 719 7587 3058 740 4207 5336 2798 2473 4221 1493 3281 171 9157 9139 7766 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 4257 4935 2705 2572 3436 8513 1385 2637 7091 2761 6007 6694 2422 4917 2186 6898 1390 6965 7698 2002 2692 7365 7373 4091 947 3962 8692 1788 6862 6856 1950 1914 5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 5900 9043 4958 1821 1187 1153 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 3560 2179 5462 1402 3654 1376 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 3541 8132 2063 5129 2818 7949 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7608 8584 2071 7868 2804 3655 6847 3276 4272 3910 1574 4559 7580 5014 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 3009 3691 763 4875 3572 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 3915 3180 7043 4458 2889 57 7667 8375 1434 7493 4733 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 4609 7291 8542 6501 7548 4557 6274 5226 7309 1317 6275 1099 4191 7270 5392 2316 3819 1670 8045 4807 8864 2391 5908 8338 8218 6400 
9193 3165 843 6613 6941 5629 7557 4321 3702 681 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 1680 6841 703 684 8293 3682 5733 4818 3231 5562 9001 3889 7024 2519 1713 3287 219 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 7533 5223 7711 1844 5751 3008 8055 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 6582 5578 1231 9274 7260 7751 8052 6799 2089 2342 8451 3260 5550 7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 5641 1884 2330 1804 344 698 3089 1532 4454 761 8094 3432 6811 8722 8826 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 7913 1508 7755 2784 4964 3431 6209 3755 6399 3954 455 5416 7591 245 140 9210 4084 967 7798 6795 7095 6733 3861 9264 1045 755 8042 7074 7778 6415 4724 6450 2049 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 5009 3955 7517 5128 3417 3019 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 4669 1785 3533 2704 8603 3726 6668 497 6815 6157 6646 6964 8097 5645 8481 8215 3775 2542 7514 5699 3518 3740 1404 8981 4086 6397 4204 6899 682 6589 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 3377 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 5467 6234 691 8758 2122 1213 1492 1437 2187 1266 2395 7278 8491 5256 1554 
8163 5966 7128 7904 1691 6272 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 675 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 148 8151 8789 7907 8596 715 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 3504 6739 4180 5835 4243 25 4002 1976 158 5181 4885 8985 11 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 5957 149 6748 3559 7647 2566 1359 5259 7010 554 6005 8172 8125 1350 9051 1973 1386 159 7007 3220 1846 3093 4445 2056 8370 3211 4384 2231 273 642 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3521 210 6269 8570 3285 8039 3546 6203 1183 6107 4147 2234 7185 3192 7155 2001 7777 876 944 908 7791 6784 65 9172 5675 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/lasot_train_split.txt ================================================ airplane-10 airplane-11 airplane-12 airplane-14 airplane-16 airplane-17 airplane-18 airplane-19 airplane-2 airplane-20 airplane-3 airplane-4 airplane-5 airplane-6 airplane-7 airplane-8 basketball-10 basketball-12 basketball-13 basketball-14 basketball-15 basketball-16 basketball-17 basketball-18 basketball-19 basketball-2 basketball-20 basketball-3 basketball-4 basketball-5 basketball-8 basketball-9 bear-1 bear-10 bear-11 bear-12 bear-13 bear-14 bear-15 bear-16 bear-18 bear-19 bear-20 bear-3 bear-5 bear-7 bear-8 bear-9 bicycle-1 bicycle-10 bicycle-11 bicycle-12 bicycle-13 bicycle-14 bicycle-15 bicycle-16 bicycle-17 bicycle-19 bicycle-20 bicycle-3 bicycle-4 bicycle-5 bicycle-6 bicycle-8 bird-1 bird-10 bird-11 bird-12 bird-13 bird-14 bird-16 bird-18 bird-19 bird-20 bird-4 bird-5 bird-6 bird-7 bird-8 
bird-9 boat-1 boat-10 boat-11 boat-13 boat-14 boat-15 boat-16 boat-18 boat-19 boat-2 boat-20 boat-5 boat-6 boat-7 boat-8 boat-9 book-1 book-12 book-13 book-14 book-15 book-16 book-17 book-18 book-2 book-20 book-4 book-5 book-6 book-7 book-8 book-9 bottle-10 bottle-11 bottle-13 bottle-15 bottle-16 bottle-17 bottle-19 bottle-2 bottle-20 bottle-3 bottle-4 bottle-5 bottle-6 bottle-7 bottle-8 bottle-9 bus-1 bus-10 bus-11 bus-12 bus-13 bus-14 bus-15 bus-16 bus-18 bus-20 bus-3 bus-4 bus-6 bus-7 bus-8 bus-9 car-1 car-10 car-11 car-12 car-13 car-14 car-15 car-16 car-18 car-19 car-20 car-3 car-4 car-5 car-7 car-8 cat-10 cat-11 cat-12 cat-13 cat-14 cat-15 cat-16 cat-17 cat-19 cat-2 cat-4 cat-5 cat-6 cat-7 cat-8 cat-9 cattle-1 cattle-10 cattle-11 cattle-14 cattle-15 cattle-16 cattle-17 cattle-18 cattle-19 cattle-20 cattle-3 cattle-4 cattle-5 cattle-6 cattle-8 cattle-9 chameleon-1 chameleon-10 chameleon-12 chameleon-13 chameleon-14 chameleon-15 chameleon-16 chameleon-17 chameleon-18 chameleon-19 chameleon-2 chameleon-4 chameleon-5 chameleon-7 chameleon-8 chameleon-9 coin-1 coin-10 coin-11 coin-12 coin-13 coin-14 coin-15 coin-16 coin-17 coin-19 coin-2 coin-20 coin-4 coin-5 coin-8 coin-9 crab-1 crab-10 crab-11 crab-13 crab-14 crab-15 crab-16 crab-17 crab-19 crab-2 crab-20 crab-4 crab-5 crab-7 crab-8 crab-9 crocodile-1 crocodile-11 crocodile-12 crocodile-13 crocodile-15 crocodile-16 crocodile-17 crocodile-18 crocodile-19 crocodile-2 crocodile-20 crocodile-5 crocodile-6 crocodile-7 crocodile-8 crocodile-9 cup-10 cup-11 cup-12 cup-13 cup-14 cup-15 cup-16 cup-18 cup-19 cup-2 cup-20 cup-3 cup-5 cup-6 cup-8 cup-9 deer-1 deer-11 deer-12 deer-13 deer-15 deer-16 deer-17 deer-18 deer-19 deer-2 deer-20 deer-3 deer-5 deer-6 deer-7 deer-9 dog-10 dog-11 dog-12 dog-13 dog-14 dog-16 dog-17 dog-18 dog-2 dog-20 dog-3 dog-4 dog-5 dog-6 dog-8 dog-9 drone-1 drone-10 drone-11 drone-12 drone-14 drone-16 drone-17 drone-18 drone-19 drone-20 drone-3 drone-4 drone-5 drone-6 drone-8 drone-9 electricfan-11 
electricfan-12 electricfan-13 electricfan-14 electricfan-15 electricfan-16 electricfan-17 electricfan-19 electricfan-2 electricfan-3 electricfan-4 electricfan-5 electricfan-6 electricfan-7 electricfan-8 electricfan-9 elephant-10 elephant-11 elephant-13 elephant-14 elephant-15 elephant-17 elephant-19 elephant-2 elephant-20 elephant-3 elephant-4 elephant-5 elephant-6 elephant-7 elephant-8 elephant-9 flag-1 flag-10 flag-11 flag-12 flag-13 flag-14 flag-15 flag-16 flag-17 flag-18 flag-19 flag-20 flag-4 flag-6 flag-7 flag-8 fox-1 fox-10 fox-11 fox-12 fox-13 fox-14 fox-15 fox-16 fox-17 fox-18 fox-19 fox-4 fox-6 fox-7 fox-8 fox-9 frog-1 frog-10 frog-11 frog-12 frog-13 frog-14 frog-15 frog-16 frog-17 frog-18 frog-19 frog-2 frog-5 frog-6 frog-7 frog-8 gametarget-10 gametarget-11 gametarget-12 gametarget-14 gametarget-15 gametarget-16 gametarget-17 gametarget-18 gametarget-19 gametarget-20 gametarget-3 gametarget-4 gametarget-5 gametarget-6 gametarget-8 gametarget-9 gecko-10 gecko-11 gecko-12 gecko-13 gecko-14 gecko-15 gecko-17 gecko-18 gecko-2 gecko-20 gecko-3 gecko-4 gecko-6 gecko-7 gecko-8 gecko-9 giraffe-1 giraffe-11 giraffe-12 giraffe-14 giraffe-16 giraffe-17 giraffe-18 giraffe-19 giraffe-20 giraffe-3 giraffe-4 giraffe-5 giraffe-6 giraffe-7 giraffe-8 giraffe-9 goldfish-1 goldfish-11 goldfish-12 goldfish-13 goldfish-14 goldfish-15 goldfish-16 goldfish-17 goldfish-18 goldfish-19 goldfish-2 goldfish-20 goldfish-4 goldfish-5 goldfish-6 goldfish-9 gorilla-1 gorilla-10 gorilla-11 gorilla-12 gorilla-14 gorilla-15 gorilla-16 gorilla-17 gorilla-18 gorilla-19 gorilla-2 gorilla-20 gorilla-3 gorilla-5 gorilla-7 gorilla-8 guitar-1 guitar-11 guitar-12 guitar-13 guitar-14 guitar-15 guitar-17 guitar-18 guitar-19 guitar-2 guitar-20 guitar-4 guitar-5 guitar-6 guitar-7 guitar-9 hand-1 hand-10 hand-11 hand-12 hand-13 hand-14 hand-15 hand-17 hand-18 hand-19 hand-20 hand-4 hand-5 hand-6 hand-7 hand-8 hat-10 hat-11 hat-12 hat-13 hat-14 hat-15 hat-16 hat-17 hat-19 hat-20 hat-3 hat-4 hat-6 hat-7 
hat-8 hat-9 helmet-1 helmet-10 helmet-12 helmet-14 helmet-15 helmet-16 helmet-17 helmet-18 helmet-2 helmet-20 helmet-3 helmet-4 helmet-6 helmet-7 helmet-8 helmet-9 hippo-10 hippo-11 hippo-12 hippo-13 hippo-14 hippo-15 hippo-16 hippo-17 hippo-18 hippo-19 hippo-2 hippo-3 hippo-4 hippo-5 hippo-6 hippo-8 horse-10 horse-11 horse-13 horse-14 horse-16 horse-17 horse-18 horse-19 horse-2 horse-20 horse-3 horse-5 horse-6 horse-7 horse-8 horse-9 kangaroo-1 kangaroo-10 kangaroo-12 kangaroo-13 kangaroo-15 kangaroo-16 kangaroo-17 kangaroo-18 kangaroo-19 kangaroo-20 kangaroo-3 kangaroo-4 kangaroo-6 kangaroo-7 kangaroo-8 kangaroo-9 kite-1 kite-11 kite-12 kite-13 kite-14 kite-16 kite-17 kite-18 kite-19 kite-2 kite-20 kite-3 kite-5 kite-7 kite-8 kite-9 leopard-10 leopard-11 leopard-12 leopard-13 leopard-14 leopard-15 leopard-17 leopard-18 leopard-19 leopard-2 leopard-3 leopard-4 leopard-5 leopard-6 leopard-8 leopard-9 licenseplate-1 licenseplate-10 licenseplate-11 licenseplate-14 licenseplate-16 licenseplate-17 licenseplate-18 licenseplate-19 licenseplate-2 licenseplate-20 licenseplate-3 licenseplate-4 licenseplate-5 licenseplate-7 licenseplate-8 licenseplate-9 lion-10 lion-11 lion-13 lion-14 lion-15 lion-16 lion-17 lion-18 lion-19 lion-2 lion-3 lion-4 lion-6 lion-7 lion-8 lion-9 lizard-10 lizard-11 lizard-12 lizard-14 lizard-15 lizard-16 lizard-17 lizard-18 lizard-19 lizard-2 lizard-20 lizard-4 lizard-5 lizard-7 lizard-8 lizard-9 microphone-1 microphone-10 microphone-11 microphone-12 microphone-13 microphone-15 microphone-17 microphone-18 microphone-19 microphone-20 microphone-3 microphone-4 microphone-5 microphone-7 microphone-8 microphone-9 monkey-1 monkey-10 monkey-11 monkey-12 monkey-13 monkey-14 monkey-15 monkey-16 monkey-18 monkey-19 monkey-2 monkey-20 monkey-5 monkey-6 monkey-7 monkey-8 motorcycle-10 motorcycle-11 motorcycle-12 motorcycle-13 motorcycle-14 motorcycle-15 motorcycle-16 motorcycle-17 motorcycle-19 motorcycle-2 motorcycle-20 motorcycle-4 motorcycle-5 motorcycle-6 
motorcycle-7 motorcycle-8 mouse-10 mouse-11 mouse-12 mouse-13 mouse-14 mouse-15 mouse-16 mouse-18 mouse-19 mouse-2 mouse-20 mouse-3 mouse-4 mouse-5 mouse-6 mouse-7 person-11 person-13 person-14 person-15 person-16 person-17 person-18 person-19 person-2 person-20 person-3 person-4 person-6 person-7 person-8 person-9 pig-1 pig-11 pig-12 pig-14 pig-15 pig-16 pig-17 pig-19 pig-20 pig-3 pig-4 pig-5 pig-6 pig-7 pig-8 pig-9 pool-1 pool-10 pool-11 pool-13 pool-14 pool-16 pool-17 pool-18 pool-19 pool-2 pool-20 pool-4 pool-5 pool-6 pool-8 pool-9 rabbit-1 rabbit-11 rabbit-12 rabbit-14 rabbit-15 rabbit-16 rabbit-18 rabbit-2 rabbit-20 rabbit-3 rabbit-4 rabbit-5 rabbit-6 rabbit-7 rabbit-8 rabbit-9 racing-1 racing-11 racing-12 racing-13 racing-14 racing-17 racing-18 racing-19 racing-2 racing-3 racing-4 racing-5 racing-6 racing-7 racing-8 racing-9 robot-10 robot-11 robot-12 robot-13 robot-14 robot-15 robot-16 robot-17 robot-18 robot-2 robot-20 robot-3 robot-4 robot-6 robot-7 robot-9 rubicCube-10 rubicCube-11 rubicCube-12 rubicCube-13 rubicCube-15 rubicCube-16 rubicCube-17 rubicCube-18 rubicCube-2 rubicCube-20 rubicCube-3 rubicCube-4 rubicCube-5 rubicCube-7 rubicCube-8 rubicCube-9 sepia-1 sepia-10 sepia-11 sepia-12 sepia-14 sepia-15 sepia-17 sepia-18 sepia-19 sepia-2 sepia-20 sepia-3 sepia-4 sepia-5 sepia-7 sepia-9 shark-1 shark-10 shark-11 shark-12 shark-13 shark-14 shark-15 shark-16 shark-17 shark-18 shark-19 shark-20 shark-4 shark-7 shark-8 shark-9 sheep-1 sheep-10 sheep-11 sheep-12 sheep-13 sheep-14 sheep-15 sheep-16 sheep-17 sheep-18 sheep-19 sheep-2 sheep-20 sheep-4 sheep-6 sheep-8 skateboard-1 skateboard-10 skateboard-11 skateboard-12 skateboard-13 skateboard-14 skateboard-15 skateboard-17 skateboard-18 skateboard-2 skateboard-20 skateboard-4 skateboard-5 skateboard-6 skateboard-7 skateboard-9 spider-1 spider-10 spider-11 spider-12 spider-13 spider-15 spider-17 spider-19 spider-2 spider-3 spider-4 spider-5 spider-6 spider-7 spider-8 spider-9 squirrel-1 squirrel-10 
squirrel-12 squirrel-14 squirrel-15 squirrel-16 squirrel-17 squirrel-18 squirrel-2 squirrel-20 squirrel-3 squirrel-4 squirrel-5 squirrel-6 squirrel-7 squirrel-9 surfboard-1 surfboard-10 surfboard-11 surfboard-13 surfboard-14 surfboard-15 surfboard-16 surfboard-17 surfboard-18 surfboard-19 surfboard-2 surfboard-20 surfboard-3 surfboard-6 surfboard-7 surfboard-9 swing-1 swing-11 swing-12 swing-13 swing-15 swing-16 swing-18 swing-19 swing-2 swing-3 swing-4 swing-5 swing-6 swing-7 swing-8 swing-9 tank-1 tank-10 tank-11 tank-12 tank-13 tank-15 tank-17 tank-18 tank-19 tank-2 tank-20 tank-3 tank-4 tank-5 tank-7 tank-8 tiger-1 tiger-10 tiger-11 tiger-13 tiger-14 tiger-15 tiger-16 tiger-17 tiger-19 tiger-2 tiger-20 tiger-3 tiger-5 tiger-7 tiger-8 tiger-9 train-10 train-12 train-13 train-14 train-15 train-16 train-17 train-18 train-19 train-2 train-3 train-4 train-5 train-6 train-8 train-9 truck-1 truck-10 truck-11 truck-12 truck-13 truck-14 truck-15 truck-17 truck-18 truck-19 truck-2 truck-20 truck-4 truck-5 truck-8 truck-9 turtle-1 turtle-10 turtle-11 turtle-12 turtle-13 turtle-14 turtle-15 turtle-17 turtle-18 turtle-19 turtle-2 turtle-20 turtle-3 turtle-4 turtle-6 turtle-7 umbrella-1 umbrella-10 umbrella-11 umbrella-12 umbrella-13 umbrella-14 umbrella-15 umbrella-16 umbrella-18 umbrella-20 umbrella-3 umbrella-4 umbrella-5 umbrella-6 umbrella-7 umbrella-8 volleyball-10 volleyball-11 volleyball-12 volleyball-14 volleyball-15 volleyball-16 volleyball-17 volleyball-2 volleyball-20 volleyball-3 volleyball-4 volleyball-5 volleyball-6 volleyball-7 volleyball-8 volleyball-9 yoyo-1 yoyo-10 yoyo-11 yoyo-12 yoyo-13 yoyo-14 yoyo-16 yoyo-18 yoyo-2 yoyo-20 yoyo-3 yoyo-4 yoyo-5 yoyo-6 yoyo-8 yoyo-9 zebra-1 zebra-11 zebra-12 zebra-13 zebra-15 zebra-18 zebra-19 zebra-2 zebra-20 zebra-3 zebra-4 zebra-5 zebra-6 zebra-7 zebra-8 zebra-9 ================================================ FILE: artrackv2_mindspore/lib/train/data_specs/trackingnet_classmap.txt 
================================================ Nf1aqv5Fg5o_0 airplane AAB6lO-XiKE_0 person AACM71csS-Q_0 person AACM71csS-Q_1 person AARNQeeGCeM_1 person AARldOxX9Qc_0 bird AATSbTthMRo_1 person AAVQ--F7Bk8_7 bird AAVQ--F7Bk8_2 bird AAVQ--F7Bk8_8 bird AAWK6esRYaE_0 person AAWK6esRYaE_1 person AAjY2Ci68z8_0 person AA19zjGEPvg_1 bear AA28Bcp5cJ4_0 train ABBGULxaufw_0 person ABF8Qzi1y6k_1 bear ABIlEiPfEC4_0 bird ABJ_agLToOw_0 bird ABZMoeeFyek_0 bicycle ABny-jw1_S0_0 elephant ABrhnT3LRWs_2 cat ABxlnMGfo5c_0 umbrella AByCCGnybVU_1 person AB2MjrpRiEQ_0 horse AB-q-hxh9XQ_4 bus AB-q-hxh9XQ_1 bus AB-q-hxh9XQ_3 bus ACDuy9fWQCs_1 umbrella ACFxVnoXE2k_1 horse ACMvGMt8Neo_0 person ACM6PJWHfcM_0 person ACOGOPL4ZH0_1 person ACOGOPL4ZH0_0 person ACS5TtaAdG8_0 truck ACarEC5tuT8_0 truck ACiNZsAvVTE_0 person ACkYaVC9f9M_1 umbrella ACnQKLobnGE_4 airplane ACnQKLobnGE_5 airplane AC0Z4yw1hf0_0 person AC0Z4yw1hf0_1 person AC-10OYYnLM_1 person AC-10OYYnLM_0 person ADHNPU5iB_4_0 cat ADWpC6kDWFU_0 person ADiIG2D8pds_2 motorcycle ADiIG2D8pds_0 motorcycle ADi674XOuRY_0 dog ADn8ZdVYOcc_0 train ADn8ZdVYOcc_2 train AD1cVG81mpA_0 person AD4EACfWAIM_0 horse AD4EACfWAIM_1 horse AD531xkux4k_0 person AD7A6_o0Las_0 horse AEQT6XxEeT0_0 person AEQT6XxEeT0_1 person AESfphazWKA_0 person AESfphazWKA_1 person AEokTVMPd4A_0 person AEtwwIR9UkI_0 dog AE2TrzJHr2s_1 motorcycle AE3t_VNk3eo_0 person AE6G6W2CL9M_1 person AE7tEK8S9pk_0 bird AE7tEK8S9pk_3 bird AE-k9jcdaJk_1 giraffe AFLrK88FzTI_0 motorcycle AFOjy-9Kf-8_0 person AFSTw_O6inE_0 person AFSTw_O6inE_1 person AFT64SYoPTo_1 person AFeRUltwvNE_0 knife AFeRUltwvNE_2 knife AFf9I30fB6U_0 person AFkSCsJ_jeg_0 person AFkSCsJ_jeg_1 person AFnPp9mvoJs_0 horse AFpVfranYCA_1 knife AFrLubifeb4_0 airplane AFrLubifeb4_2 airplane AFsmSsZBS6I_1 person AFsmSsZBS6I_0 person AF0FDnfdpro_0 train AF0-2lDeBME_1 bird AF2bYjH_Q8c_0 person AF4nO1MeUis_1 train AGV9gZ6ePKk_0 airplane AGXVFK896Os_0 cow AGYehDNUqx0_1 airplane AGYehDNUqx0_0 airplane AGdqwMVGRoU_0 horse AGfcGfMXHPM_3 
elephant AGsg2IV8FME_1 skateboard ZBPURFcpqDM_0 motorcycle ZBXAMWkamQk_2 knife ZBXAMWkamQk_1 knife ZBcCcSynS3Y_1 car ZBcTSnaCcqE_1 person ZBcTSnaCcqE_0 person ZBcjhADZaUk_0 bear ZBdz7fg01uE_0 umbrella ZBp5ICCzoK8_0 person ZBriZpPQR6Q_0 cat ZBvEIHeKcKg_2 zebra ZBvEIHeKcKg_9 zebra ZBvEIHeKcKg_0 zebra ZBvEIHeKcKg_1 zebra ZBvEIHeKcKg_3 zebra ZBvEIHeKcKg_4 zebra ZBvEIHeKcKg_5 zebra ZBvEIHeKcKg_6 zebra ZBvEIHeKcKg_7 zebra ZBvEIHeKcKg_8 zebra ZB0EfmbWfng_0 horse ZB0kV8Ni0e8_0 person ZB_pe6v1lVI_0 person ZB_pe6v1lVI_2 person ZCAOpABRfTI_10 elephant ZCAOpABRfTI_0 elephant ZCAOpABRfTI_3 elephant ZCAOpABRfTI_4 elephant ZCAOpABRfTI_6 elephant ZCAOpABRfTI_7 elephant ZCAOpABRfTI_8 elephant ZCFCltdIjeg_1 person ZCFCltdIjeg_0 person ZCGB4r_lWmY_0 horse ZCS_eyAufDo_0 person ZCTwXcewINc_0 cow ZCfqT4CDOYA_1 bird ZCgDbEHLsIg_0 person ZClABNZVqqw_1 person ZCmoG6WgVO4_1 person ZCmoG6WgVO4_0 person ZCnJ6weWtz8_1 person ZCnJ6weWtz8_0 person ZCnJ6weWtz8_2 person ZCzrSOZhkx8_1 person ZCzrSOZhkx8_2 person ZC3Y42jSG_0_0 person ZC5Jtr93Fc0_0 cat ZDDtjYsFrzY_0 motorcycle ZDMLHna_uZU_1 skateboard ZDMSLfnIpw0_0 person ZDS-TQTDheA_0 person ZDWUEeCoa0c_0 person ZDfRsMjEWrU_0 person ZDucdx9SldA_0 bicycle ZDwG7VWIZ2E_0 motorcycle ZDw-tgE8yQw_0 person ZEA5lDwY3hY_0 person ZERPmLuCNr0_1 skateboard ZEYyXBrvcIU_0 person ZEbxfeAOLec_1 motorcycle ZEdGptkowmk_2 cow ZEdsROg2ZAk_2 horse ZEgcTqeZxOk_1 person ZEiW5hvCQyM_0 bird ZE16Mis16oE_0 bus ZE3Vro7d4pA_0 cat ZE415SbIjYI_7 bird ZE5h8vmL_Vw_0 boat ZE6oeN8ZzDA_1 person ZE6oeN8ZzDA_0 person ZFKQ9r76HHU_1 elephant ZFKYTz9Jkhw_0 umbrella ZFSspVdQ_1M_0 person ZFSspVdQ_1M_1 person ZFe5vGzmYgY_0 bear ZFe5vGzmYgY_4 bear ZFfH8M8dMH8_5 bird ZFk9b7tQz1g_0 person ZFn422HSENU_2 airplane ZFw7fJO3h3U_0 motorcycle ZF2yE0Tm8D0_0 cow ZF5yV-qvHfg_0 bicycle ZF8rySXBivY_0 person ZF_u1UFqAvg_0 person ZGHtP6pLosk_0 person ZGT9Ky1jJ0E_0 horse ZGWqLNy2PDM_2 bird ZGeWYNFOH7U_0 person ZGhdqsb3kNA_0 car ZGhdqsb3kNA_3 car ZGhdqsb3kNA_1 car ZGkmBkelEBU_0 person ZGpMZT1HUiw_0 horse 
ZGsHiz0oPuw_0 bus ZGvfU-Fgk40_1 person ZGyWFwMmdbs_0 person ZG9dVnPGocw_0 person ZHDkDNgRSz0_0 train ZHFPykjdFAY_1 person ZHPeB20mRyI_0 cow ZHPeB20mRyI_1 cow ZHX1xXuU_Jw_0 person ZHlb-NoDPiE_1 elephant ZHlb-NoDPiE_2 elephant ZHlb-NoDPiE_4 elephant ZHl7b8RItn0_0 horse ZHnW6ge8wBc_0 cat ZHodaPFcFYU_0 person ZHovXJVH8xk_0 truck ZHpZ3CGHl44_0 person ZHrrW673jzQ_1 person ZHrrW673jzQ_0 person ZHrsTuxP7aI_1 horse ZHu6CNOlw3g_0 cow ZHu6CNOlw3g_1 cow ZHxx4jT0QY8_0 person ZH1tP4KBq4c_0 giraffe ZH5HXdNA_Vg_0 person ZH-X6nu5grI_33 horse ZH-X6nu5grI_2 horse ZH-X6nu5grI_3 horse ZH-X6nu5grI_6 horse ZH-X6nu5grI_7 horse ZH-X6nu5grI_8 horse ZH_6GNzE7AE_0 person ZIAnd6kIMac_0 bird ZIAnd6kIMac_1 bird ZICz-o8kLz0_0 skateboard AGx9YQ6C-6o_7 car AG1KXUn4YG0_0 person AG_bCNeWGbQ_0 elephant AHARpIfT490_0 dog AHIF--VOeQs_0 person AHJcPNPqKmI_0 horse AHKFqtjfRZA_2 bear AHLL47_EdEA_1 person AHLL47_EdEA_0 person AHNC2jifaeA_1 airplane AHQLEaBATbw_0 person AHQW1ru8IzY_0 airplane AHQrFFp5yq4_0 airplane AHiwgwMi8HU_0 dog AHjEWaIP4Us_0 cow AHkvSb7kMDQ_0 person AHn7KxEbpSw_0 person AHvhccaU6e0_0 bus AHx-m9m2WSM_0 person AIAtwCnT8D0_1 person AIBVp_3pm4U_1 person AIBVp_3pm4U_0 person AIFwUvUUIAU_1 person AIPKb-NMVjk_0 airplane AIPKb-NMVjk_3 airplane AIVpT8BRXaQ_1 horse AIYDjtWzamM_0 bear AIYDjtWzamM_1 bear AIZGolX95Do_0 person AIbvvs9Mppk_0 person AIduTWoo-tY_0 skateboard AIeFzUH7L38_1 train AIkHZuaZGZc_1 elephant AIkHZuaZGZc_2 elephant AIpwAHaTBsI_0 train AI00Hva5A8g_0 person AI38cuNcfsE_0 knife AI73dwp8OlI_1 train AJAy74dPvNA_0 person AJCXZxF7mEU_1 skateboard AJDMiWpRbdY_0 person AJILdTCo1mA_0 dog AJKXpUsj3I0_0 bird AJRdbCnFyVo_0 elephant AJTfeXepoNQ_0 bus AJZ65x_ashE_0 airplane AJaOK6nLWLU_0 person AJaOK6nLWLU_1 person AJaOK6nLWLU_2 person AJh6EhObuEU_0 person AJiQZJH_ZsU_0 bird AJiYw7-oCvA_1 knife AJiYw7-oCvA_2 knife AJiYw7-oCvA_0 knife AJkWw2b2Qjg_0 horse AJor90pfjM8_0 cow AJtuQLfNvSs_0 cat AKBoEjrtQwE_1 train AKDi2KVrR1Q_0 skateboard AKIcyYzL9C0_0 cat AKMl62ZFICw_3 bus AKMl62ZFICw_1 bus 
AKN6nvHB7P0_2 airplane AKN6nvHB7P0_3 airplane AKPDvaUNx94_1 horse AKPDvaUNx94_2 horse AKVUSpeg9Jk_0 knife AKxpzCrmsi8_0 bus AK4AJfDZfEo_0 cat AK64udGI1BA_0 umbrella AK8imx-InYk_1 horse AK8imx-InYk_2 horse AK_J57sNeeo_1 elephant AK_0-KHw9wc_1 horse ALCj6V-0pU8_0 person ALKBlOms7sk_0 truck ALLYkPepYRc_0 train ALRR_HHP500_0 person ALRzJ2FzEoY_0 person ALYKJChPG6k_0 knife ALjxXEqJFTg_0 train ALpnjTPWIN4_0 bird AL73oE_aovA_2 bicycle AL73oE_aovA_3 bicycle AMDjY36EpsU_0 truck AMEZhZVe7hk_0 person AMEZhZVe7hk_1 person AMI4Xu1mmNw_0 elephant AMZeyszxY78_0 knife AMn7aithVV8_0 car AMz8PhUkmpM_0 horse AMz8PhUkmpM_3 horse AMz8PhUkmpM_7 horse AMz8PhUkmpM_2 horse AMz8PhUkmpM_5 horse AM5_HQ705r4_1 giraffe AM6sweCILPU_0 airplane ANHdxFi36CM_1 bird ANNbcEcj8Do_0 person ANQZ1MB6gI4_0 skateboard ANVkluf6XZA_0 cat ANWtZTJoYYc_0 dog ANZDRJnX_Os_0 person ANlhuKqnObE_1 person ANlhuKqnObE_0 person ANmJ_3l01rw_2 horse ANmJ_3l01rw_3 horse ANmkxc2V7qQ_0 person ANufFQ7Fqao_0 car ANufFQ7Fqao_1 car ANvWNG7bZj0_0 person ANwXehjlmOU_0 giraffe ANwXehjlmOU_2 giraffe ANwXehjlmOU_6 giraffe ANwXehjlmOU_7 giraffe AOFbvqQZz1M_0 person AOJiO3o1Pgw_0 person AONi1Rhl0VI_2 person AONi1Rhl0VI_1 person AOmvm3OOZZQ_0 person AOn9I3GEHoU_0 person AOo1qXfZWsc_0 bus AOq0zSQhX1E_0 person AOq0zSQhX1E_1 person AO9zthhr-og_0 person AO9zthhr-og_1 person APAgxsDsZqs_0 person APCppiM1SL4_0 person APEd6F66jXU_1 airplane APHhGoshqFo_0 umbrella APIrIPchQwg_1 person APIrIPchQwg_0 person APJ4_CEV8HQ_0 bus APLJsXaOe1c_0 person APQ99QCF6pA_0 person APRuUBgcBZc_1 person APYAGnOjUQQ_0 person APa_Xoa9qgg_1 motorcycle APcliMIvBe4_2 person APcliMIvBe4_0 person APcliMIvBe4_1 person APp-0CsKxpY_1 person APp-0CsKxpY_0 person APqdtMhtWlU_0 motorcycle APtqUIS_Hyo_0 person APwqoNNZyaA_0 person APyVeEcEt1U_0 airplane APyxRCm1XlY_0 person AP5QrGcnGoU_0 cow AP_vNEBzhqM_0 person AQALHMjkeh0_1 giraffe AQKHDJ9HKck_0 dog AQNEkyvgbeA_1 cow AQRKvHpsUk8_0 person AQTk87BXkxk_0 person AQVhyDD8GEk_0 person AQVthZjIETQ_0 truck AQcg3TVkW1s_0 person 
AQcg3TVkW1s_1 person AQi0YSJ74cw_0 person AQj3enGQQeE_0 boat AQminPRA2W8_0 person AQtIgG8RHRY_0 person AQvltP0EarU_0 person AQy7gL42wfo_0 airplane AQzJp7Qi_yA_2 elephant AQzJp7Qi_yA_13 elephant AQ2bfY90nuU_0 person AQ7YDkmwB4M_0 dog ARAX6-JmsNQ_0 zebra ARAX6-JmsNQ_2 zebra ARFd2qxDhpQ_0 airplane ARNkmINZamQ_0 cow ARNkmINZamQ_1 cow AROrQJq2sWY_0 person ARRADkl3-30_0 person ARW5DipSrBo_0 dog ARmfFWE2ruc_0 person ARmsnBnMyPc_0 person ARnGZQm8zOM_0 truck ARqQUEVhu24_0 person ARrbFDLoy0Q_1 person ARtGNhHj2NU_0 cat ARyGQdkbuyM_0 person ARyGQdkbuyM_1 person ASBgE1svBKQ_0 person ASD516fNs3g_0 person ASExrIzixaM_0 truck ASc0m6oxXVI_0 person ASc0m6oxXVI_1 person ASm_mkHCybA_0 cat AS1xCm7MYs8_0 person AS1xCm7MYs8_1 person AS2tsNB9LBI_1 knife AS5hg_3pOXM_0 person AS9kBpj7qvE_0 person ATKytgCulZM_0 umbrella ATakdxmz3qU_0 car ATkJNKtd8yo_0 person ATk9e0fbxBk_0 horse ATk9e0fbxBk_1 horse ATk9e0fbxBk_2 horse AT1zSxV6stw_0 cat AT5urL0Fr0c_0 bird AUGQ4XFEkGY_3 knife AUI-RsDtk4s_0 person AUMHV6JiwU0_0 bird AUZevw68t_s_0 bear AUcOQ1L4Nj0_0 train AUfaVvy5QxU_0 train AUguk_8JO_U_0 skateboard AUgw-t2MrtU_0 person AUzge-cBHfM_0 bear AU0RtWdAXcU_0 person AU114x-Qif4_0 person AU3mKa0Npq4_0 person AU8GXMxyP9U_0 person AVHVVt5Srow_0 bear ZIGThAlQuUU_1 truck ZIGkCx4o3G0_0 person ZIMLdoIIFbg_0 person ZIWkcVTlaRU_1 person ZIamYwe-hJ8_0 car ZIawXDt6JH4_0 cat ZIlyoSrDQQ8_0 person ZImLYekhFBQ_3 bus ZI6J2WSiZy0_0 giraffe ZI7DX2OSzzQ_0 airplane ZJCSQFa1W3M_0 person ZJDAzZZQ38k_1 knife ZJDAzZZQ38k_0 knife ZJEQHkA9NLw_1 truck ZJHeFXEtwNE_0 knife ZJJoit687Tc_0 person ZJJpIPciUts_2 skateboard ZJL9WONxDB8_0 person ZJMJBrWq8-o_0 person ZJOVhmSGVMM_0 person ZJXuyIEaSc4_0 horse ZJYXcUOxNRc_1 person ZJdKrkzHR94_0 person ZJdKrkzHR94_1 motorcycle ZJe2QoJwNa0_0 horse ZJimYyH6VUI_0 car ZJoQRLyRs8o_0 person ZJpozi2Piqc_0 motorcycle ZJwWllfPFjo_0 person ZJyDrvmQwY8_0 elephant ZJyDrvmQwY8_1 elephant ZJ5n1Y-yXqM_0 person ZKF4kfqyu6U_0 person ZKIuqz6GDSA_0 horse ZKJuI7-4560_0 cat ZKKalWR8MBM_0 boat ZKSF-y6kC1I_0 
elephant ZKSF-y6kC1I_1 elephant ZKTseP8JqIw_0 person ZKk703iOFmY_0 horse ZKrJdHuvvR8_0 person ZKy67yESvjM_0 person ZK1zKp1iJY4_5 elephant ZK1zKp1iJY4_2 elephant ZK3-Em8w4HE_0 horse ZK6pkPtSd_4_0 cow ZK_BL_TGwo0_1 train ZLFXKnOp0LM_1 knife ZLH6HbQ5Miw_0 person ZLSqYLLWQLc_1 cow ZLSqYLLWQLc_2 cow ZLcGyr4ZfJU_1 airplane ZLdb8-YkoiY_0 person ZLm8Hen6OFM_1 bicycle ZLm8Hen6OFM_2 bicycle ZLnf4vSxfgo_1 umbrella ZLqSGXI7FdM_3 knife ZLuY9hS-wd4_0 bus ZLuY9hS-wd4_1 bus ZLuY9hS-wd4_2 bus ZLupIiWNPOY_0 person ZL18xmfIKH4_1 motorcycle ZL18xmfIKH4_3 motorcycle ZL18xmfIKH4_2 motorcycle ZL3DgidLXjw_0 person ZL5SCZpZWtA_1 horse ZL-60We4drw_0 dog ZMDe7QMaLa8_0 person ZMD2tP69gaU_1 person ZMKFhrS_QnY_0 cow ZML6VoRZ_Tk_0 person ZMMDA6nYXZs_0 bird ZMPdl-1FCMQ_0 person ZMZU_V7d3-I_1 umbrella ZMa0bYeg_NE_0 dog ZMdAlm9Zx_A_1 car ZMeQ1Vc3HZk_0 person ZMuwZKOfK1s_0 motorcycle ZMvdpTH-1Ug_9 airplane ZMxu4wRDuqU_1 person ZMyEEXdgJeA_0 person ZM1xadWQqKQ_0 bus ZM2SMTrxUr0_0 train ZM3QVkm1izg_0 person ZM5-iyB8rFk_1 dog ZM_TO-0UDp4_0 person ZNJ8aytwo1E_0 person ZNP23sy27W0_0 person ZNTqZ3wERJE_0 person ZNUBh1ppeyo_0 skateboard ZNXCWGzmxK8_0 person ZNZx7hTxCQE_0 airplane ZNaTV3nGl6M_0 person ZNcUW5m7eRw_0 giraffe ZNg9OZgsMqc_0 bear ZNoQrAOf3Ns_0 truck ZNqpyPcacjY_0 motorcycle ZNv_LrEIljc_0 umbrella ZNxw9kVCouU_0 bus ZNzeI_r7GT4_0 truck ZN2bt7wkvH0_1 bear ZN5ukEMKLY4_0 cow ZN_gFe4IzxE_0 truck ZODUj9lsCzk_0 horse ZOEa1JGwnwE_0 person ZOEa1JGwnwE_1 person ZOGP8-XsFYc_0 person ZOIuTsiGyRY_0 bird ZOJSvR5KOsE_0 dog ZOMPRnYycak_2 cow ZOMnEZ4dWMk_0 elephant ZOStUYUIEdA_0 skateboard ZOTSBcRwdRA_0 person ZOX1xH7rOus_0 train ZOthVGHUcjo_3 cow ZOwhFlp5EiA_0 person ZOxDsYnvl0M_0 person ZOymkqw58fw_0 person ZOzQfVh1LN8_1 motorcycle ZO_5hZ2ex6Y_0 person ZPKaBLqoKvQ_0 person ZPNr3zZg6jk_1 person ZPNr3zZg6jk_0 person ZPNr3zZg6jk_2 person ZPQ0lqiH9uw_0 train ZPQ0lqiH9uw_1 train ZPQ3tbJp33I_0 train ZPVOrRypdRM_0 horse ZPZjgecd6OQ_1 boat ZPaWYb_4S8Y_0 person ZPeRU9CLLew_0 person ZPgUlFmZyP4_0 person ZPjN0Rp_1ZA_0 
horse ZPkO4x8HPaI_1 person ZPqs3xJ8sMY_0 person ZPqs3xJ8sMY_1 person ZPq9qgTZ4XI_0 truck ZPyxQD17Fq4_2 person ZPyxQD17Fq4_4 person ZP7SN9kW5kg_0 person ZP7sET2Y9dU_0 person ZP8YaHDM_qE_0 horse ZQCFPzE41bg_0 cow ZQDoAEWZCQk_0 person ZQG5CpZ3fLM_0 person ZQRzkpfy378_0 bus ZQZRNVrE9hk_0 person ZQarE1lLDl4_0 person ZQdhjMVGJrk_0 person ZQdhjMVGJrk_1 person ZQmTc5C-h8w_0 person ZQrMMWQidx0_0 person ZQuVUoqiT_I_0 giraffe ZQuVUoqiT_I_1 giraffe ZQ3LAYCIDf8_3 bear ZQ8X2cqYANs_0 train ZQ9G0UkTR1c_1 person ZQ_vGl5xbKY_0 cat ZRFMzM7kxuI_3 cow ZRFMzM7kxuI_0 cow ZRFMzM7kxuI_1 cow ZRFMzM7kxuI_2 cow ZRLkkoSR8o8_0 knife ZRMOgw0VYRI_0 person ZRNQrzQlVwA_0 person ZRNgdckx504_0 person ZRQug2qT1tc_0 person ZRSRBBpyBG8_0 person ZRXjiNMKvis_0 airplane ZRc8GDK_9hc_1 umbrella ZRkHgC0EAz8_0 person ZRmkeBogj-U_0 person ZRoz_bGkPaE_0 person ZRuQ3ipcK3o_0 bus ZRzOWgIAwe8_0 bird ZRzOWgIAwe8_3 bird ZR0Qj5P8snw_1 bear ZR4yO1ASDwo_2 person ZR_VWPjxLTU_0 dog ZSDCxbSs-Hs_0 person ZSFzv92w5z4_0 motorcycle ZSGJwERlcvM_0 person ZSXoUfKY7t8_0 person ZSdzUC2BB8Q_0 train ZSdzUC2BB8Q_1 train ZSkkNWgXm6E_0 skateboard ZSkkNWgXm6E_1 skateboard ZSn4gRAJToo_0 cat ZSoJT194AtI_1 skateboard ZSoJT194AtI_0 skateboard ZSruK26cGuI_0 dog ZSs6Knma-Q0_0 cow ZSs6Knma-Q0_1 cow ZSu3GocMJzI_0 car ZS29l3t9vK8_0 person ZS6NQXztroI_0 person ZS_wuZnVzbw_0 person ZTLDJDjvSuQ_0 truck ZTPTnzEs_Lc_0 person ZTcRmNM1n8M_0 person ZTjOZ-dZDEg_1 car ZTmHHCmX7aw_0 skateboard ZTnEKCqMNHs_0 person ZTo33r_63Wg_0 knife ZTw6Dkp-LPU_7 elephant ZTw6Dkp-LPU_0 elephant ZTw6Dkp-LPU_4 elephant ZTw6Dkp-LPU_5 elephant ZTw6Dkp-LPU_6 elephant ZT5iwG3vEhM_0 umbrella ZUCf2cVBY08_0 person ZUWSpLaJj4M_0 bird ZUYtIKrcaKo_0 person ZUaHjAaQqF0_0 bus ZUdCQl7WU_U_1 person ZUdCQl7WU_U_0 person ZUd0IAbilBA_0 elephant ZUoFqGf_ijs_0 elephant ZUoJFmQ6ro4_0 person ZUwniKcHERQ_0 horse ZU0WSpOWSak_1 bear ZU0_sT3EbVY_0 zebra ZU9LGiLzKJg_0 motorcycle ZU-ZhVyhBpA_1 bicycle ZVAHreexSa0_0 person ZVBjo5HM0Do_0 knife ZVD-ea5SjMg_0 person ZVJpmiue5IA_0 truck ZVKyUsgomW4_0 
person ZVOMkt8TORM_0 train ZVQo_9tFZGY_0 bus ZVY_873YYQY_0 skateboard ZVZJRbJ2h1A_0 cat ZViLnbCdjZM_1 person ZVlOetMc3m4_0 person ZVl8So4V1Ss_0 cat ZVnaHf8vAhA_0 zebra ZVtPRAs8Za0_0 person ZV8NIO3XuLQ_0 person ZV9eJe2grq4_1 bear ZWIPlBvd1DI_0 person ZWIPlBvd1DI_1 person ZWJv_-wAdws_1 skateboard ZWKHlq-W7_8_9 train ZWKHlq-W7_8_14 train ZWKHlq-W7_8_0 train ZWKHlq-W7_8_1 train ZWKHlq-W7_8_4 train ZWKHlq-W7_8_7 train ZWKHlq-W7_8_10 train ZWKHlq-W7_8_11 train ZWKHlq-W7_8_12 train ZWKHlq-W7_8_13 train ZWNe-zcl-IY_0 boat ZWNjUm5Uzh0_1 bicycle ZWNjUm5Uzh0_5 bicycle ZWXE7IAaWrg_0 person ZWXSnELtawA_1 knife ZWXSnELtawA_3 knife ZWX1cGhJG98_0 bicycle ZWlTD6EbOTo_0 person ZWqzdCz6UvY_0 bird ZWr6RECjqV0_1 horse ZWr6ZU_-ir4_1 person ZWthtO1iGtQ_0 person ZWwlzozPAk8_0 person ZWxn8yT0bXo_0 cow ZW0HC4IRa64_0 person ZW3CWoXzrn4_0 bicycle ZW3CWoXzrn4_1 bicycle ZW5VkDNSfWA_0 cat ZXMqiFE6KOE_0 airplane ZXRcWIcok2I_0 person ZXgYAh2AWyk_0 horse ZXp6jOe8DUE_0 person ZXyJafbGcBM_0 horse ZXzno8CjUyM_0 elephant ZYB9yzoJ6jc_0 person ZYG83auB9Lk_0 train ZYIgTdUmOWk_0 elephant ZYKlgXftesk_0 cow ZYM0_4YzeeQ_0 person ZYRgw5rNhE4_0 person ZYS7WVlJbuU_0 person ZYX53PWsBdk_0 person ZYY8vkvB1zU_0 person ZYkIkq9kfLc_0 dog ZYlANECCXnI_0 person ZYocOIOyuqs_0 person ZYsifQxv94s_1 motorcycle ZYs7rbZt8Zw_0 airplane ZYs7rbZt8Zw_1 airplane ZYs7rbZt8Zw_2 airplane ZYtk2iVNC90_2 airplane ZYtk2iVNC90_0 airplane ZYxn9wmzRI4_0 bicycle ZYxn9wmzRI4_1 bicycle ZYzeKMdP2SE_0 person ZYz6B5dwXcE_0 person ZY_urkqeQLM_0 bicycle ZZANjG2Z5Jk_0 person ZZFzCaL48sE_0 cow ZZNRG-ux4fw_0 person ZZQDFjbEcHQ_1 bird ZZQDFjbEcHQ_2 bird ZZQSDwoLZ00_4 knife ZZSFKq4WH78_0 cat ZZVPKuh-2v8_0 person ZZVx_IT4voA_0 person ZZlf3LtDpH8_1 bear ZZpLkBcXUgs_1 person ZZpLkBcXUgs_2 person ZZxMtMlV-MM_0 cow ZZyW-2jZcIo_0 horse ZZyW-2jZcIo_1 horse ZZ20JXRExdg_0 person ZZ8OuI39UTM_1 person ZZ85EAvnAGU_0 person ZZ85EAvnAGU_1 person ZaDVUoq6h5o_1 person ZaD5V9_Vw2w_0 person ZaJb3JTan7Q_0 person ZaLqPrH_aVo_0 train ZaLqPrH_aVo_1 train ZaNZV-lM-3o_0 person 
ZaNZV-lM-3o_1 person ZaPC288yVBg_1 bicycle ZaPC288yVBg_5 bicycle ZaPC288yVBg_7 bicycle ZaPltFe0S_o_1 truck Zabt7ElK3jM_0 person ZacHdhX9F9M_2 dog ZadGgAG3PzE_0 person Zaew_bHz-PQ_11 umbrella Zaflj5gSZEw_0 person ZanT0hXyJhk_0 bird ZavCWamLatc_2 person ZavCWamLatc_1 person Za4BYhhaFFQ_1 zebra Za6oX4aQR34_0 airplane ZbB-tdDvITQ_0 motorcycle ZbDu8V7ppZE_0 motorcycle ZbHt1sn7oTI_0 person ZbJvtTVTTV8_0 knife ZbQXzueqj4Y_0 horse Zbgfg8usx-k_0 person Zbgfg8usx-k_1 person Zbm5_qB8fEs_0 person ZbrJHC_mHlo_1 person ZbrJHC_mHlo_2 person ZbrJHC_mHlo_0 person ZbrqZYGiMvE_1 cow Zb2Vz655gh4_2 horse Zb755JeGMpU_2 person Zb-JKfQ5emU_1 person Zb-JKfQ5emU_2 person Zb-JKfQ5emU_0 person ZcJPap_gVyo_0 person ZcXA6CyQBi8_0 cat ZchU4DxP5A8_0 person Zcw7wSfd2JM_0 person Zcw7wSfd2JM_1 person ZdElKzM-US0_0 umbrella ZdKO1sC4o60_0 person ZdMbx0IXDzs_0 person ZdMm6j__cQM_8 bicycle ZdTZrRX0dv4_0 truck ZdXrQlOU7iw_1 bicycle ZdaFXJzLLUs_0 person ZdaFXJzLLUs_1 person ZdeTj7nyN-s_0 boat Zdevf1MbY8U_0 train Zdevf1MbY8U_1 train Zdevf1MbY8U_2 train ZdirtQF_sjE_0 person ZdlnVpHrDcg_0 giraffe ZdlnVpHrDcg_2 giraffe Zdq2csZeJr8_2 person Zdrk4yHmMXA_0 person ZdtUPHscS-s_0 person ZdxD4gqVioQ_0 cat ZdxHWwaivLc_0 cow ZdyBZtlMq-M_2 bear Zd3j0bQV6NI_0 person ZeHLf0q4Z1Q_0 person ZeZAZbMg1zY_0 person ZeaoaXZDhPw_0 person ZemOY1F1bVo_0 truck ZemOY1F1bVo_3 truck ZemOY1F1bVo_1 truck ZerHfx3SLxU_0 person ZerYXYTyhoc_0 person ZetcbIDyydg_1 car ZetcbIDyydg_0 car ZeuqVhpsVu0_0 horse Ze6GIOUVxZU_0 person Ze8W47hBrrE_2 skateboard ZfAFALQjUwI_2 person ZfAFALQjUwI_1 person ZfAM39o5Cbc_0 bird ZfDkxwMowSk_4 elephant ZfF5Z0hrOQw_0 person ZfHSyDaLaw0_0 airplane ZfHSyDaLaw0_2 airplane ZfHSyDaLaw0_1 airplane ZfJvZeaN7Ro_1 person ZfTTW39iHJQ_0 person Zflcz9EKz4g_4 elephant Zflcz9EKz4g_1 elephant Zflcz9EKz4g_2 elephant Zfmwrq2aghI_0 person Zf86HoPHmBs_1 bird Zf86HoPHmBs_0 bird Zf-rSx5ZNB8_0 person ZgK0Y4PgWSM_0 person ZgOr7facaIw_0 skateboard ZgP7q-rIhs0_1 person ZgTDthFY-aI_0 bird ZgZ18HIfCGc_1 motorcycle ZggirLBvHSw_0 dog 
ZgjspuwgTAc_0 person ZgtG8Zy63UQ_0 person Zg18GZ5OFWw_1 person Zg2YrzGNuZs_0 person Zg4f2iY8_zo_1 cat Zg4f2iY8_zo_0 cat Zg5MdsCXRWM_1 cow Zg5MdsCXRWM_0 cow ZhLB-laOg_g_9 bicycle ZhLB-laOg_g_3 bicycle ZhLB-laOg_g_5 bicycle ZhLB-laOg_g_6 bicycle ZhLB-laOg_g_10 bicycle ZhLB-laOg_g_12 bicycle ZhPafr5WTEs_0 person ZhtgT8q5Gm4_0 person Zhtr_XhO6_4_0 train Zhtr_XhO6_4_1 train Zh6QWGGQ9dU_0 person ZiJFOBVGah4_0 horse ZiPO1UcM3IY_0 dog ZiP2ydBHuPs_2 person ZiSl_Dy1ZB4_0 person Zibk3bXvHCY_0 cat Zig1VrVbQc0_0 horse ZimvCFcji0A_0 person ZisoM7y_CS4_0 person ZitUYI22J54_1 knife ZitUYI22J54_0 knife Zi1etYbSUmQ_1 person ZjCbmE2jLo4_0 person ZjFb1VLHvyg_1 horse ZjPmZ4grIFA_0 person ZjPmZ4grIFA_1 person ZjQqfJ1Docg_0 person ZjQ9lIlCehk_0 skateboard ZjSloqSrfWU_1 airplane ZjSloqSrfWU_3 airplane ZjWBw4tZUO4_0 train ZjWBw4tZUO4_1 train ZjWBw4tZUO4_2 train ZjWBw4tZUO4_3 train ZjWBw4tZUO4_4 train ZjWBw4tZUO4_5 train ZjWBw4tZUO4_6 train ZjbhM1ZiKW8_0 person ZjbhM1ZiKW8_2 person ZjcEfOHRyLQ_0 truck ZjcevqmMJvY_0 person ZjgTSjb7Vh4_1 car ZjnaerD1MHM_0 elephant Zjn6uD43ewg_4 airplane Zjn6uD43ewg_5 airplane Zjn6uD43ewg_1 airplane Zjn6uD43ewg_2 airplane ZjpmS5k09Ug_1 person Zjpzw1n9Lvc_0 skateboard ZjsEX7nNYdQ_0 person ZjxiHzcXOAs_0 person ZjxiHzcXOAs_1 person Zj2HBun9kBY_0 person AVW26zY72Ns_0 person AVXWb0s5LZw_0 person AVqCe7X9Pp4_0 cow AVragVmWr8M_0 motorcycle AVvnZ-Ky-ew_0 person AV9y4LnUV84_0 dog AWAQTemnBJc_0 person AWCUoghX20A_0 cow AWD_KAfvb0U_0 skateboard AWOhJ9RZReg_0 person AWOhJ9RZReg_1 person AWPNd7zPJzg_0 person AWPNd7zPJzg_1 person AWZt9EdU3BU_3 zebra AWdKXFitdJI_0 boat AWh2S4rI6kc_0 person AW1SjuoheU8_0 cat AW2cvkaExG4_0 cow AW8munaOGqw_0 person AW--f4fsLFY_0 train AXB4hYQKqUw_0 person AXB4hYQKqUw_2 person AXQlwoC_K0g_1 truck AXX66Oq_RkU_0 person AXhx8hncZvA_0 boat AXm0KvcIchQ_0 train AXtXzxTXTqI_0 elephant AX2rS0bpAmM_0 horse AX4Hsfdm-Fo_0 elephant AX8WoOXfJDA_0 person AX-xVtjP42Q_0 person AYLoR7L3CMs_3 bird AYLoR7L3CMs_1 bird AYUGoWokN_0_0 person AYYdBxTI_54_1 train 
AYakvLR8aVM_0 person AYe6Wf0URgo_0 truck AYgbgSVClN4_0 person AYg1V2ol96s_0 dog AYj70IRvvwI_2 airplane AYj70IRvvwI_3 airplane AYn-qtOy_nc_0 person AY7foLy1uok_0 elephant AY7foLy1uok_1 elephant AY-AbrJPyY0_0 train AZHYXkv5rMk_0 bird AZJsII37MPY_0 bird AZMW1TyN6Z4_0 person AZQjsUm-CXk_1 person AZhH2ej_x_g_0 person AZjZ1ZSyCeE_0 person AZk4MAu-j90_0 person AZleWF5zAxc_1 bear AZl3Emy9K3A_0 horse AZouBTtQrtM_0 person AZpAuvQryZo_0 person AZpAuvQryZo_1 person AZ9SW8bxD3E_0 bicycle AaGwVQ6UjOE_0 person AaRVwgGBmWU_0 person AaTW4oc5bBU_0 person AaZsdPwg9qg_3 bus Aac18k-eLZI_0 person Aac18k-eLZI_1 person Aac18k-eLZI_2 person AakpjcyvFSo_0 person AalaqaXsEbs_3 umbrella AalaqaXsEbs_0 umbrella AalaqaXsEbs_1 umbrella AalaqaXsEbs_2 umbrella AaoK6DPQKII_0 bus AaotWWHg4eU_0 truck AaotWWHg4eU_1 truck AaotWWHg4eU_2 truck AasksRmCk1g_0 person AatNkWo2ryE_0 person Aa0FU2EIMZ4_0 bird Aa-wzDtjCGc_0 person Aa_biYfYp08_0 person AbEsU9EX9XQ_0 elephant AbEsU9EX9XQ_2 elephant AbO_VrlyQ8I_0 umbrella AbTxhwSueZw_0 person Abd7Vn-Nyt8_1 truck AbeOAFhMXBY_1 bird AbeOAFhMXBY_2 bird AblKd4XIjqk_0 person AbmnNkzkXFg_0 elephant AbmnNkzkXFg_1 elephant AbuMVYzS0mw_0 skateboard AbvoOuTpLtA_0 dog AbwI4m0H9Hk_2 train Abx126RTs10_1 elephant Ab9zgKJnr9Y_1 person Ab-vGS2mqFQ_0 cow Ab-vGS2mqFQ_1 cow AcCU5YAWXlw_0 dog AcReGpoHOZI_0 person AcSmnBYhEsg_0 person AcTgPRNars0_1 truck AcUEWZRPoGA_0 umbrella AcZNiBe0Fgo_0 person AcZukbBG7tI_0 boat Acc1yTFpH2c_0 dog AcpBKywfL4o_1 cat AcpOxyI_YPI_0 person AcprJcYvkbY_0 person AdDiiRHwZ2E_0 cat AdEH-oHs1Qo_3 train AdEiQT7Nm0o_1 motorcycle AdE2jnpk6AM_0 boat AdbsyVjq_Xs_0 cow AddL-M622TI_0 knife AdgTVbi_kus_0 person AdsPsjswSGQ_0 motorcycle Ad044xbRhE8_0 person Ad2TSmaLvX8_0 person AeDfdgrccVw_0 person AeHbZ3U8S8U_2 train AeWBkNuJmEA_0 truck AeWBkNuJmEA_3 truck AeWBkNuJmEA_4 truck AeWBkNuJmEA_5 truck AeakbNNwcW0_0 train Aec4uweTSes_2 skateboard AeflYi3Sxss_0 person AegDGWXkWNw_0 person AenVUPH1ils_0 bird AendE1XHSps_0 bicycle AerUXP3Mmks_0 person Ae5qWkNt6RU_2 car 
Ae7ucKj40mw_0 dog Ae9Zd3lP7bg_0 person AfHkdkvxhNs_0 elephant AfNCSPijpao_0 person AfNGR5iEpvU_0 cat AfNtKiB_rD8_1 motorcycle AfWHElsVCyM_0 cow AfWfexnwsHg_0 person AfWfexnwsHg_1 person AfkKO6j4jWc_0 person AfmMpft13ZU_0 person AfnQoNimSjc_0 person AfynslRqwxI_0 car Afz2VDV4UHg_1 person Afz2VDV4UHg_0 person Af2MGhdZAn8_0 person Af2VyQEZtfk_0 person Af6Ve26JUOg_0 person AgBaUhTbzxA_0 airplane AgBaUhTbzxA_4 airplane AgBaUhTbzxA_5 airplane AgBaUhTbzxA_3 airplane AgBaZRmz8IY_0 skateboard AgJCf77qxsY_0 person AgP2HoU83S4_4 knife AgYhFemsFag_0 person AgZ2iflIKWc_1 person AgaetfTOzc8_0 person AgdrEW8jmw4_0 truck AgqmhFD0R94_2 elephant AgqmhFD0R94_3 elephant AgqmhFD0R94_1 elephant AgrKeQXSU2M_0 elephant AgrKeQXSU2M_1 elephant AgrKeQXSU2M_2 elephant AgtCW50wfig_0 person AgvxdVNj5Oc_0 skateboard Agw5t7YSQbE_0 skateboard AhAW4UKPzz0_0 giraffe AhE2vDF6Gbc_0 horse AhE2vDF6Gbc_1 horse AhjsDq9fEzQ_0 person Ahv2jhPqRPg_0 person AhwGPZWtf3E_0 person Ahxq6Rtu3lc_0 person Ahx3IZujXDw_0 bus Ah0AGjta1qg_5 bird Ah04VeRs2hg_0 truck Ah4x4EfR3BY_0 motorcycle Ah4x4EfR3BY_1 motorcycle AiIc8FW3q98_0 car AiL_iCJ8HZI_1 person AiNLvzwt3_w_1 bird AiNLvzwt3_w_2 bird AiP7EOvTpK4_0 motorcycle AiP7EOvTpK4_2 motorcycle AiU_T3DZI2w_1 bus AiU_T3DZI2w_2 bus AieRY99VkmE_0 person AieVzbENJv0_3 bicycle AiieCerOKpc_0 person Aik2hirrxEo_3 airplane Aik2hirrxEo_0 airplane Aik2hirrxEo_1 airplane Aim6_lZQi4g_0 person AiqqXxqnPPM_1 cow AiqqXxqnPPM_0 cow AittR1dd2SI_0 train AittR1dd2SI_1 train Aiv3XHMuVq8_0 train Aiyfw0Zh38k_0 person Ai29fDmklxM_1 person Ai29fDmklxM_0 person Ai3S7n1Aofs_0 elephant Ai-487iZv0E_0 person AjFhyF1XZw4_0 person AjJHvamHoMU_0 horse AjPBAy1xgrY_0 person AjVe8d0vc1E_0 person AjamPk2Geuw_1 bus Ajg7q9zxJUo_0 person AjroIzI2OW8_1 truck AjroIzI2OW8_2 truck Ajsu2bGngDw_1 person Ajs4qdBK7Jk_0 elephant ZkD_WAxZB3o_0 cow ZkHPsjy-YUQ_1 knife Zkbav-Qoxds_0 horse Zkbav-Qoxds_2 horse Zkbav-Qoxds_1 horse ZkidaaVx2VU_1 bus ZknqgRL504A_4 bear ZkqA2kLudwE_4 train ZkqA2kLudwE_0 train ZkqA2kLudwE_3 train 
Zku9JAotBZ0_0 boat ZkzM2jvV2AY_0 person ZlBfF2yK2vg_1 person ZlBfF2yK2vg_2 person ZlBfF2yK2vg_0 person ZlDsSDEHEzY_1 cow ZlDsSDEHEzY_0 cow ZlDsSDEHEzY_2 cow ZlFElBglnHA_0 cat ZlP8tmFYeyY_5 bird ZlfyrRfHDoc_0 cow Zljx0icnRa8_0 person Zljx0icnRa8_1 person Zlmsqen0qZo_0 person Zln667JkWo8_0 person ZmHKBIsSjQA_0 horse ZmHKBIsSjQA_1 horse ZmVLw9-fLDo_0 car ZmbXlevaX2U_1 boat ZmgJjFt3JU4_0 skateboard ZmhKe4_d5Ag_0 person ZmiCqFxUJSw_1 airplane ZmkKOYN1dRw_0 person ZmrCaB8p3IM_0 bear ZmuzvhzN6EI_0 cow Zm3AU4TEpEw_0 person Zm5VvBaQUwU_0 bird ZnRgQ1VBIGE_1 person ZnWAM5ju8NM_0 person Zne4XpVG2YQ_1 person Zne4XpVG2YQ_0 person Znr-Uiobo-k_0 person ZntDSf8cCPI_0 person ZnvLWU_PCZ0_0 motorcycle Zn-r14oEJwM_0 airplane ZoC1knYO0Tg_0 cow ZoJIup20AGU_0 person ZoKfc3OL0JY_0 person ZoK4wKRoZjY_0 person ZoN4k6UNw6I_1 horse ZoOvu218D6M_0 person ZoR1yoQzsbM_0 person ZouHgocvjDI_0 bird Zo-8G7N2DXU_0 person ZpAlbL-YE0E_0 bus ZpCrRb_a9QI_0 person ZpCuVDLXQSw_1 horse ZpCuVDLXQSw_0 horse ZpSzmFLEm0c_1 car ZpURI0wRgws_0 person ZpXJ-0dv6Us_1 cat ZppFK22HdIk_0 person ZpqXtZfe-3w_0 cat Zp1nQXN7dyg_0 horse Zp2CuvTAZLw_1 person Zp740cgCPPE_0 person Zp8GHxi_5l0_0 knife Zp8GHxi_5l0_1 knife ZqM9VL5DJ28_1 person ZqOcOhiAI6k_0 cow ZqS1PqS3iT0_0 truck ZqW027iDkCI_0 person ZqXFvdeNrYI_1 person Zqa0-AUnl9s_0 person Zqm8A3wpeJQ_0 person ZqtVs5joekw_0 cow Zq018zZzx1c_0 person Zq1u84GLCHI_0 motorcycle Zq5nK49UZ_o_2 elephant Zq5nK49UZ_o_3 elephant Zq5r3BwLg_c_0 skateboard Zq-RNCVoZFs_0 person ZrA0NE09ipc_0 dog ZrDoGqu-A5A_0 train ZrI4ruv6B3o_0 bird ZrKpKmp29_o_1 bird ZrKpKmp29_o_3 bird ZrKpKmp29_o_6 bird ZrK5JKg83qU_0 person ZrUx83OGIOk_0 person ZrW7Si0hJKI_0 person ZrbVa__ne-0_0 person ZrfPtqkS_MY_0 airplane ZrfPtqkS_MY_1 airplane ZrfPtqkS_MY_5 airplane ZrfPtqkS_MY_6 airplane ZrfPtqkS_MY_7 airplane ZrgMnk8f_TA_0 person ZrgMnk8f_TA_1 person ZruJ2hhn9z0_1 person ZrvWeRZ_dyU_1 cow ZrvWeRZ_dyU_0 cow ZrwXUWAxjIM_0 giraffe ZrzdqF_ePkM_0 horse ZrzdqF_ePkM_2 horse Zr5eAtkuxQ0_0 bear Zr_AAxouNfg_0 cow ZsCaDsfPNec_0 
cow ZsDDOO-bpFA_0 person ZsDDOO-bpFA_1 person ZsESx0nIYqI_0 elephant ZsESx0nIYqI_6 elephant ZsESx0nIYqI_7 elephant ZsJCwiPEvkI_0 person ZsLDBiZ0o14_0 skateboard ZsPVRik6m_c_1 bear ZsSkZhL-HOM_2 bicycle Zsb2ucv_mAg_0 person Zsdv_3EWODM_0 person ZsyMk67bjIM_0 dog Zs0j_1tuTDo_0 person Zs1ltKMvRec_0 person Zs1ltKMvRec_1 person Zs79wUXMpx8_0 bear ZtA8n6dsH-w_4 car ZtA8n6dsH-w_1 car ZtA8n6dsH-w_2 car ZtA8n6dsH-w_3 car ZtDUifuLGrM_2 bird ZtEDTuHcM9U_0 person ZtM6JRtVtpU_0 motorcycle ZtToUMIMdYE_0 person ZtlDJ70ap8Q_1 bear ZtlJcLPPjsg_0 person ZtsGzhfZg9g_0 person ZttTri7sEK4_0 train Ztyep9o6CLE_4 bus Ztyep9o6CLE_6 bus Ztyep9o6CLE_7 bus Zt9qKAA_xyA_0 person ZuC0Jr3Y3s8_0 car ZuGpcHtPLLA_0 person ZuWlzE4F84c_0 truck ZuhmoYvtP40_1 person Zuicm6_fX9I_1 bicycle Zunjyc7DIP4_2 train ZuoBIQ-Kq74_0 person ZuqXxaMAufU_1 person ZuuL_Yi4FZQ_1 dog ZuuL_Yi4FZQ_0 dog Zuy59kV2M-0_1 person Zu-vh46IwiU_0 cow Zu_dXJvDHdo_0 person Zu_f8xuOweg_3 elephant Zu_f8xuOweg_1 elephant Zu_f8xuOweg_2 elephant ZvDo2WbWL4g_1 person ZvDo2WbWL4g_0 person ZvJItzBdO04_1 person ZvJrqHsPVL0_0 bus ZvSN_Y6vK3c_0 person ZvV5mqJgbcQ_0 cow ZvfCrJvE1Tg_0 horse ZvfIYK-AWCw_0 person Zvlx8vSlAPs_0 bicycle ZvtGPgtfhE8_0 person ZvtuffxB5EY_0 person ZvyOzgxu-4Y_0 truck ZvzVi9irgvw_0 bear Zv6DWiKAux4_1 person Zv9e9Vm6Vis_0 motorcycle ZwDqCxCFpF4_0 bicycle ZwDqCxCFpF4_3 bicycle ZwH5xnh6Thw_0 person ZwW6ybIP8ys_0 bus ZwdSYMz9ioo_0 person ZwmRodW5wgg_0 horse ZwrtmR7ewc4_0 person Zw7a69yU7f0_0 motorcycle ZxAlVbDwlCc_2 bird ZxAuwcxhXxc_0 person ZxE5MjV6i4w_0 skateboard ZxOVw-Lc-NI_0 person ZxStkYy-wgo_0 motorcycle ZxUKijmOWJc_0 person ZxitXAY6Xsc_1 knife ZxqbwwO81Xc_0 train Zxv2BRQIWm0_4 airplane Zxv2BRQIWm0_5 airplane Zxv2BRQIWm0_7 airplane Zxv2BRQIWm0_8 airplane Aj7HWiU0iQg_0 skateboard Aj_E-ObfzoE_1 person AkGYKkcRyPM_0 dog AkHT5Oo22rQ_0 person AkMpnm9JrLU_0 person AkWcVIeIx34_0 boat AkaR-XgClv0_0 person AkaR-XgClv0_1 person AkeAdeJpbpg_0 train AkeAdeJpbpg_3 train AkeAdeJpbpg_1 train Akh0VNTS6G4_0 person Akh0VNTS6G4_1 person 
Akh0VNTS6G4_2 person Akh0VNTS6G4_3 person AkkNBGH82Ic_0 horse AknHhsIpRqc_0 airplane AkxKeaxEnvQ_0 dog Ak3XQg9z8XQ_0 person Ak8ygMb5ykk_0 person Ak8y7dALcJI_0 person AlAUJSBL-e4_0 dog AlNCPdpo1gg_2 bicycle AlNCPdpo1gg_5 bicycle AlNCPdpo1gg_6 bicycle AlNCPdpo1gg_0 bicycle AlNCPdpo1gg_3 bicycle AlNCPdpo1gg_4 bicycle AlPZeADzCKc_0 person AlPZeADzCKc_1 person AlXlVnkucyU_3 train AlXlVnkucyU_1 train AldX05MqOs0_0 person AleuxLN7VcU_1 bird AlfbdsgKBAc_1 person AlhjN5qz_WI_0 train AlikgfDMckk_0 person AlnIWAFamHE_0 bear AltA5vQ7Icw_0 bus AlzB8mXDcYc_0 horse Al2hm71ia6E_0 person Al9l6-4QDz0_0 horse Al9wCTPpSWM_0 skateboard AmPe5gTOCTo_2 person AmPe5gTOCTo_0 person AmPe5gTOCTo_1 person AmQ_UrwLf3g_0 person AmRyW4hmSjw_0 person AmcAzvpvDRg_0 bear AmeaTbvmKvo_0 car Amt8BGudD0w_0 skateboard Amt8BGudD0w_2 skateboard AmuX-Lv7OeM_2 cow AmwvLxALyCw_0 person Am2wElVETcw_0 cat AnD6ijSktyM_0 person AnEC6v3fXrE_0 cow AnOwuTW7DKk_0 cow AnOwuTW7DKk_1 cow AnQ2ZY1JxAY_2 person AnWClR8yyu8_0 person AnZKri0xn-c_1 cow AnZKri0xn-c_2 cow Anb2IyxcJbk_0 horse Anevw4PbqTo_0 person AnkgvW70F5E_0 person AnkgvW70F5E_1 person An342tYqi5g_0 person AoI1hSI0PSI_2 car AoKs5jwMuHc_0 person AoP-So0vjIc_0 cat AoSwFyY0f_A_0 person AoXHZgatpco_1 horse AoXHZgatpco_2 horse AoXHZgatpco_3 horse AoXHZgatpco_4 horse Aof87CGS8NQ_1 skateboard AoiCmKM8xz0_1 truck AojgueRMVCY_0 person AolLjcEFv5o_0 person AopGnIjKuEk_0 motorcycle Ao0EDmBMIQk_0 person Ao0EDmBMIQk_1 person Ao7Iys-_lZs_0 skateboard Ao_b43xexzA_0 person ApJMiJjCxCY_1 car ApJMiJjCxCY_4 car ApJMiJjCxCY_5 car ApP4eoyM72g_1 skateboard ApWIa9pt-vk_0 person ApilCZCROGI_0 motorcycle ApjCOCv29N8_0 person AppgdYQTII8_0 truck Ap1gZJZynL4_0 person Ap-iaHj5SLk_4 elephant Ap-iaHj5SLk_5 elephant Ap-3HonA5go_0 person AqBYSr4wmpQ_0 person AqKP0V3Xj7E_0 cow AqOxDunFl08_0 airplane AqOxDunFl08_1 airplane AqSP11-eje8_0 boat AqUxRBRS-n0_0 skateboard AqZhKjLLG70_2 boat AqdAnSsQLI8_1 person AqdAnSsQLI8_0 person AqlHHwyJypE_0 bird AqmXAZYmPJc_0 person AqmXAZYmPJc_1 person 
Aqo5yZkzz8I_4 truck AqpinwPH8gM_1 person AqpinwPH8gM_0 person Aqqs8XxA8gM_1 horse Aqqs8XxA8gM_0 horse Aqqs8XxA8gM_2 horse AqqvZzLy3IE_0 motorcycle AqsuBaW1L0Q_0 person AqxTv7XRAH0_0 person Aq_n86sub5o_2 bicycle Aq_n86sub5o_3 bicycle ArJNEsuLzDc_0 person ArJaHKwfOEo_0 person ArM6GXi6YnI_1 dog ArbpF1NIm-s_0 car ArbpF1NIm-s_1 car ArfeHbvYvKY_0 motorcycle AriIdq0ZPfE_1 elephant AroxRXjr3po_3 bear ArrB-hbOgf8_1 elephant ArvYqb1hJSk_0 person AryOE3od43M_0 person Ar7WaiToztg_0 person Ar8Wk3m0uZ0_1 person Ar8Wk3m0uZ0_0 person Ar-vOeN30bM_0 cat AsJt3MHLGiM_0 person AsKUm364aHg_0 person AsNy8gmdVec_0 person AsWWfQtZSHA_0 person AsY1dt4QojM_0 person AsZa3il8cZQ_0 person AsfAcK_laZA_2 horse Asix5lGmXlg_0 airplane AskNHLhn1t0_0 cow As_a3CyN-kQ_0 bicycle As_a3CyN-kQ_2 bicycle As_a3CyN-kQ_7 bicycle As_a3CyN-kQ_8 bicycle As_a3CyN-kQ_10 bicycle AtFOIFqxLKs_0 person AtG98YoPQyg_0 bird AtKUkiMSzfs_2 elephant AtKieG766oI_0 person AtawrCflbrM_0 person AtfXsIpaSgQ_0 person AtmVV-8Pjsg_0 person AtmVV-8Pjsg_1 person At0-VpJyfBY_0 skateboard At81P33v_z8_0 person AuA4_FjCMvo_0 person AuJLIGyAoj4_1 horse AuJalbdpJP8_0 train AuLw9iNhPvw_0 bird AuQYS5w13co_0 bus AucK5ZDM060_1 airplane AuchGbKLdmk_0 person Aucxkj3w3nc_0 person AugnPC3tdso_0 motorcycle AunfkfLwN1w_0 bear AunfkfLwN1w_3 bear AunfkfLwN1w_2 bear AutsbWiMLoY_0 person AuuZLhOpxcI_1 elephant AuuZLhOpxcI_6 elephant AvGLANxpJ-Y_1 person AvJexx39uCE_0 person AvOpMSLKXTM_1 person AvOpMSLKXTM_0 person AvP_DY8SuU4_0 person AvQgdEmyoFA_0 airplane AvVBLLWgeWo_0 horse AvdUsPyX5lE_0 person AvdgweWTeeg_0 cat AvgusAC7DUU_0 bird Avlg_B60Z0E_0 bear Avlg_B60Z0E_4 bear Avp80BzoG9Y_1 person Avp80BzoG9Y_0 person Avr6FKguO2o_4 skateboard Avr6FKguO2o_1 skateboard AvvWfbj5x88_0 person Av78r-lWmCs_0 horse Av8Hkyi1fdc_1 knife Av8k98IyQhs_0 person AwAX85eLJH4_0 cow AwDIxdZSWKQ_0 person AwECiro8_h4_1 elephant AwEtKHnfKJ8_1 cow AwEtKHnfKJ8_2 cow AwFA2LuUWN8_0 person AwM3QWX5Jsc_0 person AwOJkAFe8Xs_0 bicycle AwZ6nHwMMuA_0 dog AwqZ_9G0pWg_0 person AwsAA0Xk1J8_0 
person Aw-D6USSthk_0 bear AxAIZDsViZw_0 person AxAIZDsViZw_1 person AxAkf4tRXbI_0 person AxLiwCy5umU_0 person AxUFYNgnIq4_0 person Axg0nab1SDc_0 person AxvrCidcYqM_1 person Ax2iIXU4Gyc_0 person Ax5dd2_2sFA_1 car Ax5dd2_2sFA_0 car AyAAL3Rd_Rg_3 bicycle AyAAL3Rd_Rg_5 bicycle AyAA5q5B-84_0 person AyAA5q5B-84_1 person AyH0zvW0ndQ_1 bird AyKf0Ufaa_o_0 person Ayfmwf4oW_k_0 person AyhXfIgl4Kk_0 knife Ayo9w6aKSY0_0 person AyqiYJuONPs_0 airplane AyqvDNKC1CQ_0 person Ay2VXLYZW50_1 person AzFaa7gRy0k_0 person AzMHek-Oow0_0 cat AzNf4dneWFU_1 person AzVMbaXM_QM_1 boat AzVoOWc-ueY_0 person AzaUz9OpHMI_0 truck AzeA4K-S0CI_1 person Azew3w3WZfI_5 skateboard Azew3w3WZfI_1 skateboard Azew3w3WZfI_3 skateboard Aze0ijK2t2M_0 person Aze_lfqL6mw_0 cow AzhTPVtwJVk_0 person Azh82KkzMVs_0 bird Azh82KkzMVs_1 bird Az0Hr5pa_Pw_0 person Az5vE5ssYxk_0 person Az5vE5ssYxk_1 person Az7glF28oOw_0 person Az_5XR0RSv0_1 person Az_5XR0RSv0_2 person A0JB0OdZ2NE_1 knife A0L6M_8fDyM_0 person A0Nx4JbdXO0_0 person A0PQ6Si3nOU_0 airplane A0XGvY-NO00_5 airplane A0jhzA4HvrY_0 umbrella A0n7dLEgCjo_0 cow A02wb1V5W0A_1 person A02wb1V5W0A_0 person A08TTc4NLik_0 person A1Hvxm2NCpk_1 airplane A1H8wrYSPlQ_0 bicycle A1NBheOGWNE_0 bird A1fdw6WBO_w_0 cat A1oQZf9EXPg_0 person A1oQZf9EXPg_1 person A1oQZf9EXPg_2 person A1r3FpgoeP0_0 elephant A1unjHSiYuk_0 skateboard A1w5Z9ryeJI_0 elephant A1w5Z9ryeJI_2 elephant A1w5Z9ryeJI_1 elephant A11L_7hymDI_0 train A2ODL8T477o_0 umbrella A2UiM17u3Ao_0 bear A2Vhzr_2AAY_0 person A2WfZtUfAy4_1 person A2gisYdnTi0_0 bird A2iD7VC-A9g_1 cow A2p7Z_Ia9Ak_0 person A2p7Z_Ia9Ak_1 person A2rOJWkWoRo_0 person A23nZy9maYk_1 person A23nZy9maYk_0 person A29DgqMHeEQ_0 person A3EcM1p8r14_0 person A3FTEFw2Bo0_3 horse A3JmvJSIxeU_0 person A3Lmb8E3Ovw_0 person A3L2pdrSYdE_0 person A3MpR785VH8_0 person A3MpR785VH8_1 person A3UoQh4P1_o_0 person A3ZIKfh-QPo_0 person A3b1bCXjWWE_1 knife A3eocVVFaX8_0 person A3vXSLx3blY_0 person A4BVLpu2EQI_1 cow A4CYcvyDGec_0 person A4P_7hjid7Q_0 person A4gw9TbmL54_0 train 
A4ijVvmthCQ_0 person A4oNmb9PiYQ_0 person A4t4imYj0tA_1 dog A4u61iOuzr0_0 person A4u61iOuzr0_1 person A4u61iOuzr0_2 person A4wLmZZODQU_1 person A4zzoIg6-W4_0 skateboard A42uEePHr8c_0 person A438LRj4MN0_0 horse A5Ho_qla_bQ_0 skateboard A5Kii0lU4h4_0 person A5ZAKa7xw_I_0 person A5ciZloGW2o_2 horse A5nuZ-mKcBE_4 airplane A5nuZ-mKcBE_7 airplane A5-RNkQ5yzU_0 person A5-yfb7-1NM_1 person A6DfgaqbLDM_0 person A6GND629_dg_0 person A6IIHamstQo_0 person A6KXKalaC7M_0 train A6KXKalaC7M_1 train A6LmIR6_mtk_1 truck A6L7XcS8oF4_0 person A6MkQdxLBSI_1 bicycle A6MkQdxLBSI_6 bicycle A6SipDli3dE_0 person A6Tx9smTdyo_0 boat A6Zbpn5hd6Q_0 person A6jEv9bIawA_1 bus A6rxrML8vyk_0 horse A66pUkVBt_M_0 person A7GxuMCyr50_0 cat A7KLi_xOQFc_0 person A7SDQoaalEY_0 person A7SIvy9srFU_0 person A7Zz2ESO-PM_2 bear A7aEqy5QRJ4_0 cat A7cjjAkLjfQ_1 person A7cjjAkLjfQ_0 person A7coVhNQrSs_0 cow A7c_1Wcr5hM_0 cow A7ltojA7WTk_0 person A729VkZvy_s_0 person A7_WDIFj23s_0 cow A7_hPlvWyGc_0 cow A8F5UnJOU5A_0 boat A8MGPGEOAWk_0 train A8PGaHrBO-g_0 bus A8PlfHNTHVQ_0 person A8RztgyPvCE_1 horse A8U5HWirVCk_0 person A8gL-e9dRa8_2 bear A8oMFSrcteU_0 bicycle A80V1BVUvf4_0 airplane A89eQvkZ4go_1 car A89eQvkZ4go_0 car A89tFE_-szI_0 person A9ACfqLHRIM_0 person A9ACfqLHRIM_1 person A9LEZHrMOh8_0 person A9Mw5uHZ7WM_0 dog A9UlOqoTO3A_0 car A9WAS-oLC8Q_1 train A9WAS-oLC8Q_2 train A9etwHCHkQM_0 person A9fblLjEn7E_1 person A9fblLjEn7E_0 person A9f0bktW-uM_0 train A9sznaQipiM_1 person A9sznaQipiM_3 person A9tOXINxUeA_2 person A-BcgCHWiLE_1 knife A-JRl34Jmok_0 elephant A-JRl34Jmok_1 elephant A-JRl34Jmok_2 elephant A-JRl34Jmok_3 elephant A-MMqq_FLXo_0 person A-R5A0HMT3w_0 boat A-SdlQGGdZg_1 person A-Vo3GQZrd8_0 skateboard A-gQnulNzVo_0 person A-gZpG3OWNM_0 person A-jGPkEGCdo_0 person A-qT3DcitzM_0 skateboard A-0o6fFroLk_3 bird A-1_sR8c39g_0 skateboard A-1_sR8c39g_3 skateboard A-37XpNHfQw_0 cow A_AbA6K8Ouc_0 person A_AbA6K8Ouc_1 person A_B83i3dvWQ_0 person A_CDsn7za4c_1 person A_CDsn7za4c_0 person A_DqzmxTyPQ_0 dog 
A_Eaoo5O71M_0 skateboard A_Eaoo5O71M_3 skateboard A_Nb1jSK7vY_0 person A_RHSgWC24U_0 elephant A_R7iK_MLgM_0 elephant A_Z7Cj10nKA_0 truck A_aN9LUuMY8_0 person A_g6G7vBr8I_1 person A_qnLTG_VBg_0 person A_uC3UuAVQE_0 cow A_uxGLJDf9I_0 person A_xtvYH_7vg_0 person A__fHCZfwtM_0 person BACWpC6GdxY_5 airplane BACWpC6GdxY_3 airplane BANdhsMHpw0_0 person BANdhsMHpw0_1 person BANdhsMHpw0_2 person BAOR6YBIb8U_1 skateboard BAO0Uce3vXA_0 cat BARELTt_9Ko_0 elephant BAWN6Xpw7sg_0 person Zx3x1-cBu7I_0 person Zx3x1-cBu7I_1 person Zx8LkdyJzG8_0 person ZyDqefuyQfU_1 cat ZyDqefuyQfU_2 cat ZyNwfXl7s2w_0 motorcycle ZyQL8Ugiq4Y_0 person ZyQxolWsw2o_0 cat ZyQ_gFztNXU_0 train ZyQ_gFztNXU_2 train ZyqvHk5Ugjk_0 bird ZyrTKvb3Uq4_0 person ZyuoNtTPexE_0 person ZywGdneFaWs_0 dog Zyw6pIArS1g_0 train Zy04v73t_oU_0 person Zy4s6kQgRAs_0 person Zy7a1FYT_2I_0 person Zy9BXzUqORk_0 horse ZzAgbPU4qoA_0 person ZzBP5IPOX7Q_0 person ZzBP5IPOX7Q_1 person ZzFvfG2mfRU_0 cow ZzIeftZXBMw_0 person ZzPUlKXnUgE_0 person ZzRMRSyCzzU_0 person ZzS_a0D4AhE_1 skateboard ZzWMnTc1LBY_0 person Zzdl60FMu48_0 person ZzeCPtqruzg_0 person ZzgU7APbNfs_0 person Zzgoobk2eIA_0 person Zzgoobk2eIA_1 motorcycle ZzhCWdZJAQY_0 person Zzic21J3Ea8_0 person ZznEoJsdkVI_0 person ZzpccfyFyL0_0 person ZzpccfyFyL0_1 person Zzq_S3HujTo_0 person ZztD-tmxwyc_0 person ZzwlUbCfscM_1 dog ZzxRC2pLBVA_0 person Zz2oIdSVB6Q_0 person Zz5GwCMuMj0_0 person Z0D6uKz7v5Q_0 person Z0m37r4St5Q_3 truck Z0pLWU6Wg-o_0 dog Z0stjlmfTpU_0 cat Z0xYA5PwrjI_0 person Z02r-T2hINk_0 elephant Z04k6LBSuRk_1 person Z1G9pYdQwCY_0 person Z1HK6zDIJhg_0 person Z1MvNM4bmxs_0 person Z1SML4zVPik_0 person Z1U7Wnf_WiA_0 cat Z1XafO8l8gs_0 person Z1aU1CigISE_0 person Z1a8Tqg-yjE_0 person Z1e-5FLWf6I_0 cat Z1gxFkBk4EY_0 horse Z1j81keSb9Q_0 motorcycle Z1j81keSb9Q_1 motorcycle Z1nr46t7EVk_0 airplane Z1pv5a0as9c_0 train Z1rB_fu2lKY_0 dog Z1x8sEeQIuI_1 motorcycle Z13O2uGP1nE_0 car Z14p6heAJRc_2 person Z14p6heAJRc_0 person Z14p6heAJRc_1 person Z15QqHX1Z6M_1 train Z2HF5_tyxR4_0 bus 
Z2K03YbfcGg_0 elephant Z2QWOKCHkM8_0 cow Z2QWOKCHkM8_2 cow Z2QWOKCHkM8_1 cow Z2SljfwK58g_0 skateboard Z2SljfwK58g_1 skateboard Z2VI7eM7BB0_0 bear Z2acpS-e_cg_0 person Z2cvYI55Dps_0 skateboard Z2dab1zmqv8_0 horse Z2gvlPrX5HA_5 elephant Z2gvlPrX5HA_6 elephant Z2kcVxTMZtM_0 person Z2n2a39MxJQ_7 bicycle Z2n2a39MxJQ_1 bicycle Z2n2a39MxJQ_2 bicycle Z2n2a39MxJQ_3 bicycle Z2n2a39MxJQ_4 bicycle Z2n2a39MxJQ_6 bicycle Z21DONVXY1Q_2 zebra Z23Gg06mNj8_0 person Z236ql8Tpvg_0 person Z23_3K28VSI_1 giraffe Z3AHrAB9qhw_0 cat Z3AplkSO6kA_1 car Z3KMX_N6WSg_0 person Z3KMX_N6WSg_1 person Z3KMX_N6WSg_2 person Z3PzgfwbjLk_0 truck Z3i5sys0boU_0 person Z3i5sys0boU_1 person Z3sRLCOCxMY_0 cat Z37dIpwPIqI_3 bicycle Z4DQoYcs5mM_2 person Z4DQoYcs5mM_0 person Z4DQoYcs5mM_1 person Z4XLmQjbg7Y_0 person Z4XLmQjbg7Y_1 person Z4ZKg0KbSm4_0 bicycle Z4ZPyzSGdRU_0 dog Z4bO8cpjQZI_0 person Z4bO8cpjQZI_1 person Z4bW8HHeYP8_0 car Z4mYWGPFVkw_0 person Z4n5ieSA6cM_0 cow Z4tOSluXWnE_1 umbrella Z4u3PPkCYOs_0 person Z4u4zasFeAw_1 bird Z4u4zasFeAw_0 bird Z4vRtZE1WjQ_0 dog Z4voZ3h_Dyk_1 person Z4xVMaYAqJ4_1 bicycle Z446P08C8vE_0 person Z5KGx49qaAE_3 bird Z5KGx49qaAE_5 bird Z5KGx49qaAE_6 bird Z5Qo8xdb8os_0 elephant Z5RKMhlNHEE_0 person Z5ZBRI0sc4Q_0 bicycle Z5iJRTvm-Kw_1 person Z5iV683VDk0_0 person Z5ls93B1bBk_0 person Z5mQ_0ttu74_1 elephant Z5mQ_0ttu74_2 elephant Z5yNMm-TIjI_0 bus Z5zGHZ82r9A_0 person Z53B8-gR640_0 person Z6BVtmEMfkI_0 person Z6FikDWrKkA_0 person Z6MfvYa9hCs_2 car Z6MfvYa9hCs_3 car Z6PyYboRq5c_0 dog Z6Q3LdMwgi4_0 cat Z6WrlM4ZZKA_0 person Z6j-7La25S4_0 person Z6j-7La25S4_1 person Z6j-7La25S4_2 person Z6k1unwmsfA_1 person Z6sd800eFC4_0 person Z6tGpP8q53A_9 elephant Z6tGpP8q53A_2 elephant Z6tGpP8q53A_4 elephant Z6vCDHs6NrM_0 person Z6yNyxXPPOw_0 elephant Z60iXtKpGMQ_0 bus Z61B0fShfbs_1 cow Z7AqkWEBwV8_0 person Z7DGMMQP79U_0 cat Z7I8r1AqMhU_0 person Z7JHCdt48hA_0 airplane Z7KEzuE_7hQ_0 person Z7LfnFm4OHs_0 person Z7WaJYiX_1o_0 person Z7WaJYiX_1o_1 person Z7bMdjLGiAo_0 person Z7eGCBjkKrU_0 dog 
Z7gxE6ZSQXI_0 airplane Z7iq45DtCTM_4 horse Z7iq45DtCTM_5 horse Z7zeXJ5lJRY_1 person Z7zeXJ5lJRY_0 person Z72sIqrQAF4_0 skateboard Z74EGXvFjFM_0 person Z76Y_PNOgK4_1 person Z76Y_PNOgK4_0 person Z78P87kjtu4_0 person Z8CXvEObu4c_0 dog Z8NfZN7WDKw_0 person Z8Oi5HJEyS4_0 skateboard Z8k0TTq5BC8_0 horse Z8s-Kg1PuSg_0 horse Z86E7eIS9t8_1 airplane Z89mG68LE2k_0 person Z8942_IPiTo_0 bicycle Z8942_IPiTo_2 bicycle Z9SwanypLJM_0 bear Z9SwanypLJM_1 bear Z9XS4cvVVy4_2 person Z9awHnw5J4o_0 truck Z9bt3xT5dCc_0 cat Z9f--QLEQqI_1 motorcycle Z9jDpr533Cg_0 cat Z9o5BEm1UeI_0 person Z9pHCguAO5c_0 person Z9wO9tftNG0_0 bus Z9x_cPvKErA_0 person Z98EscJ1IG8_0 person Z98GFnZo-LA_0 person Z-I0S45eRT0_0 person Z-J0UQfvb5M_0 person Z-MvTXpMdm4_0 truck Z-PMnTjqAS8_0 person Z-QO3lrbh7c_1 skateboard Z-VVWO3Ovgs_0 person Z-djkrj-5Cs_0 horse Z-glDeBd2xA_0 boat Z-lrIzXr9ck_0 train Z-mTl_ipVa4_0 umbrella Z-mXYrvubn8_0 dog Z-zy-BzjLT0_0 motorcycle Z-zy-BzjLT0_1 motorcycle Z-7W_lh96xg_1 airplane Z_JXyC6v_-s_0 person Z_KItWz0mTI_0 elephant Z_PViIzihe8_0 person Z_QVuM8wEmQ_0 person Z_QVuM8wEmQ_1 person Z_kPrUEqYXE_0 bird Z_p4gYNjwG0_0 person Z_85vV3FHUg_0 person Z_85vV3FHUg_1 person aACqXYewohQ_0 person aAI7SN5_3CY_4 bus BAhHrnCKvcM_2 boat BAhHrnCKvcM_3 boat BAhHrnCKvcM_5 boat BAmy5TQke7w_0 person BAnfbsB8rIY_0 bear BAnn4L-iNLE_0 person BAq_fnyQ6z4_0 person BA4ZGv8flRA_0 person BBCBbdz3Qvs_0 dog BBCBbdz3Qvs_1 dog BBLAyHVLHh8_0 person BBOd-YBAUgw_0 bicycle BBPlqTbAphY_1 person BBQ2xu9OehQ_1 dog BBS5owVJaTU_1 skateboard BBS5owVJaTU_0 person BBVPb5z0x7k_0 cat BBXs1J4j2mA_0 skateboard BBdA1qc9H-g_0 skateboard BBk7ZnOEjMA_0 person BBopEl_n3Fc_0 person BBpFu8j2fBc_0 bus BBpFu8j2fBc_1 bus BBqTHwpYeEc_0 train BBrfgTTduuI_0 person BB9l_znmPls_0 umbrella BCBCK2k2Bdw_0 person BCBgjRWuOcA_0 person BCGB6zaBDpg_1 person BCGB6zaBDpg_0 person BCI91i3aEek_0 motorcycle BCJbf6um28s_1 airplane BCKVauIBDFM_2 bear BCin0MjzM8Y_0 cow BCoTKGNhMVw_0 dog BCoTKGNhMVw_1 dog BCo8e6n2dYQ_1 dog BCqYnyGIols_1 bicycle BCsmPvRqaNk_0 person 
BCuzA73UTl4_0 person BCwAdqAouFU_0 boat BCwyoTwckSE_0 truck BDFBV8JbIF8_0 person BDFVkc87amI_0 person BDHUAJn9nnc_0 person BDHsXkbkS-w_0 skateboard BDOemJGz04I_1 person BDcTOMebCHs_0 person BDcTOMebCHs_2 person BDcTOMebCHs_1 person BDdIKtFwnjA_1 train BDdbk3ZQrP0_0 cat BDdhenNSY9o_0 person BDk-BklqSdI_0 person BDroGke9Ogg_0 horse BDroGke9Ogg_2 horse BDtGFVFexaU_0 person BDzXi4ukhN0_1 person BDzXi4ukhN0_0 elephant BD30MTvTuYU_0 person BD7TQWBytfQ_0 knife BEArUGKSB-Y_0 train BEArUGKSB-Y_1 train BEKMcritl6M_1 person BEMcwkY2beQ_0 person BERvmKL4Glc_0 person BESdHwoIDsA_0 dog BEUB64a3AIY_0 elephant BEUB64a3AIY_1 elephant BEYy-ZRSWSk_0 skateboard BEa_8wp0528_0 cow BEqG56tHTEI_2 bus BEqPniAgjaY_0 cat BErty5GnulU_0 person BEuXjB1zLeE_1 car BExSp8l17GY_0 person BExlFv0scM0_0 person BE10HJUHUHw_1 person BE8KS4PZH54_0 elephant BE-crlUXSSE_0 dog BFC3DWxOces_2 airplane BFC3DWxOces_1 airplane BFC3DWxOces_3 airplane BFC3DWxOces_4 airplane BFC3DWxOces_5 airplane BFJ4v-XlKAg_0 skateboard BFPQCoJqTRk_0 person BFeIwErwdS8_0 person BFeIwErwdS8_1 person BFggPKKt6wk_0 person BFggPKKt6wk_1 person BFhh8z0Fmk0_0 person BFponHgVsdA_0 person BFs239KuGa8_1 person BFxUyTrqZhU_2 horse BFxUyTrqZhU_4 horse BF4YTMGtDs8_1 skateboard BGAQlsAiJ_0_0 airplane BGAQlsAiJ_0_1 airplane BGAQlsAiJ_0_2 airplane BGAQlsAiJ_0_3 airplane BGAQlsAiJ_0_4 airplane BGAQlsAiJ_0_5 airplane BGAQlsAiJ_0_6 airplane BGLM4yl_Ka4_2 horse BGO3DBbNozc_0 skateboard BGR1gMrCTpA_0 person BGT-p0CgoFg_1 person BGW9SDHTWKY_1 person BGW9SDHTWKY_0 person BGee3Ar-Fbg_0 airplane BGpx9Xow9Ew_0 cat BGqNnzNtWkc_0 person BGq6TeZHkLU_0 elephant BGshZfVDb5w_0 person BG4QyYPKYvg_0 person BG4QyYPKYvg_1 person BG_x-4YUtFE_0 dog BHA5UUg4lCw_2 train BHH2sTfHwks_0 person BHH2sTfHwks_1 person BHPSyq8L5S8_1 person BHQkdwmXrtI_1 skateboard BHQkdwmXrtI_2 skateboard BHYrJ1yaM-w_0 car BHdbqcxv3Vw_0 truck BHfXgxJCcrw_0 boat BH5fxWFpHvE_0 airplane BH5npOcPlY0_0 car BH6nqU68dWo_0 person BH74QV_0vtc_0 bird BH9Ob6Uiw1w_1 person BH_SlBCiQ_8_0 person 
BIETPRRGGgY_4 elephant BIETPRRGGgY_5 elephant BIIU36E15Vo_0 person BIMggdk7AHQ_0 cat BIQeL2o_Ogg_0 person BIUQ935UkDo_0 cow BIVLmUTNYbk_0 person BIV-1bNQ7pI_0 skateboard BIfqcruNiic_0 person BIkDAHYmcFw_0 person BIkDAHYmcFw_1 person BInC--gFqHM_0 person BIvTK9qvP1w_0 skateboard BIxCP9ck4-8_0 cat BI5i3aDb_FQ_1 person BI-kr0tFSDg_0 person BJIZYdOZHzg_0 umbrella BJK_SXpLtnI_0 bird BJMP05du3Eg_0 person BJQstPOa8Wk_0 person BJS2YLbErJg_1 person BJfRrRcfmF4_0 skateboard BJf9nFjqLvg_1 bird BJlcWhfsg_g_0 person BJriJT6zJl8_1 skateboard BJwoZcHbBK0_0 umbrella BJ05o1_UKzw_0 dog BJ44CIPaDf8_0 person BKAo6GZ_kNs_0 train BKTCaKgjiag_2 person BKUKi0vTt0A_0 person BKdSO_PNJ4U_1 person BKdSO_PNJ4U_2 person BKdSO_PNJ4U_0 person BKl0wLRzoD8_0 person BKw9UQxZ3a8_1 horse BK-rIrwen6U_1 motorcycle BLB0F-XD8IA_1 person BLB0F-XD8IA_0 person BLEdcnrUmEo_0 cat BLE9cZ8L3a0_1 skateboard BLFYe-dU9ZU_0 airplane BLO7KJUu8t4_0 elephant BLSwwE9mtTQ_1 knife BLcOGv-0-dc_1 dog BLfmgLou27o_0 cat BLvowRU6z7s_0 bird BLxsg2_sjDM_1 person BLy6RcifNl0_0 bus BLy6RcifNl0_1 bus BLy6RcifNl0_3 bus BL6tcorHrT4_0 bicycle BMH2ReDeKuc_0 person BMUnKa8FUGQ_0 person BMUnKa8FUGQ_1 person BMavrQABR1Y_0 person BMa4xJ1U3Zk_0 person BMbZc-jxEfo_0 person BMbZc-jxEfo_1 person BMfsf9tDz8o_0 cow BMfsf9tDz8o_1 cow BMhy1f7EuXM_0 elephant BMptIGI1Il8_0 car BMuO2fjJoOw_0 car BMweJTmvCBg_0 person BMweJTmvCBg_1 person BMypDovEOEE_0 person BMypDovEOEE_1 person BM0QiiStqd8_1 skateboard BM6XrBQQ7NE_0 person BM6609PpfO0_1 person BM6609PpfO0_0 person BNGDM8sFM8Y_0 person BNIVhG5pZh8_1 dog BNJwAx3eUKc_0 person BNK68rC7RdI_0 umbrella BNTS3OPHAP4_0 horse BNXKRPSr66c_0 person BNXKRPSr66c_3 person BNXKRPSr66c_1 person BNXKRPSr66c_2 person BNbPQGMLs2w_0 person BNbPQGMLs2w_1 person BNbSUPI8feg_0 person BNcj3161E9o_0 person BNeWUyqXAC0_1 airplane BNmMB68b1PA_0 person BNnVfaIfBx0_0 airplane BNnVfaIfBx0_1 airplane BNyK_4tt2fg_0 car BNybc47kPjg_0 person BN1HT0FOOhI_0 dog BN7YfmbYuVs_0 elephant BOE82LEqzWw_0 cow BOF3tFvEu0o_0 person BOHE8JNUcQc_0 
boat BOMeyjZNH5k_0 bicycle BOQiuL9QlIo_1 person BOUcPea33eY_2 skateboard BOfgzvAgVQw_0 bus aAMhdGuR5DE_0 cat aARa5-CLhG8_0 person aAVaqjgY1m8_1 person aAZ2fVjhcIE_0 person aAj0EN1Rnc0_0 bird aAj0EN1Rnc0_1 bird aAlTiBaLr8M_0 person aAmVIu8X7p4_1 person aAma36YlaAo_0 zebra aAsr-Rf6rEE_0 person aAsr-Rf6rEE_1 person aAuz7EfR_fU_0 cow aAyTLM_PmzA_0 skateboard aAzpA1iK_bE_0 person aA0FrWtkjXk_0 person aA3okCsYx6Y_0 bird aA5DYzky6o4_0 cow aA8Tz4nZ99g_0 person aBBtHXQoEtM_2 person aBBtHXQoEtM_1 person aBQm5kN1TfY_0 cat aBexNnNkORk_0 airplane aBq4NF1upak_0 person aBvvXrP1BJs_0 person aB-tGXFmyFU_0 person aCQAel27T4o_2 person aCSzhpU1heQ_0 cow aCXfvvg8CF8_0 airplane aCiDDC9KFS8_0 motorcycle aClye1Ctc9E_3 truck aCl98J6O9Hk_1 person aCuXZ3LmfSo_0 person aDGpg2xtDk8_1 person aDRE08tF2Wc_1 bus aDTQRnSeu_E_0 skateboard aDTTYd0Z5Vk_1 person aDjhOS5Xa9Q_0 boat aDmLwCb_o30_0 dog aDtJSv7XR90_0 car aDte-e70l7U_0 cow aDte-e70l7U_2 cow aDte-e70l7U_3 cow aDt4Puik-kU_0 horse aDwTy9yiOms_0 umbrella aDxRlCI40wo_0 person aD2q00X0-eg_0 person aD2q00X0-eg_1 person aEJy28mvKPk_0 person aEJy28mvKPk_1 person aEMPa2NvIl4_0 horse aERed6pg_h8_0 person aER-VrHLWwY_0 person aER-VrHLWwY_1 person aEZ9vBpXNKU_0 person aEw_vtKlegE_0 elephant aExRtJpfZEs_0 knife aE1veVneq04_0 person aFC2Zy2-0dY_0 person aFFKeUdtPcQ_4 knife aFL2V522q9A_0 person aFZ03eEOZFE_0 bird aFbVlCimys8_0 bird aFdPuo5xB-c_0 person aFhKp8gVZSE_0 person aF86vrld8V4_0 person aF-CmWo8ooM_0 person aF-CmWo8ooM_1 person aGAB6WQFklc_0 person aGE8AphnkNU_0 knife aGGiVuwB1p8_0 bear aGY3LCiYRnQ_0 motorcycle aGgnovv6T3U_0 dog aGgxdwCpAN0_1 horse aGhNzJSHCOU_1 knife aGmxZatPe60_0 person aGmxZatPe60_1 person aGuWVv6XS8Q_0 person aGuWVv6XS8Q_1 person aGwPRbsru-4_0 cat aGxOl5SXjtM_0 person aG1c8x5Dl-w_3 bicycle aG1c8x5Dl-w_2 bicycle aG1c8x5Dl-w_4 bicycle aG20iwkTd_o_0 person aG6D_te6V3s_0 person aHEFx7Zz6E4_0 person aHb4yEpCinw_0 truck aHiGSUMMfBQ_0 person aHnMWEvjLzI_0 car aHrTcxckS-A_0 person aHrTcxckS-A_1 person aHsgQAyd8ss_0 person aH2ZxImdwaU_1 
motorcycle aH2ZxImdwaU_2 motorcycle aH5Cd20kdJw_0 elephant aILjXrLJpHw_0 umbrella aIQf8LQ5QPU_0 person aISEbZGZH68_1 car aITryMUZ2b8_0 person aIUYT8pblHs_0 truck aIU5E5tHvdc_1 person aIVWVNBI-n0_0 elephant aIcFi8LMv0w_0 airplane aIjLf6T_K3o_1 bear aIoZO3mu_tQ_0 person aI311E3BWwI_0 elephant aI7axTZFW4A_0 truck aI80ysvYFG4_0 person aJChqX9Ki8A_6 airplane aJChqX9Ki8A_1 airplane aJChqX9Ki8A_2 airplane aJChqX9Ki8A_5 airplane aJN9lRsvUv8_0 person aJQ9scZQmz8_0 person aJTABCCQtK4_0 horse aJYmkpuijrk_0 motorcycle aJYurtxV0Og_0 train aJYurtxV0Og_1 train aJcPyWppCcI_0 motorcycle aJgpAyFnpeI_0 cat aJ0dUcEIE_U_0 person aJ1SzcgNcxI_0 cat aJ8w4L7E368_0 person aKLf2yC2diM_0 car aKMqeCkIJSg_0 person aKOMIxz2RsM_0 person aKOMIxz2RsM_1 person aKiwOUy71Lo_1 person aKiwOUy71Lo_0 person aKqrwq-Sigg_0 skateboard aKtBD-3wFMA_2 bear aKtBD-3wFMA_1 bear aKu-1-TFl1g_0 knife aK-rgio7orw_2 bus aLDq7roX-SU_0 cat aLFDqtBMblI_0 cat aLFxGnCM1zs_0 person aLIa7x90hQc_0 person aLUSnANtUlE_0 airplane aLX9cIe12C8_0 skateboard aLZAMgiWcXk_0 bird aLZ0lbLzg8Y_0 person aLZ0wCY2j2s_1 person aLeeoZ1uVcc_0 boat aLjomcNk9fc_0 person aLj4N9Tp6C0_0 skateboard aLj4N9Tp6C0_1 skateboard aLo-gekX9j0_0 person aLo-gekX9j0_1 person aLuNNRUC09A_1 bus aLuNNRUC09A_6 bus aLvCIWJQJbY_0 car aLvg1CWrY0Q_0 truck aLxJ8T4CFuM_0 person aLzL_Gldhzk_1 person aLzhO0EqNcc_3 horse aL6H2Jatw0k_0 cat aL70_drPJtA_0 train aL8hELYDnTc_0 person aMAKznXul5M_2 knife aMAYLrcEnZY_0 bus aMAeSegIdJg_0 person aMAeSegIdJg_1 person aMHtvIvWTBU_0 bear aMNbQ1Cl5GY_0 motorcycle aMRtQFBcLNM_0 person aMX0jhSq6UY_0 person aMb78Ixlbfw_0 skateboard aMqHsdXJ7UU_0 person aMzZxN9uvMc_2 horse aNB5rIhRL7g_0 airplane aNEpBEnAUhw_0 motorcycle aNF18KgxGHA_0 skateboard aNJuTWrnIfo_0 person aNJuTWrnIfo_1 person aNKleFpxS4M_0 person aNKleFpxS4M_1 person aNNWNDoOM_4_0 person aNNWNDoOM_4_1 person aNOXvvKZ3qU_0 person aNZMe4tov6w_0 cow aNdJrRu4imo_0 person aNjs-khPjiU_0 person aNj1xwowXYU_0 person aNqkQnGfWEc_2 skateboard aNqkQnGfWEc_0 skateboard aNwIHwPqFPc_0 car 
aN4Na3OaY4I_0 bicycle aN4NmH-GafU_0 person aN770kOQCD8_0 person aN82X1hXgEE_0 person aN82X1hXgEE_1 person aN9XAd7-rzE_0 person aN9XAd7-rzE_1 person aN_3Pwk-7oY_0 person aOHPVt_93RE_0 bicycle aON6RKmi-YQ_2 train aOPbvY62dMQ_0 airplane aOQ-8RoQYEU_0 person aOQ-8RoQYEU_1 person aOQ-8RoQYEU_2 person aOW81s5KlyA_0 person aOcGv3kcyhg_0 bear aOcGv3kcyhg_3 bear aOjjUIWuG6Q_1 elephant aOp2NlwNeoY_0 cat aOz0l6mLHmA_1 dog BOlBcGufEU8_0 person BOlBcGufEU8_1 person BOmgqlRxGlM_1 person BOmgqlRxGlM_0 person BOnvGIZd58M_0 person BOowRuwiNhU_0 person BOowRuwiNhU_1 person BOr7CffDWEU_0 person BOsNz8L3PXI_0 person BOtfIOm5kag_0 dog BO1T_-iFGdM_5 bird BO1T_-iFGdM_2 bird BO1T_-iFGdM_3 bird BO3UKxe7nyo_0 person BO5EdP_PO9M_0 person BO7sWBaaL7g_0 person BO7sWBaaL7g_1 person BO-3uvHhUdI_0 person BO-3uvHhUdI_1 person BPBBMIdFoiE_0 person BPEwUVhfaOk_1 knife BPVpq7UrI-k_0 person BPX5EquoyCU_0 motorcycle BPX5EquoyCU_3 motorcycle BPX5EquoyCU_1 motorcycle BPX5EquoyCU_2 motorcycle BPiWTYUA7eI_0 person BPjkQ-lEqcw_0 person BPrrZpiDdo4_0 cow BPsTDg4C4o0_1 person BPsTDg4C4o0_0 person BPxPfFzwlQA_0 truck BP-GGAbCOhE_1 bus BQDxNNWRtas_0 car BQDxNNWRtas_1 car BQEzj9pP1SU_0 person BQIO94PF6RE_0 person BQIO94PF6RE_1 person BQVcvMWyWpU_1 person BQZGptzIdjE_0 cow BQgPk0vRreM_0 bird BQgPk0vRreM_1 bird BQgPk0vRreM_3 bird BQgPk0vRreM_6 bird BQgPk0vRreM_9 bird BQh5Ib9nynM_0 truck BQtDUi4BxRg_0 person BQwLGv7fgQg_0 person BQxCcefrjSk_0 cat BQyowuIZqFQ_0 person BQzzKQ9ejzw_1 knife BRCb183ELe0_0 person BRHPsi_0nTg_0 motorcycle BRQiSnowTss_0 horse BRVNuDR5WzI_0 cow BRcQS0dQqEU_0 car BRfegSv5VEk_0 person BRfegSv5VEk_1 person BRi_AMaK3kc_0 dog BRjvUtQdukg_0 horse BRlWBt4WHdU_1 horse BRnsmPzoEsM_0 skateboard BRtCCpXG_N8_1 elephant BRt1o8xqxFs_0 person BRt5hLASRMU_0 bird BRxrw0-skYM_0 elephant BR0SGq2ioqU_2 train BR0SGq2ioqU_7 train BR1gOlJPEdk_2 elephant BR8cOV8KYX4_0 person BR-XwELzLV0_1 dog BSDy_dzOSS4_0 cow BSHg9I0V6Yc_2 bus BSJgV2iO0jc_0 person BSOCno_3bfI_0 person BSSyaPq1EoM_0 train BSWNCcyXeR4_1 horse 
BSWpwtIPQ9U_0 elephant BSWpwtIPQ9U_1 elephant BSWpwtIPQ9U_2 elephant BSWpwtIPQ9U_3 elephant BSqz3i60KPw_4 bicycle BSqz3i60KPw_1 bicycle BSqz3i60KPw_2 bicycle BSutEBx3H4A_0 truck BSvCnoryvn4_0 elephant BSyxB7X9SH0_5 truck BSyxB7X9SH0_7 truck BS1lexD0ugY_1 person BS1lexD0ugY_0 person BS5mJ0Y7Rys_0 person BS-S0nYSwkQ_0 person BS-S0nYSwkQ_1 person BTBmlFGHK-8_2 person BTBmlFGHK-8_0 person BTBmlFGHK-8_1 person BTKLizyvgcA_0 person BTR83oP1vpo_0 person BTlwglCdzOk_0 elephant BTpBteZfK7Q_0 cat BTxSuijXVPY_0 person BTywlpNCABw_0 cow BTzWqg8vHQI_0 car BT9sKGDb0Qw_0 train BT9sKGDb0Qw_1 train BUF45g7KGB8_0 motorcycle BUX8raEGFZk_0 dog BUX8raEGFZk_2 dog BUX8raEGFZk_3 dog BUY-_l8_v9s_0 person BUZ7x7JaQ1k_0 person BUrMlyUBryI_0 horse BU4SnrK9UiY_0 horse BU4SnrK9UiY_2 horse BU4yiA6qKAQ_0 bicycle BU5PaU-UTss_0 person BVAi_zqhIeg_1 person BVCe2emxuTQ_0 horse BVFYmsvoNTA_0 cow BVS5Q8eBmRs_0 person BVWEvs3lq0Y_0 person BVWEvs3lq0Y_1 person BVXMpcHTg80_2 motorcycle BVm9KRW0iu8_0 motorcycle BVo3XdFnAJM_0 horse BVxr6TGFsMQ_1 person BV5tXmVwddI_1 person BV-UtDJNS2w_1 motorcycle BWA5eWlt6Lg_0 car BWFYpOE-8yo_0 person BWcaU8lR4rM_0 person BWdhK5cwgt0_0 bus BWjRZ-aKRX4_1 person BWlnPrI8FLk_0 person BWnFU-Li_8E_0 person BWn3QGOyZJc_0 elephant BWn7EPWkJ2I_1 bear BWp2oVJMG1A_0 person BWqYVuIKaNA_0 person BW5r0Kv6h2U_0 boat BW56O_QhBmc_0 person BW7uP0jcst8_0 horse BXA3uMFAA9M_0 cow BXCd65rDsk4_0 dog BXCrD4eGGWw_0 person BXHktSPnW24_0 person BXTGSkuESqU_0 person BXUL3aLVZM4_0 person BXWXLNGacmc_1 motorcycle BXWXLNGacmc_0 motorcycle BXdMv9s3Rtw_0 person BXiQhR0Zj70_0 person BXrwbMjK_ZU_0 train BX8AJD8uL3U_2 person BX-SAZsC6yc_2 knife BX-SAZsC6yc_4 knife BYQfvvAP9rY_0 person BYRNeh3RRZs_0 person BYS-DmtMpWE_0 cat BYVhHLCSZ_M_1 dog BYYakMVK6Ko_0 person BYi8dYVDYak_0 person BYkytpBqzHQ_0 airplane BYq45niURL8_1 truck BYq45niURL8_0 truck BYud6fy8t8A_1 knife BYud6fy8t8A_0 knife BYud6fy8t8A_2 knife BYud6fy8t8A_3 knife BYxg5sQjvQ4_0 person BYyATiWsxZs_2 car BYyATiWsxZs_0 car BYyATiWsxZs_1 car 
BYyrXwDFF5U_0 person BY0XhpATtuI_0 umbrella BY2Fs4KDDbU_0 motorcycle BY7KYQ_Qf3Y_0 cow BY8mmPl_K_A_0 person BY-5sA1BbFE_0 dog BY-5sA1BbFE_2 dog BZDa7e9EFvI_0 knife BZERyxrpvg4_1 person BZIzw3XdAgI_1 person BZI3ovXxotQ_0 knife BZeIe9Nkb1E_0 cat BZgZ1H4t3hQ_0 person BZgxjWSM7Vc_0 bicycle BZhfYzqKuu8_0 person BZkYWI_qxz4_1 bird BZldivEoOo8_0 person BZli_iMMV8k_0 bear BZli_iMMV8k_7 bear BZ94WX4wHn0_0 skateboard BaDQg_CCQpU_0 person BaDQg_CCQpU_2 person BaHS1WcgbbE_0 bird BaHS1WcgbbE_1 bird BaJTQLa-vuU_0 person BaOQYsYuC6A_1 elephant BaRsW_taGVY_0 cat BaWQb_lSjYs_0 train BaYLeM_yk_Q_1 skateboard BafH7BetIyk_0 person BakCr5HeDNE_2 boat BakCr5HeDNE_0 boat BauKE-faLzM_1 person BavQVUFfmBU_1 person BavQVUFfmBU_0 person BavoG7kb0wo_0 car Baxc5TW06FU_1 knife Ba1sC-X1OF8_0 person Ba1sC-X1OF8_1 person Ba2T3joy6BQ_0 person Ba3CWVKFpBE_0 boat Ba5BO-nvDnE_1 horse Ba-SiAqH09k_2 truck BbAdBjyFFEA_0 bird BbAdBjyFFEA_1 bird BbAdBjyFFEA_2 bird BbEfZ9mUKOY_0 cat BbOabnT5V-E_0 person BbQyfmZx-2Y_2 bear BbRarKH6D_Q_0 horse BbYZ7Ee3Ixs_0 person BbYqjT1OzLY_0 person BbYqjT1OzLY_1 person BbfOXQD21Ac_1 motorcycle BbnSU5sRdBs_0 person BbnxzNL5tMk_0 person Bbq8h83cFE8_0 person Bbu_YM_GBG4_3 bird Bbu_YM_GBG4_0 bird Bbv9Y9Goufk_5 elephant Bbv9Y9Goufk_0 elephant Bbv9Y9Goufk_1 elephant Bbv9Y9Goufk_2 elephant Bb4uwSjmtKk_2 bird BcHl4OuJLT4_0 person BcHl4OuJLT4_1 person BcSXX5O_YDw_0 bicycle BcVn38vI_Zk_0 person BcV5QdDIrMg_0 person Bcg-TsdpO-Q_0 person BcjVHV-6WWM_0 person BcjZaclf1m0_3 bird aO4uLNN4Gt0_0 bear aPCEyodWBU4_0 person aPPUf7JUJRo_0 person aPf5SoOgmhQ_0 motorcycle aPheJtUTSps_1 boat aPm89i_7aKs_0 train aPm89i_7aKs_1 train aPswSvCaFDQ_0 elephant aPvqWgeR03U_0 person aQAieL0LKIo_0 horse aQB2gAnqQi0_1 person aQGQKDLwRqM_0 person aQVn7fJi_l4_0 cat aQaKnTZ4hDg_0 person aQfQqr5W5uI_1 truck aQfQqr5W5uI_2 truck aQfQqr5W5uI_4 truck aQlLjT95Hgs_3 horse aQub6VGWKzQ_0 car aQzKS5Sn9u0_0 person aQ1c75hfANo_0 person aQ6larydXgI_4 elephant aQ6larydXgI_0 elephant aRBWB79BIIg_1 umbrella aRHGn50eToQ_0 bear 
aRQQ75s9Ni4_0 boat aRRUAfurxVU_0 person aRcw_PTSf4o_0 person aRdAN9jVvqQ_1 dog aRnJ4lIPIL4_0 bus aRueDRgWEOs_0 truck aRzwrPXsTRI_0 truck aR6P3PtMIZc_0 person aSDuIU0pzYY_0 person aSH88cb0kww_0 person aSMzQpOjAc8_0 train aSUtY_pSN0k_0 bird aSWGbO-Nfcg_0 train aSWGbO-Nfcg_1 train aSb-LY3vBsg_0 giraffe aSkBoJ55w2Y_0 person aSqwAZJaQIk_0 bus aSqwAZJaQIk_2 bus aSsjyvISV94_0 train aSw1yhbXHuA_0 elephant aS2Zw7-j7p4_0 car aTBr31jkThQ_3 bus aTOn74Inw24_0 bird aTR3FylgTkA_1 person aTR3FylgTkA_2 person aTS8hur_yyo_0 person aTcDiEXEhhk_1 horse aTdIOtWasSE_0 person aTeFjqoG9fM_0 person aTeFjqoG9fM_1 person aTj38bNIsQo_0 cow aTvgsqSb5aA_0 person aTvoRXrEvG4_0 bicycle aTvoRXrEvG4_2 bicycle aT3idINTybY_0 umbrella aUFHlj5AVrU_0 person aUNlQPWMFHo_0 car aUQh47P34C0_0 person aUQh47P34C0_1 person aUX-HZraWQs_3 zebra aUh41vv5vdE_3 train aUh41vv5vdE_0 train aUh41vv5vdE_2 train aUv4LjbJxLs_0 bus aU5AZMYHZ2o_0 dog aU5tePXE5qE_1 elephant aVFbcdQrobU_0 person aVGtibXVt40_0 train aVMpwmT7ojA_0 truck aVPIHMyNEw8_0 truck aVZJ8qaxG3s_0 person aVif6Qc9Prw_0 cow aVknWcQimJA_0 bus aVm9jp_ttsk_0 elephant aVm9jp_ttsk_1 elephant aVm9jp_ttsk_4 elephant aVm9jp_ttsk_5 elephant aVm9jp_ttsk_6 elephant aVm9jp_ttsk_7 elephant aVm9jp_ttsk_8 elephant aVo-jvGoUGs_1 boat aVo-jvGoUGs_0 boat aVq4ezzbcTc_0 bird aVvuGEexwy0_1 person aVy9mhLlo5U_0 umbrella aV2_0JBmw8o_1 person aV7mSkydynI_4 bicycle aV7mSkydynI_1 bicycle aV7mSkydynI_2 bicycle aWCNGGW4Qew_0 person aWDtrDYqivs_0 person aWQxqFyyzng_0 cow aWQxqFyyzng_1 cow aWWMT0webCY_0 person aWWtWhgt_V0_0 cow aWYoUCAev64_2 bicycle aWYoUCAev64_0 bicycle aWcaF85RIM8_3 elephant aWgSKxQO5Ps_0 cat aWi51gAEIkY_0 person aWma4eTtHv0_0 person aWqBSBc-XpU_2 knife aWt13fGkYuA_0 cow aW9D5rT3GCo_0 bear aXFFLOGR_yI_0 person aXFgCWZLFj8_0 horse aXFgCWZLFj8_5 horse aXFgCWZLFj8_1 horse aXKbkyjRqkU_8 bear aXKbkyjRqkU_0 bear aXKbkyjRqkU_7 bear aXOPdDTpvxc_0 person aXWkAKNw0Dg_0 bird aXXfrIsIqi0_0 person aXhd5BhT4hs_0 cow aXhd5BhT4hs_1 cow aXml5kCJyDY_0 skateboard aXml5kCJyDY_2 
skateboard aXn1cwN8vng_0 airplane aXn1cwN8vng_1 airplane aXxKLf5m61g_1 person aXxKLf5m61g_0 person aXxPxBeZjQI_0 person aX0JOJY-BDc_0 person aX0JOJY-BDc_2 person aYCA7dz0nbI_0 person aYJzxhE8-Rs_5 knife aYPCTMucy6A_0 person aYgA8AxT0V4_0 giraffe aY1i2TADX0c_0 person aY1i2TADX0c_1 person aY1i2TADX0c_2 person aY4dOYabpbs_0 cow aY6lI7qO6kI_0 person aZF83PK7HKU_0 person aZF83PK7HKU_1 person aZGZbrCAFl4_0 person aZGZbrCAFl4_1 person aZHznZSD2uE_0 person aZJ_vArnOC0_0 cow aZL_n-gon0U_0 boat aZT_v5WnLio_0 person aZVtxAF_Imw_0 dog aZZcXyRJwyI_0 person aZ4tzgju18s_1 train aZ-3jypmJiY_0 person aaAAXDB7ml4_0 elephant aaAAXDB7ml4_1 elephant aaA_qcyN3eM_1 cow aaBf3fxpR7E_1 person aaQjh2_8aVw_1 motorcycle aaQjh2_8aVw_0 motorcycle aaUXN-xWi1c_0 person aaWV0TEIbhM_0 skateboard aaWV0TEIbhM_2 skateboard aacFWGARp08_0 person aacLCDo8Zus_0 umbrella aacZc8VUtxg_0 bird aaoYsiVAFDY_0 airplane aas39xgvbfg_0 cat aatdoixvb4w_0 dog aazC6OJV2GY_0 person aa0jo00Yxz0_2 boat aa-J6xg9RH4_0 person abCu1bwDisA_0 umbrella abHvXnWduQQ_0 person abQ7YCx3QQM_0 train abbympAEM_k_0 cow ablCJGTLCow_1 elephant ablCJGTLCow_3 elephant ablCJGTLCow_4 elephant ablCJGTLCow_0 elephant able--ZWvkg_1 person abnCzyC9R28_0 person abpyt2p-uMg_1 bird abrKRGgLV0o_0 dog abrKRGgLV0o_1 dog abxcR1X4UIo_1 bird abxuxX4aHFI_1 horse abxuxX4aHFI_2 horse ab1RpuefUA0_3 bicycle ab2b2WA-fQs_1 person ab2b2WA-fQs_0 person ab2b2WA-fQs_2 person acDY2Ono9WA_0 dog acL58vxHnnc_0 person acOdf26jldk_0 person acYxvpS0b7s_2 airplane acZFDZif1ww_0 train aciCzrBQsM0_0 person acnOEnTXwJY_0 cow acnOEnTXwJY_1 cow ac4feYMso4k_0 train ac6NdTBtc6U_1 person adAkRe99CDA_0 truck adE0Nk3CKyI_0 car adKIteGSOIM_1 skateboard adY8EtfOO_w_0 train adcv2A70AoA_0 person adiBUyRiBfo_1 person adiBUyRiBfo_0 person adskAqVAdFQ_1 elephant ad2C17MGAEo_0 bus ad94BZD75ck_1 cow aeAjL4rCjIM_1 truck aeAjL4rCjIM_0 truck aeIzIOSHZek_0 person aeJKW7m42xo_2 airplane aeJKW7m42xo_0 airplane aeKckIdL0io_0 bird aeUVIIEtwdw_1 motorcycle aeUVIIEtwdw_2 motorcycle aeUVIIEtwdw_3 
motorcycle aeUVIIEtwdw_4 motorcycle aeboOU_vdjo_0 person aeboOU_vdjo_1 person Bc2pPI9s8bM_2 horse Bc26F0eEyBg_0 person Bc5QvTVd-04_0 person Bc64C5jdZDg_0 person Bc7NXuSycR4_0 skateboard Bc-b4WhkWxw_0 person BdBZuvI8oak_0 truck BdBZuvI8oak_8 truck BdBZuvI8oak_1 truck BdBZuvI8oak_2 truck BdBZuvI8oak_3 truck BdBZuvI8oak_4 truck BdBZuvI8oak_7 truck BdB6NgtqioE_1 bear BdCnusBWLuw_0 bicycle BdC5wdGWMCw_0 person BdLMnBBX7rc_0 person BdQ8AC4jpkk_0 person BdR02myBXHY_0 person BdTRTQRbNqI_1 skateboard BdT2u0kYx90_0 bicycle BdT2u0kYx90_1 bicycle BdT2u0kYx90_2 bicycle BdT2u0kYx90_4 bicycle BdZOawocL-c_0 person BddRmrmaI6M_0 person Bd0JDJL6yXk_0 airplane Bd21KrWCyCg_0 cat Bd-WW1Hs9kk_1 train BeAD9m4Yu_U_0 person BeCQkxXRRww_1 person BeCQkxXRRww_0 person BeCmkGB-RCw_0 horse BeQWoctTF5I_0 bear BeQWoctTF5I_2 bear BeQupBkL2y8_0 train BeTu3Ag6XIw_4 bicycle BeTu3Ag6XIw_1 bicycle BeVqWRYzPkY_0 knife Bebzr4dP1Ug_2 person Bebzr4dP1Ug_0 person BedgXkpLAOs_0 person BefMC4f6Z3s_0 person BefMC4f6Z3s_1 person Befq3kL0E7o_0 person Begwn2Da_j8_0 person BepRWdKn0QA_0 cat BetAKo6E3rw_0 person BezlbA5t77I_1 person Be4NCK9GwQU_0 person Be4V9lpSpJw_0 knife BfIBlw1RkXc_1 truck BfJUkGEnxvE_0 person BfOXYUOsSf8_0 airplane BfSxTA9yZak_0 person BfT3bVAeXLU_2 boat BfWpLwfDFbc_0 person BffFognyZOA_1 skateboard BffFognyZOA_0 skateboard BfkXvdTkYF4_0 person BfkXvdTkYF4_1 person BfwHmAlZdKA_0 person Bf1cF3BfY18_0 person BgBDqhuoTr0_0 dog BgHvkS4H7w0_0 person BgamGCKlzTI_0 person BgbxYgCIde8_0 cow BggPqcJz12g_1 elephant BgjdCfaJfsE_0 elephant BglxBESIjlE_0 person BgsTkbznAjI_0 person BgwZN0Ui-Q8_0 person Bg0_DcQLOys_1 knife Bg3Zox43xGI_0 skateboard Bg4NtG5QkwM_0 person Bg_cKljiGGE_2 person Bg_cKljiGGE_0 person BhA7KMeJYAE_0 skateboard BhL184lkUcw_0 person BhPyQcTHRmg_0 boat BhXpOqm8Q5o_0 bird BhZl6ZTtKDo_0 person Bha-PhOr-bU_0 bird BhdcIu_nQYs_0 bus BhqZrCcQpD4_0 elephant Bh4QFujTqIo_0 train Bh5wIL7IE9A_0 person Bh5wIL7IE9A_1 person BiGYFhnDhMI_0 airplane BiQ4cYnaGPo_0 person BiYzQbOwhWY_1 train BiYzQbOwhWY_2 
train BipPdxUV2PY_3 boat BirMOPf7k0I_0 knife BizSBnzOzy0_0 person BizSBnzOzy0_1 person Bi1KsDpJT8w_0 person Bi1KsDpJT8w_1 person BjGhd-Eq5ig_1 car BjGhd-Eq5ig_7 car BjJSECIrsd0_0 dog BjLJqIPSyUM_0 bicycle BjQO2ipch-w_1 dog BjRyA1cPxA4_0 cow BjZ9JRI_WkM_0 person BjbCdEHhCjI_0 person BjfwCDsBoeg_2 bicycle BjhITTFavAk_0 person BjiJ7HAaOj8_0 person Bjj4KdIbDBY_0 person Bjk2IA4thIE_0 bear BjogwheL3BI_0 horse BjpX2nla914_1 car BjqdFABBqxA_0 person BjqdFABBqxA_1 person BjraW0bXW-0_0 person Bj8lO8Jag3Y_0 person Bj9wPwHXNQo_1 horse Bj9wPwHXNQo_2 horse Bj9wPwHXNQo_3 horse Bj_fS2abD9o_1 bird BkFws1J8IM0_0 bird BkMb48QM-zQ_0 person Bkco3wJWvp0_0 person BkdBnU65i7Y_0 person BkdWJT3sWro_3 airplane BkdWJT3sWro_4 airplane BkfKa-zgphc_1 airplane BklBU6Epydc_4 horse BklBU6Epydc_1 horse BkoQ8_W4drM_0 umbrella BkteTGu81tQ_0 bus BkwpJBHM_DM_0 dog Bk3VbRagAwg_0 dog BlXhR1rRct8_0 bicycle BlfVNiQZtko_1 cow BlfVNiQZtko_0 cow BlhT8WFfI54_0 person Blj4FY__L6Y_0 person BllnWV-BIDo_0 bird BlqsGIq2hNg_0 person BlqsGIq2hNg_1 person BlzUBgB6BEc_0 person Bl-1081HLyM_0 motorcycle Bl--N1EQpuA_5 airplane BmCAiO-WNmE_0 skateboard BmG7dEBuS6s_0 cow BmHShiZ1Xus_3 airplane BmNwfiFBeRo_0 person BmNzw5vNQNI_0 skateboard BmRZWeMzQLg_3 bicycle BmRZWeMzQLg_0 bicycle BmSBpZrrEt8_0 cat BmXdIzhVZ0Q_2 bear BmZN0ljGa84_2 motorcycle BmfHrAPEMrk_2 person BmfHrAPEMrk_0 person BmfHrAPEMrk_1 person BmjBM58PfZE_0 cow BmjEEjKDJVI_0 person BmjLZgp38NI_0 cat Bm3l_RLjYpo_0 motorcycle Bm3wZ63Ymvo_2 motorcycle Bm7e-qOAcKQ_0 person Bm8qAGd91Gg_0 train BnADRMlWOsM_0 airplane BnNJUP6xfG8_0 bear BniJFr7IJRo_1 person BniJFr7IJRo_0 person BniJr-iCh9M_1 truck BnkIFwVPh8w_0 horse BnkIFwVPh8w_2 horse BnkIFwVPh8w_4 horse BnkU89Dq2IQ_0 person BoA6CUl4t70_0 cow BoGAxXRzHWs_0 cow BoLSvTrm3d8_3 cow BoNtUpvusGM_3 motorcycle BoNtUpvusGM_4 motorcycle BoNtUpvusGM_0 motorcycle BoNtUpvusGM_1 motorcycle BoNtUpvusGM_2 motorcycle BoOANS5_U9I_0 motorcycle BoPj2W_G2Qg_0 airplane BoYvNfndu60_0 skateboard BoZ3ZvdEZ4o_0 car BoZ3ZvdEZ4o_1 car 
BoiPpDeQ2mQ_0 airplane BomNEWAGolQ_0 person BomVU8_LL_Y_2 dog Bowyw_fhWZ8_0 person Boy5toMvMwo_0 giraffe Bo2qsQNYATk_3 skateboard Bo5bT8QP_Og_0 person BpDLFqS9EAE_0 person BpVyiSvjk4o_1 dog BpdZmCkSHco_0 giraffe BpjdKB7AJ8U_0 skateboard BpkMUQLoJUM_0 person BpoWgamMMro_0 cow Bp1zluIhHzc_0 person Bp4vXfVIVxA_0 skateboard BqBkvlijWKg_1 person BqDnDPIE18k_3 horse BqPcqKW3uAM_0 dog BqoRxXUz7q4_2 truck BqpA7iBOQ_s_0 person BqqPm3F1F_w_0 person Bq4id5zA48c_2 bear Bq_emgXftMI_0 person BrDdbgxB7qI_1 bird BrHDj1biLlA_0 airplane BrHDj1biLlA_1 airplane BrJiBbRF25U_0 person BrKgWUQnUWI_0 cow BrQNhzCKfxs_0 person aelph1Y8yPk_0 skateboard ae161Zq0QBg_0 skateboard afCYMTTgbMw_1 dog afD_y2ZEHn4_0 skateboard afLO-CD48TI_0 motorcycle afLO-CD48TI_1 motorcycle afWl3lTglsw_0 person afbS6cTlE5Q_0 person afu5-raaJEc_1 elephant af9Z_LR-L7M_0 person af-MtTvmPic_0 person agFlIZmS0zU_0 person agF_eyIgF3g_0 person agGuxSx4UdI_0 motorcycle agIme93Q6WA_0 person agMdtESL5kE_2 cow agSpfpV4EsQ_0 person agVHBb-qLAw_1 bus agWS48KnYWk_0 motorcycle agXPzkjMl4c_0 bird agYR35aJ1no_0 person ag1ohTMq9Iw_0 car ag5Gy7ZNbfw_2 knife ag5Gy7ZNbfw_3 knife ag6NY6nrTvw_0 bear ahE37MgcoUs_0 person ahMgOG4Bpcw_0 car ahQD9PpYoqE_1 train ahYD0J4XzC0_0 cat aheVwPx1egw_0 truck ahiO1CwoaY4_0 person ahnbyNWfvpM_1 cow ahsHWgQGPNI_0 person ahv6_xBxvmg_0 person ah03BOnPUqs_0 cow ah-2yN1cKOg_0 bus aiINQVIMx5o_0 person aiNcNIUbY3E_1 dog aiX8ymgR1g0_0 boat aiX8ymgR1g0_3 boat aierZPItkn8_0 bicycle aierZPItkn8_1 bicycle aiiN3X-f5Ss_0 person aiklFoEJX1Q_0 person ainWSZibSIM_1 bicycle aio5SboRXGU_0 person aio5SboRXGU_1 person aizJI68M2SY_2 truck aizJI68M2SY_1 truck ai1CTuarr50_0 bus ai3xYb_xvFA_0 person ai7WTyMnl1g_2 horse ai7WTyMnl1g_3 person ai7WTyMnl1g_0 horse ai7WTyMnl1g_1 horse ai9-_EMwk4U_0 skateboard ai_jmsLJTR0_0 person ajAuKSOFBKQ_2 bus ajAuKSOFBKQ_3 bus ajB-QUVDyXI_0 cat ajO4xx5beuE_1 bicycle ajPP5EY_nAo_0 person ajPP5EY_nAo_1 person ajPY1htweXM_0 person ajPY1htweXM_1 person ajtvjEY9TPA_0 airplane ajxcj5ovYdw_0 skateboard 
aj0Ll84jtZs_0 person aj0Ll84jtZs_1 person aj3UwQNtZPo_0 train aj6sqeG0k54_0 umbrella akH9ouIrOds_0 skateboard akIlFKpZAtk_0 person akOLIpAsxqc_1 person akQU-s0RCWE_1 bus akoVZ50spRM_0 person ak6iAVUNU7c_0 dog ak6iAVUNU7c_2 dog ak6iAVUNU7c_1 dog ak89dpHVmHc_1 person alAFNWeSJts_0 skateboard alDkqPNUFLU_0 person alDkqPNUFLU_1 person alKgZTVxcV4_0 motorcycle alX9MOY80Aw_0 person aluZTs_Ys8I_0 car alvKKzlOBKM_0 person alzWhOivD0E_0 person al2Vh0In4HU_0 bear al2Vh0In4HU_2 bear al2Vh0In4HU_3 bear al8Of2FWy80_0 cat al8vzWgNDbs_2 bicycle al8vzWgNDbs_7 bicycle al8vzWgNDbs_8 bicycle amIvXQ6aZkE_0 cow amL9Dar_hp0_0 person amTcWqrgBBg_3 airplane amjpcHzuYb4_0 person ams9MCDF15I_1 person ams9MCDF15I_0 person amvLPTONS1U_0 cow am-3XKJkCqg_0 train anAXVexurxo_2 dog anJbsuTwShw_0 person anLTttUpag0_0 skateboard anR9cuXRv6Q_0 person anWxwjzPRBA_0 person anYy3XNTTGw_0 person anZ9lxr24eY_0 person angay7OmUwA_0 truck aniCxSPm8Uc_0 car anlydfnmv7g_0 person annQpJsk6NI_0 bus anpsTMr_HIo_0 cat anrBShdHOz4_0 person anvk-OdKLBE_0 person anvngue8Qh8_0 cat anzrRzyYAAc_0 dog an-QcnhNhL4_0 person an-mFuTYuCk_0 person an_FRcZ669c_0 person aoBqV2Guvso_0 person aoDJu0KrrQs_0 motorcycle aoOJR-0sPM0_0 person aoSWWKtf8mU_0 person aohLKKJxjIM_0 person aoizdynEVYU_0 dog aoqMoScEfqE_1 horse aotBl0tvpFs_0 train aotBl0tvpFs_1 train ao9uUinn2WY_1 truck apKAwFA4oP0_0 bird apQKmVEucLQ_0 person apZAEWvk8XY_0 person apcgot45Ql0_0 person apdP6_tCdls_0 person apfZjUpoTy0_0 skateboard apfZjUpoTy0_1 skateboard apprUmnQTcI_2 cow aqGKBg0azPA_0 cow aqGp6tCGLOU_0 motorcycle aqKiwfY3Oqc_6 bus aqKiwfY3Oqc_5 bus aqKiwfY3Oqc_7 bus aqNz8TCica4_0 zebra aqUHuS5ALXE_0 cow aqWN-Q0wDHI_0 person aqWN-Q0wDHI_1 person aqZfqhHJPLo_0 person aqdSuLpYlwQ_0 person aqe_mdIg6k0_0 person aqmie50AFwE_0 dog aq2UMxzwliQ_0 person aq50xKvuSFg_0 skateboard aq59B_-6ilw_0 person aq9Sfxn9vMg_5 knife aq-QzG14KJ4_0 person arFKRc7lAo0_0 person arFKRc7lAo0_1 person arPGoY7uh4E_0 person arS7aqpkAU0_0 motorcycle arT4jZLX8pg_1 knife arW0ZUPkah8_0 person 
arZ_mIhaJMo_0 cat are5LvOB2nQ_1 skateboard are9NykT9FM_0 truck arn0j0l_IWI_0 person artWKQTC7CQ_0 person artcASpzYrU_0 person arwZ6ZPJuN4_0 cat ar7TRjurXMY_0 person ar-fzXT8Juc_0 truck asT-GJNeJok_0 person aseOdDcbIRE_2 person aseOdDcbIRE_0 person aseOdDcbIRE_1 person ashHnkqFz7g_0 bicycle ashHnkqFz7g_3 bicycle asl-XTE0jsE_0 person asrDocOfGQE_0 car asrDocOfGQE_1 car asrDocOfGQE_3 car asrDocOfGQE_4 car asrDocOfGQE_5 car asrDocOfGQE_6 car astLiScyoaQ_0 person asx2CkH0O6I_0 elephant as1twjKe3Cw_0 skateboard as6Y3-EaaCg_0 person as6Y3-EaaCg_1 person BrgRnN_LBGk_1 person BrgRnN_LBGk_0 person BrhMkJ6n-hQ_1 train BrnBTne3NBw_0 bear BrnBTne3NBw_1 bear BroiAN_qtCI_0 person BrpRmX410DU_0 person BrrAlsmwDnk_1 person BrrAlsmwDnk_0 person Brrlyds8g1A_0 person BrwABvccCWs_0 person BrzEfM8nWCw_0 cow Br3M-xsvXFQ_0 person Br9CVteHFEc_0 person BsCH_ABy0WE_0 person BsRC5xbG6uY_0 person BsXphFpnOxE_0 bird BsXwLsR6dm8_0 person Bsv8dNYzPkY_0 bear Bs1rRAtP7bw_1 bear Bs3BPJZMD9E_0 person Bs94h8vMmwg_0 person Bs_9E_Rq524_0 person Bs_9E_Rq524_1 person BtFwcgeJjsY_0 person BtKVAhU1LdI_0 knife BtKl-iqkgoY_0 cat BtN0FlaISuY_0 person Bt19SM8BenY_0 person Bt41QF0ze6E_1 person Bt7B7nkGO_4_0 truck Bt7B7nkGO_4_1 truck BuFYI1vYj1k_1 person BuH65mVX5yM_0 person BuPWtDPEJ-0_0 person BuXvxclES0s_0 bird Buco16wWyFA_1 motorcycle Buco16wWyFA_2 motorcycle Buco16wWyFA_3 motorcycle Buco16wWyFA_0 motorcycle BufY7NdKUlM_2 motorcycle BufY7NdKUlM_4 motorcycle BunvBFXoGPg_0 bus BuqljdjPWWc_0 knife BuqljdjPWWc_1 knife Buumm7rgDPY_0 person Bu0gJwoDkRw_0 cat Bu5Bgr9asUU_0 person Bu_HdLSyLSI_0 person Bu_3ep-qAi0_0 person BvEAIc3hmkk_0 motorcycle BvHzGHjR6rk_0 person BvLCgNWIHfA_0 person BvLJZAhIR3A_1 truck BvTLdUcIH5I_1 person BvTbuvBeunI_0 airplane BvTjf9mG5MU_0 person BvZ8DqslB-U_1 airplane BvZ8DqslB-U_2 airplane BviGbtAujq0_0 truck BvrORC4d2yg_0 train BvrORC4d2yg_1 train Bv4rjfW9RsM_0 dog Bv9IXbrDYLk_0 bird BwDccOS7_vw_0 person BwIoxW7Ee8M_4 train BwUYR-ZnpX8_0 horse BwW4Fs1eTRg_0 airplane BwW4Fs1eTRg_1 airplane 
BwergWBqOOs_2 train BwgJmjOzlRk_0 person BwoTsoC3hvQ_3 horse Bwo1MaJvxRs_0 person Bwrh4q5KLVg_1 dog BwsHsSpS0dQ_0 bird Bw2RhmesY5g_0 person Bw5iwcbP4eM_0 giraffe Bw6f2OXYtSo_0 cow BxHIRvoGZMM_0 person BxMoEE7XwL8_0 person BxNE34BGZ-4_0 person BxQp3-SCUGs_0 person BxQp3-SCUGs_1 person BxWs9aINEEI_0 person BxWs9aINEEI_2 person BxWs9aINEEI_1 person BxYdU6vB2YQ_1 motorcycle BxaEaD7zeX4_0 person BxhktnvjtLA_0 truck BxmeqCev3Kw_2 boat BxmeqCev3Kw_3 boat Bxm3EvRZAI0_0 skateboard BxvlWueS9vA_0 motorcycle BxwmNnxcI7o_1 person BxzVlf9-SLc_14 bicycle BxzVlf9-SLc_4 bicycle BxzVlf9-SLc_6 bicycle BxzVlf9-SLc_8 bicycle Bx2YQSFETcw_1 person Bx4ELKBw9PU_0 cow Bx4ngxnRjvM_0 motorcycle Bx-is-dL1ko_0 person Bx_z_4bt8O4_0 person Bx_z_4bt8O4_1 skateboard ByBWtiJJNqk_0 person ByBWtiJJNqk_1 person ByFCiUvKd4E_0 cow ByFCiUvKd4E_1 cow ByFCiUvKd4E_2 cow ByJNGLp-Q1Q_0 boat ByRne1VtDow_1 person ByfeHjkm0NA_0 bus ByhpLi9sRUs_4 train ByhpLi9sRUs_5 train ByhpLi9sRUs_0 train Byn2Qo7ghaQ_1 person ByvWskJDMGg_0 airplane ByvW2VADH6w_0 motorcycle By1cSo8DcUw_0 bicycle By8jq7bVrkw_0 person BzKADkfj5sM_0 cow BzNlO4ccRRY_0 person BzOo01dGJkw_0 person BzT8xDTB14c_2 truck BzWiQPw-vQc_0 person BzX2DmrGvp0_0 train BzeW7KdQ818_0 skateboard BzeW7KdQ818_1 skateboard Bzehenf5vSI_0 airplane BzgqI8VBlSE_0 person BzpY-JMNW4c_0 person BzrM5QG9q2o_0 train Bzr3gVS8SzI_1 boat Bz5rpBZ1dzs_0 person Bz7A9QxD1nY_0 knife Bz9MqNlU7KM_0 person B0AazXeFQIU_0 person B0BXcxFMgrk_0 knife B0EZ9LIObGc_1 motorcycle B0FupWyYbG8_1 person B0NJSrhuWwA_1 person B0NJSrhuWwA_0 person B0QFrtXczzE_0 person B0SYog80Y78_0 person B0WaLst2GGg_1 person B0YrdZ7s3UY_1 person B0YrdZ7s3UY_2 person B0aFuZP3nYE_0 person B0aFuZP3nYE_1 person B01lwUoyl90_0 person B03gLj0lJrk_0 horse B0-L6VbxLcU_0 cat B0-lAJ4tBN4_0 train B0-lAJ4tBN4_1 train B1IQyTNE7eg_0 skateboard B1Ojfucympw_0 person B1Ojfucympw_1 person B1YzUGPZQWo_0 train B1hkAet1OQI_0 person B1isEeljBFI_0 person B1pC6hfF_Do_0 person B1qSE-7JgXE_0 person B1yiSrv4Ocw_1 horse B1zPD20nhTg_0 person 
B12C84by_eA_0 person B12C84by_eA_3 elephant B12C84by_eA_1 person B12C84by_eA_2 person B12C84by_eA_4 person B12C84by_eA_5 person B12C84by_eA_7 person B12C84by_eA_10 person B12C84by_eA_11 person B2EMVGU5pNA_4 train B2VryVb5p54_0 horse B2VryVb5p54_2 cow B2V7kk7fqSc_0 person B2X9JzMNZb0_0 person B2ZpqEJpVX0_0 person B2fTIk9eCNc_1 elephant B2gJVve4I58_0 person B2hKNbDmBtM_0 cat B2lAxi3jIR0_0 person B2lAxi3jIR0_1 person B2lAxi3jIR0_2 person B2xcdU4Qoz8_0 bicycle B2xcdU4Qoz8_12 bicycle B23TpirETNE_0 horse B26AQtx7Xic_0 person B3HZSrALQYc_0 skateboard B3IjPORG3_w_1 bird B3J2umsYK7E_0 person B3QykPv8TnI_0 person B3X5wDENAUw_0 cat B3kTu0B4OjM_0 person B32uNSxqzgs_0 cow B33seWCiea4_1 person B33seWCiea4_0 person B4Q6pRC_mZ8_0 bicycle B4Q6pRC_mZ8_1 bicycle B4Srj2O1AWQ_0 cow B4dFepwxEOU_0 person B4iP6lAoNYo_0 person B4jbThMFW00_0 person B4mWkc8-_6A_0 bird B4oO-miJ6VU_0 umbrella B4vM2iKb8cs_0 person B4_mRuPC7o0_0 person B5BNEoIaQL4_0 person B5GwJoM3aX8_0 person B5NgN9mocgI_0 person B5PHI2HVtuc_0 person B5fv91yB4Gw_0 bicycle B5qSvRpXLS8_0 cat as7rVUFzyzg_0 skateboard as_Rz9F3slw_0 cat atA-Cgv2XHY_0 person atE1O6J4Wls_0 person atLGWZUbEuM_1 train atMjLEIbsBI_0 cow atxnLL4Vjuo_0 person at2dmAEDdmg_1 person at4pXKjEDic_0 person at4pXKjEDic_1 person at5edW3lMVA_0 person auA-q9fWwn4_0 elephant auDJ1xtxFlw_0 person auDJ1xtxFlw_1 person auFLAZb-gD8_4 truck auGyhsy8iLA_0 cow auNciV4eLVo_0 bus auOl1mbGUlk_0 bicycle auOo1Lg_wvU_0 dog aubLDLbxxsk_0 person aueT5WO4e_c_0 giraffe aueT5WO4e_c_1 giraffe augKp60fa5Q_1 car auiPa0HNOEQ_0 person auu_tYb3G1Y_0 person auzy4oPzM5Q_0 motorcycle avCqOSeS7WU_0 person avC67gaD1NM_0 cat avHbY1Q3vyw_1 elephant avLxYBedm_c_1 elephant avT7Q6Wibdg_0 person avl9d-bL57Q_0 airplane avl9d-bL57Q_1 airplane avob12vGzmU_0 horse avonCFmxPyg_0 person avonCFmxPyg_1 person avpWY3czerE_1 car avpf9VVT6CU_0 motorcycle avvQ5wNPiew_1 person av475qBV4QY_0 skateboard awC9zxAeP54_0 person awQ1n9aQEco_0 person awVBieSP5Zw_0 person awVa7pqR9DU_0 horse awfg9NsCVQ0_0 person 
awjHSQ5uPi4_0 bus awkpYVN-fJw_1 horse awmHGFkxxlw_0 person awwWMuOKe3c_0 person aw059qHbVm0_0 bus aw2lOvXUAPg_0 truck aw5C9nQgLcA_0 person axB1Gk85UtQ_0 person axEK7nZ8W3I_0 person axJZ92uWnkA_0 person axXs2oUd4ow_0 bear axcDoOd0G0s_0 truck axjSgDsN6t8_0 horse axltu5Qf6ok_0 skateboard axn6QuPBPqA_0 person axulii3UXSQ_1 person axulii3UXSQ_0 person ax4YUE-PcF8_0 airplane ax4YUE-PcF8_2 airplane ayD3RJIjplM_0 dog ayRmnUb2LAI_0 airplane ayax5k3PJMs_0 person aybdlOdul0U_1 person aybdlOdul0U_0 person aydxF0r6n9s_0 person aydxF0r6n9s_1 person ayg0x1glF2s_2 horse ayg0x1glF2s_0 horse ayg0x1glF2s_1 horse aylBB_8cv60_0 umbrella aysqPEtZvsg_0 person ayuF_8chcKM_0 person aywW_Wvo49w_0 person ayzzG8M0fzo_1 person ay1d8NBbrl0_2 bird ay1d8NBbrl0_3 bird ay5RnrQple4_0 train ay5tx1Rovwk_0 cat ay7LLDO9Ecc_0 dog azC7-_wC8N8_0 bus azDn4DU7cGA_0 person azKKcIb4Ufw_1 boat azOInI_CMHM_0 bus azbls7-iaEU_0 person azbls7-iaEU_1 person azbls7-iaEU_2 person azfLb8VvI-4_0 person azfLb8VvI-4_1 person azfLb8VvI-4_2 person azlRI_Jydpw_4 cow azmZDijLihI_0 person azmZDijLihI_1 person a0FDxoXtFyM_1 airplane a0NOwUio_n8_1 person a0NOwUio_n8_2 person a0NdjlW5H_U_0 cow a0N_vetshbg_0 person a0N_vetshbg_1 person a0OjB7xzRx4_0 person a0RusP9ATfw_0 person a0dHPtoBS3U_0 person a0hRgBpppWs_0 person a0jpiOFS7eM_0 bear a0oeBV6-20U_0 person a0uoJdAwobA_0 person a085oeXd0RE_0 person a0-Pmmyi8js_1 person a1ADw1megCI_1 airplane a1Fzn7iUHO8_1 motorcycle a1RVXQl4rlY_1 cat a1RinDI9Hgw_2 knife a1SaKvoO2Og_0 cow a1U6U_pntMo_0 person a1XDxiP1hNA_0 person a1ctjjNUZ-4_0 dog a1kLNA-KACs_1 bicycle a1lQwuhicQI_0 person a1lQwuhicQI_1 person a14VlgxHS3M_0 person a2AT0Xo7uLY_0 person a2Osa5aleJ0_1 bus a2Qp2Grx3_8_0 person a2XMK6mjiZg_0 dog a2XvXs2guuE_1 person a2XvXs2guuE_0 person a2fEq8oS3M8_0 bus a2gYRtJhP1E_0 horse a2gYRtJhP1E_1 horse a2hv4szlq-Q_0 train a2kH2_9zoWU_0 airplane a2o_-GSpXXk_0 cat a2qmS6AhUYk_0 motorcycle a2vx_F1NOas_0 person a26mRIQUPoU_0 dog a26mRIQUPoU_1 dog a26mRIQUPoU_2 dog a27UC8vu1hI_1 truck a29AS00WJrY_0 
cow a3AIwQnG0Ek_0 cow a3FLLhQu768_0 person a3THrQYDkqw_1 bird a3UCtF8nZIY_1 skateboard a3dbdHben-o_0 elephant a3dbdHben-o_3 elephant a3dbdHben-o_9 elephant a3dbdHben-o_1 elephant a3dbdHben-o_2 elephant a3dbdHben-o_4 elephant a3dbdHben-o_6 elephant a3rGEI8MdMs_0 cow a3uvEIsI1no_2 person a32oJ0GsAYw_0 person a35UuVw16Ks_0 person a37D3FoqIJA_1 knife a3-oi7T-Lw0_1 zebra a3-tURw95Xo_2 person a4IU4va7hp0_1 truck a4LYVAPbEwI_0 motorcycle a4LaeeZXIc0_2 skateboard a4Nt5QxFqmY_1 boat a4PwZfJZVPA_2 bear a4arqJgXHDA_0 person a4pR_YBd4yY_1 bicycle a4uNoGpllg4_3 bear a4v1ptMpyi0_0 cow a4v1ptMpyi0_3 cow a41TZwhyyP0_0 cow a46BqT5Mo5I_0 cow a5HZnFcvdyA_1 horse a5P8pVrcSRk_0 motorcycle a5brvs-fct0_0 person a5tSaF5GCKE_1 cat a5ye5BUJFlY_1 person a5znd3aNwLk_3 bicycle a58tMy0mhIk_0 person a6G_DBEFdFA_0 horse a6ZXi7Qqls0_0 person a6fBYYEgBvs_0 dog a6jDeIJbF7Q_0 person a6uyjrBkkXs_0 boat a61piN6ffE4_0 person a67zz0CSEpk_0 person a67zz0CSEpk_1 person a7B81Zeqgfw_2 truck a7HKuyv2qLQ_0 elephant a7HKuyv2qLQ_1 elephant a7Q6eb6feT8_0 person a7S9rFNKVMI_7 motorcycle a7Zr0-1LIPc_1 dog a7hwm4TORvY_0 person a7pC7IjO2ik_0 truck a7peWR4xJwQ_1 cow a7ygZsaDMis_0 person B5_Hyk-p7kE_0 cat B6E15pe4UR8_0 horse B6LGwD1E9SQ_1 person B6P8B8BO-6U_0 giraffe B6SaDYczlDQ_1 person B6U92N9hh6k_2 horse B6V4xqX67OA_0 truck B6bDVhRNw00_0 airplane B6cEdaWTjeU_0 person B6dBkoOhfBU_0 car B6lU93wtaDA_1 boat B6mP9KsnQPc_1 bear B6mngUQtFJ4_0 cow B6nlTJYtmws_0 cow B6pXMjH4geU_3 boat B6qshzfLYzs_0 person B6x2dNbgPjM_0 cow B6y439-imys_0 person B6z7eCsgfM0_0 bear B61Wf8NFvcU_0 airplane B645r0hkdmg_0 person B645r0hkdmg_1 person B67FwwZfIEA_0 person B6_IcyhOHpE_0 person B7BjhnnQ2K4_0 person B7GRNv2opSY_0 bird B7MHQOUO4f8_0 umbrella B7Z9UV6aQuM_0 bird B7a8WkaWmH4_0 person B7a8WkaWmH4_1 person B7cXCz7jJKQ_0 cow B7gX18_mDyQ_0 person B7hmqrwe88o_1 elephant B7hmqrwe88o_2 elephant B7iAvi5riV8_0 motorcycle B7nwfSMbEL8_0 cow B7pEEUJ-J1g_1 motorcycle B7rCxgg3F_s_0 train B8Bp9yKWV9c_0 person B8D4fPARFvo_0 person B8HQglK444U_2 airplane 
B8HQglK444U_0 airplane B8HQglK444U_4 airplane B8LGL1Tt_wg_0 person B8MxJKDkvkE_0 person B8eeoykmq1E_1 person B8eeoykmq1E_2 person B8f7NnYq5sg_0 person B8f7NnYq5sg_1 person B8sWL2syyA8_0 person B8uIyRkm9YA_0 airplane B8zGkBkQw4c_0 person B87W__RIE-E_0 person B8_Z7m50I_E_0 motorcycle B9AXF91pIUs_0 airplane B9Ed_vAN9mc_0 dog B9Y_LrDVbg4_0 person B9aqDsvGy5Q_0 person B9aqDsvGy5Q_1 person B9j233QxEuQ_0 person B9oJSA_NJ2s_0 bicycle B9z17FOPd5A_0 person B99mIPKaChY_3 cow B-CR7vl67W8_0 person B-QiQvJcSVk_0 person B-T1YNe09SU_4 bear B-T1YNe09SU_3 bear B-bDxAN93a4_0 airplane B-dlnlRKA5s_2 airplane B-dlnlRKA5s_7 airplane B-tukWZbXp8_0 person B-wJpt4zl0c_0 person B-x2pu-ux3w_0 horse B-z1uE4iuz4_0 truck B-0WNs2QYPk_1 elephant B-48lEXzIS8_0 umbrella B-7cqxw95Ro_0 person B_BqrY2eeCY_0 motorcycle B_Gjc7J18qg_1 person B_Gjc7J18qg_0 person B_M6X41emhY_0 person B_O8idmfoCQ_0 person B_Tj79jaRXs_1 person B_Tmq51dx1g_0 person B_jGC2tlhRo_0 person B_k6vEEPHK0_0 person B_lEJv31TlI_1 person B_lEJv31TlI_2 person B_nZdcreecE_0 person B_wWPH9kbxM_0 person B_ylVg-TN2Q_0 skateboard B_4Kfa8_9ms_0 person B_4eJYakoRY_0 motorcycle CAEqRvJLY-M_1 motorcycle CAe1SZKZ9T0_1 car CAq4CxCpeQE_0 cat CA4UqnJCs58_0 motorcycle CA9SLI7TOKQ_0 person CBASqWyp4yk_0 person CBJQ5dL6Df8_2 horse CBNqNe7G-QQ_0 person CBnYDFRfYgo_1 bus CBqyVKttAwU_0 cow CBtgGOzZtLQ_0 person CBz3ZOrTAjI_0 elephant CBz3ZOrTAjI_2 elephant CCAsEc2oRAM_0 elephant CCGg17i4vMU_0 person CCHay2RSnJI_0 skateboard CCHay2RSnJI_1 skateboard CCLRdGNDgdc_0 cat CCoGim--jEg_0 train CCp6NLBil8k_0 bicycle CCwovjgEx1k_0 person CCwovjgEx1k_1 person CC0aX78fQFo_0 cat CC-qoxEyocI_0 person CDCLLCkr87I_0 cow CDY4TXCreQ0_0 person CDbWYF89944_0 person CDb6uyrYrZA_0 car CDfjcWI7iBQ_0 boat CDgBHxiVkFw_0 truck CDnrG74PXbI_0 person CDpQZEjohRc_1 cow CDpQZEjohRc_0 cow CDrU-q6QdEs_0 person CD0cWR7d9yI_0 person CD4SGfIdfSg_3 elephant CEDTshbJOaI_0 person CEJoHSbb4gg_0 person CEJoHSbb4gg_1 person CEMCCDAYzQs_0 person CENd4xI4dnY_0 person CETUG_G0I4k_0 cow CETUG_G0I4k_1 
cow CETUG_G0I4k_2 cow CEUjuyvgrB0_0 person CEUqqi8y4sg_0 cat CEVHrP5OzJ0_1 knife CEafe_JTk8g_0 knife CEqA0cqMfzg_1 cow CEsjzJHOUBw_0 dog CEzWiyTQOMA_0 truck CE1gHqc8aqU_0 person CE3KdY0X0QE_1 person CFD0NOl12CA_1 train CFD6d4OweGQ_3 motorcycle CFD6d4OweGQ_1 motorcycle CFD6d4OweGQ_2 motorcycle CFD-UQW1aQU_1 car CFD-UQW1aQU_2 car CFRsGLeMJKc_0 person CFXkKgig7Io_0 person CFee6F2rbjc_1 bird CFxObg2ebKQ_0 airplane CFxObg2ebKQ_1 airplane CF0JmXACTww_0 person CF01UBuV76Q_0 person CF7DZCaSqIg_0 bird CF7DZCaSqIg_1 bird CF7KYbTChlg_0 person CF71f3YLQ9U_1 person CF-cX0etaAw_1 cat CF_NSKkrwjg_0 person CGCNTZsml7Y_0 cow CGQoaYTzfaU_0 train CGQoaYTzfaU_5 train CGQoaYTzfaU_7 train CGgxp3ycSWs_0 elephant CGoqd4n_qJg_0 person CGsUTzKzV4U_1 train CGwrXZ2fUqg_0 person CGy0nn1MCqY_0 person CGy0nn1MCqY_1 person CG1sXlDy2Yg_4 horse CG1sXlDy2Yg_5 horse CHH1SlvOzfI_0 person CHIVYSnFst8_1 bear CHJFpAcH8NM_8 bicycle CHMzSMq0ui4_0 skateboard CHZU6sP-loU_0 person CHZU6sP-loU_1 person CHbhzxurZNM_1 person CHbhzxurZNM_0 person CHnWGkGAnos_0 person CHo3jSv3HIA_0 train CHwNoZ55z6c_0 cat CH6ptLNxppU_0 person CH8zCsamj44_0 person CH-_pvq3am4_0 person CIJ-q_X_y7E_0 person CIKrCLz06-4_0 cat CIQLvytEu6E_0 person CIQz5we_nHI_0 person CITgpk4GyMA_0 bear CITgpk4GyMA_9 bear CIV_VaLTf5c_0 motorcycle CIc1KbOeijU_0 person CIgzZOf3uA0_0 person CIgzZOf3uA0_1 person CIlb5C929mc_0 knife CImmRnndBuo_0 person CItr4F49wO4_0 person CIxs-77bPrM_1 person CI2GrLRwQR4_0 person CI3rFXxUPtI_0 bird CI6fYr7IJJM_0 person CI_9TEXzQE8_0 person CJD7b_dMrVE_0 person CJG8ou9QuY0_0 person CJIpdb7wZEc_0 person CJNAMf-R_J4_0 truck CJNj2wqp8QU_0 bear CJOJBhvHmCE_0 person a79_ETe4ego_0 person a7_ixAbhsRI_0 elephant a8MHgXPiRZU_0 person a8as0DkifS0_0 person a8eQTqlG-6o_0 person a8insUA82jQ_1 dog a8insUA82jQ_2 dog a8insUA82jQ_3 dog a8r9Xss8Es0_0 person a8wT4T21reQ_0 person a8z4RhTT02c_0 horse a82uXl_fE7A_2 cow a82uXl_fE7A_3 cow a892r_pD5PM_0 person a9FI5hfZsG0_0 boat a9GBRb_g82o_1 bicycle a9GBRb_g82o_2 bicycle a9YciDJw4wo_0 dog 
a9Y2Jm4-FDM_0 person a9ZvcKL6lEg_0 person a9fG2p2YO7k_0 bus a9fG2p2YO7k_2 bus a9g4dt8Lszw_0 person a9g4dt8Lszw_1 person a9riNB4_uhk_0 horse a90AssqciQk_1 elephant a90AssqciQk_2 elephant a-EIC5v0X4o_0 dog a-EIC5v0X4o_1 dog a-MNXAJ2mZo_0 person a-NocjWzZtY_2 person a-QTXZfMMT4_0 person a-ZWAMyDG3o_0 person a-iJ2J3oI-A_0 person a-lm-MyKchM_0 cow a-s461-Ddxc_0 skateboard a-u5tm8bZnc_0 horse a-yRjCC5TTM_0 horse a-1bMCU5aj8_0 motorcycle a-8RK3OMAOo_0 skateboard a-8RK3OMAOo_1 skateboard a_KVzTF1RIA_0 person a_KZ5mevNfs_0 bear a_OkB8q7LMc_1 person a_SryCna8Rk_0 person a_UjbYab9UM_0 train a_YIQ1VvpcU_0 person a_YIQ1VvpcU_1 person a_gLFD5d04A_0 person a_wdiSqtOK4_0 airplane a_xkGO87GsU_0 skateboard a_1zKb6B-bs_0 person a_6uxh_4kb8_0 person a_-WUUfn_l4_0 person a__R_Y49D54_0 person bALr5X95BQ8_1 person bAMbXytHB7Y_0 person bAMbXytHB7Y_1 person bAdtKFYWQcE_0 person bAfpD53Vjic_0 horse bAinSo2I3HI_0 person bAinSo2I3HI_1 person bAp653-8UZI_0 person bAtWugkhW88_0 bus bAutb-z3rvw_0 cow bAwVg4MVWds_1 elephant bAwVg4MVWds_0 elephant bAwVg4MVWds_5 elephant bAwVg4MVWds_9 elephant bAwVg4MVWds_10 elephant bAwVg4MVWds_11 elephant bA2bnjEnbus_0 person bA4v5gLC700_0 person bA5elX54rTQ_0 cat bA6JRlAu2yE_0 person bA8lz4kTY-0_0 bicycle bA8lz4kTY-0_3 bicycle bA8lz4kTY-0_5 bicycle bA8lz4kTY-0_6 bicycle bA_NwRpP6Tw_0 person bA_6OElyKFo_0 train bBPPJNf59kQ_0 umbrella bBT4o_qtgWU_0 person bBgRYIPlqAQ_0 person bBm9VYnMO9g_0 bird bBt5A6pwnxY_0 person bB1rIuXXQFA_1 bus bB4Xm1LS9CI_0 dog bB6PWM19eMo_0 person bCB5mMgiGnk_0 person bCRN4AZbr6o_0 train bCbqiJ6Ales_0 person bCuWk5NSB0k_0 person bCuWk5NSB0k_1 person bCuuL9wxM7E_0 person bCuuL9wxM7E_1 person bCvbst3iM94_0 motorcycle bCwUgQIL5cE_0 knife bCx54wbopXs_1 horse bDBjT69DcT4_0 cow bDEPo_ZJ8BY_0 truck bDJyFQqK69A_0 person bDOeksOYoHc_0 truck bDOeksOYoHc_1 truck bDOeksOYoHc_2 truck bDOeksOYoHc_3 truck bDO5jSIN9C4_0 person bDZrANNzYZY_0 skateboard bDaTeoyWI4g_0 train bDcapf9qqwU_0 person bDjiXPhFyUA_0 person bDu9DwJEoHs_0 cow bDu9DwJEoHs_1 cow 
bDu9DwJEoHs_2 cow bDxvHkJLr2M_0 bus bD9LGwYECDw_0 cat bD-NwifgK0w_1 skateboard bEDI6tCMZXU_0 person bEIh6sX-Tl4_0 person bEKdkY9RBEY_0 person bEM1_c0lvzs_0 bear bEOBKFTwR2Q_0 giraffe bETxZfOvyHY_3 bear bEUZ0kW5UxE_1 person bEawSJKPt-Q_0 person bEhFibV8au4_0 person bEqXwB3xaWk_0 person bErIbiSkE10_0 skateboard bEwALd1GaT4_0 bicycle bEzk1Y4QUKs_0 bus bE2p5KejqaA_0 person bE54N9ho-us_0 elephant bE9RuKWeuuo_0 person bE--xARlZGI_1 bird bFA9McooYzo_0 car bFCSt5rQdmU_0 person bFEO4MHzBto_0 person bFIAwyZ6uuE_2 person bFIAwyZ6uuE_0 person bFIAwyZ6uuE_1 person bFNUtoXNMlQ_0 bus bFORQXIUbxA_0 person bFXutLP--Cw_0 cow bFXutLP--Cw_1 cow bFXutLP--Cw_2 cow bFXutLP--Cw_3 cow bFYfbtcZvsM_1 horse bFe5fer15nk_1 bus bFm95kiEE_Q_0 bicycle bFnZbMhDMQ8_0 person bFrVmI5XvFw_0 person bF2D0pMJqLQ_1 knife bF65L0Tc9w8_0 person bF8lUYDQNgc_0 person bGCRyP03o54_1 skateboard bGFqTDkSuTA_1 bird bGMKF81Sy6c_0 person bGcugFPOZ98_0 person bGeFOznVAdA_0 elephant bGmggiJ7Hrk_2 boat bGpuuVQyMOY_0 person bGsY4wldptk_1 horse bGsY4wldptk_0 horse bGyLNR-ZWRY_1 cat bG7btkvllWc_0 skateboard bG9Q1zv6YZ4_1 person bG-X3irBEO0_0 person bHALJVsPIWo_0 person bHBuapxTSS0_0 person bHB5zkcU4DY_0 person bHO746jxL2Y_0 skateboard bHP9bh7-qNQ_0 truck bHWmtSkc1qY_0 person bHWmtSkc1qY_1 person bHbgFvCFkb0_0 horse bHcNLuPTrTk_0 person bHcbcNIxs_o_0 knife bHdxB4LnmGY_2 motorcycle bHdxB4LnmGY_0 motorcycle bHdypdEXRYY_0 skateboard bHim6VG9R7E_1 boat bHoVPJGd7EU_1 truck bHoVPJGd7EU_2 truck bHoVPJGd7EU_3 truck bHvVd9-u80E_0 person bH5d5crxmiw_0 cat bIFUXEvQb_4_0 truck bIFUXEvQb_4_1 truck bIV7YZEPqTo_0 person bIiV4e5w280_0 person bInwFKVbP2c_0 person bIqcbjzOQ0Y_0 car bIslKUiw6YQ_4 airplane bIslKUiw6YQ_0 airplane bIyfjvesRuY_0 boat bIzzvd9q2po_0 cow bI19pnS1D7Q_0 motorcycle bI8htXUqQkI_0 cat bJADjJacbIY_1 person bJAxqtGR-MY_0 person bJBnGIqBiuw_0 horse bJDJ5yePi6M_0 person bJITjrxz5Ns_0 person bJI1844s-tU_0 horse bJKrgOW0nMk_0 person bJMS4sT7XRo_5 horse bJMS4sT7XRo_6 horse bJMS4sT7XRo_8 horse bJMS4sT7XRo_9 horse bJMS4sT7XRo_0 
horse bJMS4sT7XRo_1 horse bJWTtXkyZHg_0 person bJcrA1AOfI4_2 train bJcrA1AOfI4_3 train bJfHVvueTbo_0 person bJh3iPv6jYc_0 cow bJqhWaDN0hQ_1 dog bJ0SdP6bjnQ_0 person bJ24-WqB1xs_0 person bJ6hIJWstDo_0 truck bJ6-RBgHmRU_0 person bJ8k9v22vJA_0 person bKBLXhOMUi8_0 dog bKCfbZIUSZI_0 person bKCjZrT7jIY_0 truck CJfXDO8EqQ4_0 person CJfXDO8EqQ4_1 person CJm40KxFN5E_1 person CJm40KxFN5E_0 person CJqFjtBvN9Y_0 skateboard CJqHpmU9iSk_2 person CJqHpmU9iSk_0 person CJrxPkQa2GE_1 train CJ0sXsga9bM_0 bus CJ35smVDZW0_0 person CJ4qgeMiaOQ_0 airplane CJ6n8mmO1b4_0 cat CKB_--5AbfU_0 train CKC6BopJKyk_0 person CKGpdOkI6P4_0 person CKNmSha1fz0_0 person CKQHLTDcKyk_1 bird CKSN1SlM9ug_0 cat CKZ1xRX4dh8_4 knife CKcBs841bV0_0 person CKhADB_ssaI_0 elephant CKjQxzl__Fw_0 bicycle CKkp1wLGtks_0 person CKmTbQn6J9U_1 person CKsvfQdlYfo_0 person CKuBMM3fZ84_0 airplane CKxmvXSrPIg_0 bicycle CKzh_WuJFng_0 person CK29cIxMNP0_0 person CK39c3vr6gc_0 skateboard CLAjvvAM-K4_0 person CLB6UiAOkP0_1 bus CLMUcOgZdNQ_2 cow CLQOTITDBeo_0 person CLXlbsB7sLY_0 person CLdyznsISW8_2 car CLosaFzMFeI_1 person CLzV3TNXkFo_0 person CL1Bt58elWc_1 person CL1Bt58elWc_0 person CL1z2IBwWkA_0 person CL1z2IBwWkA_1 person CL4fc23TpVo_0 person CL5zmQikk-A_0 person CMBw6j8-QzY_0 person CMBw6j8-QzY_1 person CMIMzbsGXk8_0 bus CMLOYaDEQ9g_0 person CMMGX4SFyIs_2 person CMOEwqoxxwo_0 person CMP-dHylUas_1 person CMlE5HjD19w_0 truck CMlNU8W7Lsk_0 cow CMrJ3Hog9z4_0 elephant CMrJ3Hog9z4_1 elephant CMrJ3Hog9z4_2 elephant CMsMnTwn9o8_1 truck CMwy_JpVNwc_3 bird CMwy_JpVNwc_1 bird CMwy_JpVNwc_2 bird CNDd5De0h98_0 person CNEdjudh1lE_0 person CNID7GMZCtU_1 horse CNiuz-9TxDo_0 person CNqKVUmynPk_0 airplane CNt_itMBqgs_0 person CNua3gOk0oM_0 bus CNwRXN4wSAk_0 knife CN6-VQgDfe4_0 person CN8AktLgwN8_0 giraffe CN8AktLgwN8_6 elephant COAed-b3LTY_0 person COFcQrVSFcc_0 person COTylrR16zU_1 boat COc8fmI9wQ4_0 horse COh7aoqTWjY_0 elephant COj_p56dMLI_0 motorcycle COksm121JZ0_0 train COxq73j4_rY_0 person COyU6vUfxXQ_1 person COyU6vUfxXQ_0 person 
CO2cK7r8MNQ_0 person CO33VpWw45s_0 skateboard CO_0l5Z12kw_0 cat CPManZ0i9vw_0 truck CPN9sc_XrbM_0 elephant CPOp_zZsQJk_0 cow CPQXOFjv2LM_0 person CPXyJXYL8yY_0 motorcycle CPXyJXYL8yY_4 motorcycle CPYxpWVVj_M_0 cow CPZSesZALiI_1 cat CPuy90LHgrc_0 bus CP3cZfEx36E_2 bear CP3u7XjYteQ_1 person CP3u7XjYteQ_0 person CQEjDKzTc3Y_2 person CQE_vEzLzMQ_0 person CQPAMu_3qwY_0 bear CQUUCXr0Idg_0 person CQU9LkJ1PlA_0 person CQU9LkJ1PlA_1 person CQbUivUBlJ8_1 bear CQbUivUBlJ8_3 bear CQihoSP1KLM_0 person CQite5jXihw_2 person CQlL5sCIaM4_2 train CQlL5sCIaM4_0 train CQlL5sCIaM4_1 train CQmCFDEszdc_0 cat CQyxRGB9-_o_1 elephant CQzQkumb_iw_0 person CQ0hdku_Mu0_3 elephant CQ0hdku_Mu0_4 elephant CQ0hdku_Mu0_6 elephant CQ0hdku_Mu0_8 elephant CQ0hdku_Mu0_11 elephant CQ2pa82Muc4_0 person CRGhEOLOPLw_0 bus CRHfpplogUY_2 car CRHfpplogUY_1 car CRPfcUOT10Q_0 train CRQ8kzUgpGE_0 cat CRS3P9ePDug_8 train CRS3P9ePDug_0 train CRS3P9ePDug_4 train CRS3P9ePDug_7 train CRS3P9ePDug_9 train CRS3P9ePDug_1 train CRYLa0UnCJY_0 dog CRZQQc-7Cr4_0 person CRZQQc-7Cr4_1 person CRcL9sc8Z_Q_0 person CRihNgUldQg_0 person CRpG5Auclh4_0 train CRscoQhOT24_0 elephant CRteSMMhdfo_1 person CR2Qbth78ug_0 person CR7gNMR7aFk_0 person CSBnYbN-fwQ_0 person CSBnYbN-fwQ_1 person CSCN35ZL4gk_0 person CSCmLaLpgec_1 train CSGkGWkJnIo_0 person CSKOzx-8MRM_0 person CSKhQtYbLiY_0 person CSTEfDaVq_w_3 horse CSgIyZrF2Xw_6 bear CShE1WLp4V4_0 person CSlYtyS3ekI_0 cat CStjlkpuH8I_0 knife CSwiprmAnWk_0 person CS4LhFaTdRc_1 person CS4TVHuh-OI_1 person CS4TVHuh-OI_0 person CTBCSXpoCNw_0 knife CTGjM7vaWkc_0 car CTNN0vCWthk_0 cow CTOTTFDvM9g_0 elephant CTOTTFDvM9g_1 elephant CTpK5Ywqj4E_0 person CTtActqncZs_1 person CTtActqncZs_0 person CTty0Fesx4k_1 elephant CTty0Fesx4k_2 elephant CT6O84zfmoY_0 person CT8VKdB074U_0 dog CUB_Y4U0gNU_0 person CUE1Oj2b7oo_0 person CUIv9zU0_7M_2 dog CUQZtS7SlyM_0 truck CUVQtlpfthI_0 person CUVqn-7LP_k_0 cat CUjEVN0BT58_0 person CUjbAz30mdA_0 person CUvi-gOiEak_0 airplane CUvi-gOiEak_1 airplane CUzrNlKejnA_0 person 
CU-5HeXnZag_0 person CU_cxu2KrzY_1 cow CU_4MsJSWGw_0 horse CVCPdF3TevY_0 car CVJEcVS63rM_0 person CVJu9kpxa0o_0 skateboard CVQq3Lnsmb8_0 skateboard CVRQkAzvHOI_0 cat CVXbWRarjGI_3 bicycle CVa-tmxG3G8_0 bus CVfXcK9LvU4_0 cat CVnQzQjIfdo_0 person CVnQzQjIfdo_1 person CVtUo7t1tg4_0 knife CVtdQUWrMFo_0 person CV1gdpxyUvQ_0 umbrella CV7yBA-RY-s_0 person CV9Mv-Z5ywo_1 knife CV9_qaQ3bOc_0 dog CWNPg3hbbCc_0 person CWRUw47fnHQ_0 dog CWcpGIObSb4_0 person CWcpGIObSb4_1 person CWhtecFS3Ps_0 person CWh66yU69HI_1 person CWq2nbpnjkw_0 person CWsgkyp-Wv8_1 person CWsgkyp-Wv8_0 person CWu6nT2qW2Q_0 person CWydCxGJyck_0 cat CW0GVWegie4_0 person CXEi_k33z08_0 person CXF-MNV21Uw_1 person CXF-MNV21Uw_2 person CXF-MNV21Uw_0 person bKIEzYSD9LU_0 bird bKM4LmiXX5k_3 knife bKM4LmiXX5k_0 knife bKQQdBiIraA_0 dog bKT6s25xsS4_0 person bKh8FyKvOq8_0 umbrella bKic74m-XKg_0 horse bKnsY1ytgqc_0 person bK0HzQHKqhg_0 motorcycle bK0HzQHKqhg_1 motorcycle bK0IN2qoSjQ_1 person bK7Wo0UxDyQ_0 person bLBmIVS2T-0_0 person bLLFtAMqoF0_0 person bLOW53I2oWw_0 knife bLU0G55kWgs_0 car bLU0G55kWgs_1 car bLYGpYiiF7Q_0 person bLg0SdwRkKc_0 person bLneVyHWwdk_0 person bLoyRVgQcTk_0 cat bLoyRVgQcTk_1 cat bLoyRVgQcTk_2 cat bLs4dUFZzcQ_0 person bLs4dUFZzcQ_1 person bMEbcFBdRsA_0 airplane bMM1OZMZ_WY_0 person bMNzE6F4WK4_0 truck bMPPnTHvu8c_1 cow bMQlfzj9vCE_0 person bMZPcnVc1K0_0 person bMakr2vwfqQ_0 person bMdfLBSo6jw_0 bicycle bMfQw6tBALo_0 cow bMgWjlwilqA_0 bicycle bMk8JyTyvUo_0 skateboard bMojajeogfY_0 person bMphaUsZuqU_2 elephant bMrDB2JI0QM_0 elephant bMuSXdxvaWY_0 bicycle bMumJTM0f28_0 person bM3OcevX9F4_0 person bM6fRimkPZg_0 cow bM6peJ4lQyU_0 elephant bNGoGllCEj0_0 car bNGoGllCEj0_1 car bNJ5ygVB-GI_0 person bNPtMp-AuhY_5 train bNPtMp-AuhY_4 train bNR89JLsh7Q_0 motorcycle bNZe9vwuE8E_0 car bNcTCIgwqNY_0 boat bNinDD5s0LQ_0 person bNo2RseLYYs_0 person bNqXgNLQX3s_0 person bNtivYIWtQE_0 person bNtivYIWtQE_1 person bNtivYIWtQE_2 person bNyyHqBZnmQ_0 airplane bN4vggzwxWI_0 person bN-epcJfRJ8_0 person bOPvxhSlnZI_0 
truck bORQv_d22gA_0 bear bOTYFfq_264_0 person bOXM6ibmbG0_0 truck bOarvmUMdLs_0 person bOarvmUMdLs_1 person bOb4k6pTF-k_0 motorcycle bOeUzXPOIWw_0 motorcycle bOfrPHjROWI_0 dog bOm9Qgnl2KI_1 umbrella bOor15z5M5Y_1 truck bOuuxRt7ugE_0 bear bOwOVcqeajs_1 boat bPAO0nyCO8Y_2 cow bPLKx5uJaZY_0 bear bPTTPAsH7v8_0 airplane bPZdC3oRr1c_0 dog bPanGwtU82U_0 airplane bPavgNJxZnI_0 horse bPavgNJxZnI_4 horse bPcXQrlHs60_0 zebra bPddyJH2fm4_0 cow bPeFwxV66_s_1 cow bPfaS8RIHVw_1 train bPjZsDes9ck_0 bird bPvvA8Wm5Ts_0 person bPw91vtx0rY_0 dog bP17881jyH4_0 elephant bP17881jyH4_2 elephant bP17881jyH4_1 elephant bP6QvQUfZSI_0 person bP7ZU4wl_xs_1 person bP7lN2WyBTg_2 bird bP7lN2WyBTg_0 bird bP7pux4nQa4_0 person bQJQKEfdctc_1 person bQKuVB3YmRI_1 knife bQNLK-43XKM_0 person bQNXrSVq4r4_0 person bQQS-amRhxU_0 person bQQr8FzMTHE_0 person bQR6KxB4qjg_1 train bQWO4r5DLWY_7 bicycle bQWO4r5DLWY_8 bicycle bQZ8WQ2mS9o_0 horse bQd1k1RNZZA_0 person bQwDt3XOok0_1 skateboard bQy9W_tIPJg_0 cat bQ7FEMZ309U_0 bicycle bRElYolSzbI_2 horse bRKfUmz_7hE_0 bicycle bRKfUmz_7hE_5 bicycle bRP4TElBetA_0 skateboard bRUtCCY00Yw_0 person bRd_NGjRFpU_0 cow bRgNc063rsk_0 person bRgNc063rsk_1 person bRiVaIWzo4k_0 person bRiVaIWzo4k_1 person bRpbblTb1VU_1 person bRq06zdCv4k_0 dog bRsjD1GTjeE_0 truck bRuSrTOibGY_0 skateboard bRw2PFlL8l8_0 cat bRxyuZTXkWo_0 person bR61bP65wdI_0 person bR_EeaX8Kns_0 cat bSC7MwTZ0Og_0 person bSJbBDA3-rI_0 person bSJbBDA3-rI_1 person bSSSYoS7HhY_2 person bSSX8qJnGak_0 person bSVCTx_L7lU_0 person bSbZuDkimC8_1 cow bScFgdC-DH8_0 motorcycle bSkEsUu7aBI_0 cat bSqX5D_GrEc_0 person bS4mTtP-Ud4_0 person bS4mTtP-Ud4_1 person bTAxiISsPNE_0 cow bTHRXr-yw54_0 person bTOZp15gd24_0 airplane bTOZp15gd24_1 airplane bTOZp15gd24_2 airplane bTO9Pid9808_1 cow bThFysASYJg_0 person bThX-5t7OWM_3 bus bTl-dt761p8_2 bird bTp1hk4dhPE_0 person bTuho6CpJpg_0 horse bT7mzx9P1Yo_6 bird bT7mzx9P1Yo_8 bird bUDYPhSFyyw_0 airplane bUFCsL247kY_1 person bUFCsL247kY_0 person bUIov_O62GU_0 train bUVi7VVygmM_0 person 
bUa61WY6E38_1 person bUu6iW_nRvM_0 person bU8cBepgoMY_4 elephant bU8cBepgoMY_1 elephant bU8cBepgoMY_3 elephant bU8r7rNDaHQ_0 motorcycle bU8r7rNDaHQ_1 motorcycle bVCLNxl4PQY_0 person bVPgCZmg1CY_0 person bVTzUiTPtww_0 person bVZixqlT1AI_0 person bVbT4F3I0s4_0 person bVbdO8rj6TQ_0 person bVbdO8rj6TQ_1 person bVdjQbIzOGc_0 horse bVgKe0-_228_0 bear bVkYqw1YJ6c_0 person bVnmeQsd3xk_1 car bVph6GZ3jLE_0 skateboard bVrck_XYsR8_0 bicycle bVtMukuPx9A_0 motorcycle bVtWuhD1L1s_0 car bVvVMOxHOT4_0 cat bVwWkzYdrvk_0 person bVw9txmBeX0_0 person bVz-pHuWNfc_0 person bV3UXbGCshc_3 elephant bV3UXbGCshc_4 elephant bV3UXbGCshc_0 elephant bV3UXbGCshc_2 elephant bV8k_w0cphI_0 person bV9tUYWi-9o_0 truck bV9tUYWi-9o_1 truck bWCW4QZTIXE_0 person bWCxObc3uVo_0 person bWEnwFThRlA_0 person bWEtMBeQQCA_0 bus bWEw8rNQ-kI_0 person bWJg9jatoBY_0 person bWLcKJauKIs_0 person bWO4NBx37Vk_4 airplane bWdWgIB371Y_0 person bWdWgIB371Y_1 person bWkKy-_YzW8_0 umbrella bWotjBNgmiA_1 motorcycle bWotjBNgmiA_2 motorcycle bWo4CzHWaZ8_0 dog bWqayCqhqVQ_0 person bWtXkAzA6zE_0 person bWtXkAzA6zE_1 person bW1JoZnZpXs_0 bicycle bW1JoZnZpXs_2 bicycle bW2I1hUiWgg_1 bear bW2I1hUiWgg_3 bear bW2I1hUiWgg_2 bear bW6PJACBEFo_0 boat bW6PJACBEFo_1 boat bW7x14tLsxU_0 cow bW7x14tLsxU_1 cow bXGa-FIGViQ_0 truck CXOKkaurfXo_0 person CXVmfrDfalE_0 person CXVyHpmc_fU_1 cat CXXWvUVLBBE_1 train CXXWvUVLBBE_3 train CXaF0E3wEzI_4 boat CXaF0E3wEzI_1 boat CXaF0E3wEzI_2 boat CXdGDPRtlo4_1 cat CXdjIo4q-w4_0 dog CXoeLQPShqU_3 horse CXoeLQPShqU_0 horse CXrwHki5ShI_0 person CXw5HMRQwEk_7 bear CXxPPuZcT2k_0 knife CXyujV2S5aE_0 person CX1US3Y-2jI_0 person CX5Y01eJ_g0_0 knife CX838M4iPkw_1 bear CX_YxpWurRk_0 person CYEtgx1uVTM_0 train CYEtgx1uVTM_1 train CYFtiy8FtgM_0 person CYGBUw8HZ8Q_0 person CYKbj5BgaiI_0 person CYPFpTJXCp8_1 person CYXd3muNlJ8_0 person CYcxxdqG02k_0 person CYcxxdqG02k_1 person CYghFhQySik_1 person CYghFhQySik_2 person CYghFhQySik_0 person CYg8fy66poA_0 train CYjEASXRoys_0 person CYkow-sm2pA_0 person CYmpj4UFFtA_0 cow 
CYsgb4GhJ_0_1 cat CYtehjvIIIE_0 cat CYw9ONxIi0M_4 bear CY3-VTI7lQU_1 cow CY48729zIgM_0 bus CZAt34OJpoI_0 elephant CZGoAqWEDQM_1 horse CZJz6zZt3cE_0 person CZXHqexGqfI_0 cow CZduQndn_Eg_0 train CZfMxEFk9hc_0 motorcycle CZfe1GuZxPI_1 person CZws8sfLA8M_0 person CZ8bjG4wdZU_0 person CZ9MT7tZZ2E_0 knife CZ-Kodbg_2A_0 bus CaA-PFuqaXw_0 truck CaFlo5YQHXw_0 train Cag3vCKRh6c_0 bicycle CajF9IxbOvI_0 person CajF9IxbOvI_1 person Cam_wHie6XQ_1 person Ca4_dI-Ii8o_0 person Ca5GzZ-rifE_2 horse Ca5GzZ-rifE_0 horse Ca5GzZ-rifE_3 horse Ca5mOzqFz70_2 bear Ca6g367yxss_3 dog Ca9JsTGifmQ_1 knife Ca-l5zpgIL0_0 horse Ca-wDaXxSn8_0 train Ca_LwXljv5I_2 dog CbBrv9GkBDM_0 person CbKVR2EGoWU_0 cat CbO4r5w5NEM_0 cat CbTbpHHYfGo_1 cow CbYQk8GFQwY_0 person CbYXzAv9G40_0 person CbZA75LYWsk_0 boat CbZA75LYWsk_4 boat CbZA75LYWsk_7 boat CbZA75LYWsk_8 boat CbZA75LYWsk_6 boat CbbsxxHKQBs_1 bicycle CbbsxxHKQBs_3 bicycle CbfML92fBFc_0 person CbrOGI6D5oo_0 dog Cbz0hgvZtyM_0 person Cb0EbSTABAw_0 person Cb31aGVbcGE_0 person CcJ-51mUw00_0 person CcNfpk8tVxA_2 person CcNfpk8tVxA_0 person CcNfpk8tVxA_1 person CcadL-XHA8w_0 person CccC-FK79hM_0 skateboard CceETksmvEc_0 bus CcfAKl1kCRM_0 person Ccl3EZzzNhc_2 bird Ccl3EZzzNhc_3 bird CcmiWGPbuT4_0 car CcyRYeSG3sQ_0 truck Ccyqd4ZzDtQ_0 person Cc5DUip1-eE_0 person Cc9-Kd--ejs_0 car CdA-Gg7O6d4_0 person CdD0W0pS7gk_0 skateboard CdG8sd9UZFM_1 elephant CdG8sd9UZFM_3 elephant CdOwMZqCiMs_0 bird CdRgo9V_e_U_0 person CdTDo40rdz4_3 umbrella CdVnK1TcGcQ_0 knife CdW2qTShGbY_2 person CdW2qTShGbY_1 person CdYkEASWMqQ_0 person CddXUsFqg4Q_10 bicycle CddXUsFqg4Q_12 bicycle CdeUORbvfgs_0 person CdkbBdQwTX0_0 person CdmrCOVxj8c_0 person CdosWRXaOgY_0 person CdtY-oTmACc_0 elephant Cd3qxnZC6s4_0 airplane Cd8dfcT-D9U_0 horse Cd8zY0wsrLc_0 umbrella Cd_ZgXZ7qKw_0 person Cd_ZgXZ7qKw_1 person CeCnRUGvs9Q_1 horse CeEMUoHNeVA_0 person CeICmGeQXOk_0 motorcycle CeICmGeQXOk_1 motorcycle CeVjsWpfoCY_0 person CekBpSMLr08_0 horse CetmVa_LV2A_0 bird Cetw-N1I1bA_0 dog Cew6y9K7ynI_0 cat 
CezGmkW4sRY_0 person Ce1tW6uV_lw_0 person Ce1tW6uV_lw_1 person Ce_dgPawIkU_0 person CfC--i0DQ-o_0 car CfThv8Vk-oM_0 umbrella CfbzDUZ6PyQ_0 truck CfqtCB_f_Z8_3 skateboard Cfwk3niR9Uc_0 motorcycle CfyvbbrxquI_0 cat Cf_GVLLQaTA_0 person CgB0fwUOZd4_2 bus CgDcN1Lk7ag_0 car CgDcN1Lk7ag_1 car CgDcN1Lk7ag_2 car CgDyrbc-LLo_0 person CgHCCqADKys_0 person CgQl21vwrqk_0 person CgQv6o97KqY_0 person CglmlO92nKA_0 person CglmlO92nKA_1 person Cgod2p17L48_0 person CgwHXWDGAak_1 person Cgzt1Kv6Sqg_0 cow Cg9H20lr5Uk_0 person Cg9H20lr5Uk_1 person Cg9H20lr5Uk_2 person ChBKKPEO8N0_0 person ChOKPIVr5XE_2 bicycle ChPBGkSbJ0g_0 elephant ChRNCk9Bq-k_0 cat ChZB3vAX8sk_0 person Chc7poZ9r-k_3 skateboard ChmcE3Lz1Vc_0 person Ch2_CQg4r1o_0 person Ch-PosNzqZ8_4 elephant Ch-PosNzqZ8_0 elephant CiCqdFq_a7U_1 person CiCqdFq_a7U_0 person CiLbnwjSJ9w_0 person CiQOmR8VCzs_0 person CiQOmR8VCzs_1 person CiQS0RMaLZQ_0 truck CiT09gfBJPA_1 person CiVwjoLvdAs_1 horse CiWhBWV1zGM_0 cow CiWhBWV1zGM_1 cow CiYOn9VW1eY_0 horse CihCAad2Duo_0 person CilRWTfS8e4_0 person CiwaaMNfvCo_0 airplane Ci0S27Qp1w4_0 cat Ci2vW1OGHe0_0 cat Ci6mTJ6BqYI_0 person CjJ3l2smqAc_0 person CjMaorKuwf0_1 horse CjRX9J2BM4Y_0 skateboard CjUf3D9IsCQ_0 person Cje7Ip85T1I_0 person Cjm9Wky44TM_0 elephant Cjm9Wky44TM_1 elephant Cjn-mt97y-w_0 person Cjq3dda3PlA_1 person Cjq3dda3PlA_0 person Cjw2f0M_eB8_0 bird Cj1CpXDG_Qw_0 person Cj3PTZcRbd4_0 person Cj3ZEx4SDe4_0 cow Cj-a9t9yiiA_0 person Cj-a9t9yiiA_1 person CkBGaJnF9vo_0 person CkC43WVctnk_0 cat CkKQhDP2FGY_1 person CkKQhDP2FGY_0 person CkKQhDP2FGY_2 person CkLE-s6CsgY_0 cow CkLwgOIBF_I_0 person CkLwgOIBF_I_1 person CkP_70u-2zU_1 boat CkX8laawskQ_2 horse CkZeki9RVDI_0 person CkZhHtevDk8_0 person CknHFY05prw_0 person CkoK8C4Rzj0_0 person CkvEr5T38Wc_0 person CkvEr5T38Wc_1 person CkvEr5T38Wc_2 person CkyU5jU74Js_1 dog CkyU5jU74Js_0 dog Ck8GRgUrpoE_0 person ClBCXl7l2pw_0 skateboard ClH2-R5LeVo_0 cat ClLZcmIHrTw_0 person ClM3Ftm0S7o_0 cow ClRLFlpMUhU_1 horse ClSzHW4AuJ0_0 person ClV1oHNuF9o_0 
person ClV6A8WNCvw_0 cow bXcKQNGRBvw_3 airplane bXcKQNGRBvw_0 airplane bXcKQNGRBvw_1 airplane bXcKQNGRBvw_2 airplane bXjVvJ8eOJc_0 skateboard bXkjwotai0Y_0 bicycle bXnvGCFA9Dg_0 person bX9TcejzzTM_0 person bYCvd_BTMsk_0 dog bYE-vUOh10s_0 boat bYN8lkupLt4_0 bird bYQiCAwebzs_1 bicycle bYSbuWYiixQ_0 person bYVgzwF1hNw_0 bicycle bYWGnwi8nDQ_0 motorcycle bYm9aUK2zzk_0 person bYpG750b7pE_0 motorcycle bYvzSXZ0w_I_1 person bYwwOO6vMAw_0 person bYwwOO6vMAw_1 person bYyFEbIGMfo_1 dog bY3sDu5BZDI_0 elephant bY3sDu5BZDI_1 elephant bY3sDu5BZDI_2 elephant bY6vPIaJDGA_0 person bY8BdyCsCAw_0 person bZL41d9eFyc_0 cow bZRpdnJtcT4_0 train bZRpdnJtcT4_2 train bZVMygQQgNg_0 person bZVZbn0oTjo_1 giraffe bZdq8Rk75M8_0 knife bZgZihlL0IU_0 person bZsoMlw4CnI_2 bus bZuOWV67gnY_0 cat bZwJl6ye9Cc_0 motorcycle bZwJl6ye9Cc_1 motorcycle bZzzlD0C8Jg_0 train bZ2u1x38Qbg_1 airplane bZ6gk6FLGss_0 person baDesUZ9Pyc_0 bear baRyXrRn_ls_1 motorcycle baWLnj87FOc_0 cat babQ3FBdeqQ_0 cow bagbzsb-tg4_0 person ba1hwKdPRx8_0 cow ba3cGHmc_OA_0 person ba5407XQYAQ_0 cow bbHdRyrdpDA_0 boat bbH4CQx07Go_2 knife bbLW6902ITg_0 person bbLW6902ITg_1 person bbLW6902ITg_3 person bbLW6902ITg_4 person bbM0SbH_pgk_2 bear bbZAdo3awRs_0 car bbZeVbzmLVw_0 elephant bbaUzB0Na2o_0 person bbfDHSIT9ys_0 person bbhyEgEjfvQ_0 cow bbjuucY5QQc_0 person bbkjnF0iGrs_0 horse bbkjnF0iGrs_2 horse bbkjnF0iGrs_3 horse bbkjnF0iGrs_6 horse bbnb-beW0p0_0 horse bb0DRm0ueKk_0 horse bb4sgALviyc_0 bear bb5OO1wMKr8_0 person bcJ1MAj_A_w_1 person bcLW7YqnUGs_0 skateboard bcdQmV1-Z5k_0 motorcycle bcgTPCycRIw_0 skateboard bcksTLjC1fs_0 motorcycle bcrQdxrU_vI_0 person bc1C8HrNVqE_0 horse bc28CjoKODI_0 person bc28CjoKODI_1 person bc3rySF6iao_0 person bc6jeLN-DUo_0 train bdU9JALjnmw_0 person bdYKw4SpkQQ_0 zebra bdZpXHSW4Ps_0 cat bdbVAdua3uI_0 airplane bdbVAdua3uI_1 airplane bdcoNmelRw4_1 dog bdcoNmelRw4_2 dog bdcwT2ufUBg_0 bird bddes6RyfCI_0 skateboard bddes6RyfCI_1 skateboard bdeoe5gmCd4_0 elephant bdeoe5gmCd4_2 elephant bdgSMIY2A8Q_0 horse bdoNsiMM1RY_0 
bird bdwlZMpXPJo_8 bird bdwlZMpXPJo_7 bird bd--DVCeT-s_0 cow beE5VOzxibM_0 giraffe beLTv9YiY78_0 dog beLTv9YiY78_1 dog beLTv9YiY78_2 dog beQOHdCA8KM_16 elephant beQOHdCA8KM_3 elephant beQOHdCA8KM_6 elephant beQOHdCA8KM_7 elephant beQOHdCA8KM_10 elephant beQOHdCA8KM_12 elephant beSTl1azmTY_1 skateboard beVVM2pBQdA_0 cow beVVM2pBQdA_1 cow becTICXjrg4_0 person beliMXc3JE8_0 train besXR1P9Oew_0 car beu-edT1daM_0 person be9BCy6kHvY_2 person be9CXLatX9I_0 horse be-ggiVD4V0_0 knife be-5ARU_aHA_0 person be_IhYef3hE_0 person bfBZLLwpNWA_0 giraffe bfJaD1qZ2gE_0 bus bfJaD1qZ2gE_3 bus bfJtapJ86Gw_0 person bfRgL9oanEc_1 person bfRgL9oanEc_0 person bfS8FB_HOlY_0 person bfZfMA1mLrQ_0 dog bfZfMA1mLrQ_1 dog bfaMdaYiK90_0 cat bffC89pE6fo_0 person bffC89pE6fo_1 person bfkNVFr6Cwg_0 cow bfkNVFr6Cwg_1 cow bflVgDgAHSo_0 umbrella bfrY2wEePwY_0 person bfrY2wEePwY_2 person bfwWF0XO7bE_0 boat bf9YySHJcdQ_0 person bgAOYaooc18_0 person bgAo5vgwe2M_0 zebra bgBK4sMnLig_0 cow bgBK4sMnLig_1 cow bgC-r6p-XHU_2 elephant bgE_uy3Ml6g_1 umbrella bgHMLwWY4Qo_0 person bgV-FqQ8Tv8_0 umbrella bgXZ3BpIOh8_0 train bgaD7K2iEPI_0 person bgbS11O9lSw_0 bus bgelX1blhpQ_0 truck bglPgA_0LAk_0 motorcycle bgpB-A04RLI_0 person bgyEHsMav4U_0 person bhBMa8wQ5KA_0 bird bhGJ9gZmP90_0 person bhGJ9gZmP90_1 person bhH_pqCQ3Co_0 cow bhJGFbgXlts_1 person bhNfsUPLKDg_1 train bhWmpmnXSlc_0 person bhZZubkX8_o_1 bird bhdtzsUvieg_1 person bhqr680CLr0_0 person bhrOzwB-7qA_0 person bhsCCw1J_JU_0 person bhuOX61sk8M_0 person bhz6HG2KpnI_0 skateboard bh0ZZ4Z76cc_0 person bh3QacG9JYk_0 airplane bh3QacG9JYk_1 airplane bh3QacG9JYk_2 airplane bh8aMNVny8s_1 truck biAdsjypETI_0 knife biFm-y7gSrc_0 horse biGJ8vHOsZM_0 umbrella biLY6NMsqJU_0 cat biUFB3c0Ucc_0 bus biZU5SOHQvc_0 umbrella bibJ3Bv5YmQ_0 motorcycle bik9GuCughc_1 bird biuEbYnn68k_0 bus biwbqbVsZeE_1 elephant biyu3sxIOYc_0 person bi1kYvu5Irg_0 train bi1kYvu5Irg_1 train bi3GSUnfzd8_0 person bi5Bkz2MVP4_0 bird bi5Bkz2MVP4_3 bird bi6BNwvsR_0_0 person bi-GKlUZMR8_0 motorcycle 
bjBwCQ5z4IQ_0 cat bjH2OQR68Vc_0 person bjRQ69TaeKs_2 person bjgooTfy3JM_0 train bjgooTfy3JM_1 train bjgooTfy3JM_2 train bjhEqucWULo_0 cow bjq8de0pw5M_0 person bjq8de0pw5M_1 person bjrq_Kj-wSU_0 airplane bjrq_Kj-wSU_1 airplane bjrq_Kj-wSU_2 airplane bjrq_Kj-wSU_3 airplane bjwdTl5zyaI_0 skateboard bjx96uw-Q24_0 person bj-Grf4s790_0 elephant bkElaSUqJjM_0 train bkIBcqXKARI_0 person bkMU7xViDvA_0 person bkXBjOrn2yI_0 person bkggwniG4vc_0 person bkiQTbQF_TA_0 elephant bkigtjV1zA0_1 motorcycle bklheVvsfac_0 truck bkoOiNz6Zmo_0 person bkok3wr4188_0 person bk2l-O9wSEc_0 person bk8UlOzFy7U_1 person blAiGXbJxmI_0 train blIpNvBakFI_0 person blW8z3TPVvo_0 motorcycle blhCjXE5cRo_0 person bli5Z83QY_U_0 person blnFzQdaVRc_0 person bluU1CAbJfo_0 person blubKbt8mLE_0 car bluqyqDv2eE_0 car blv0QslQ524_5 bus blv0QslQ524_6 bus blzDAgvxJMw_0 person bl1XJCtyP2E_0 truck bl2xZSpcZqs_0 cat bl6wIjxfuJo_1 bicycle bl6wIjxfuJo_2 bicycle CloG2hcM5nU_9 bicycle CloLHr7NJqg_0 person CloOQkTkYfY_0 bus ClpDLu1qCx4_2 person ClpDLu1qCx4_3 person ClpDLu1qCx4_1 person ClvAi34e1zM_1 elephant Cl1mEpQ3wy4_0 boat Cl1mEpQ3wy4_1 boat CmEoz728tlo_2 bear CmGSMnkcvrg_1 train CmIXZuJDwt0_0 person CmNv_yKt5oM_0 person CmOIqZyQpPI_3 bird CmOIqZyQpPI_1 bird CmVoggJ6fxY_1 horse CmYL2EyELbA_0 elephant CmezWT8A2i8_0 bus CmjUCOwcOT8_4 bicycle CmjUCOwcOT8_11 bicycle CmjjEuS9_Ww_0 bicycle Cmjw8kbfDCw_1 knife CmoknpL1cMA_0 person CmqXoT7CXJs_0 dog Cmq1qVX-Ugo_1 cat CmsqpFOcosw_0 person CmtmoydPH08_0 cow CmxhIEztsyg_1 skateboard Cm1y7USHcrg_0 person Cm3tYZlSc0o_0 skateboard CnBJ9TMTRAA_0 person CnBJ9TMTRAA_1 person CnCTVtsK5Kw_2 bear CnEXHgq3AE4_2 elephant CnGp9Wq2rTs_0 bear CniS9Q6Y200_0 person Cn0UKsWocEI_0 elephant Cn0UKsWocEI_1 elephant Cn1dXZ_p3dw_1 person Cn9Bj5B29UI_0 motorcycle CoBuNWx_OwM_0 person CoDB7ZeilsQ_0 person CoKMowfrd5Q_2 truck CoKMowfrd5Q_3 truck CoKVaYX3c1k_0 person CoKVaYX3c1k_1 person CoKVaYX3c1k_2 person CoOwm7ccDrs_0 truck CoSIyrW5lvA_1 skateboard CoSSvI2-U_w_1 bicycle CoZY8o0c-h8_0 elephant 
CoZY8o0c-h8_1 elephant CocSNWws-Qo_0 person CodelARKQ10_0 skateboard CosYvoW04Uk_0 person Cot7Xj8C308_0 boat Coz9g_0N91c_0 person Co_XBpd6lxE_0 person CpDHwc5JmK8_3 elephant CpFiT_6KvM4_0 person CpF-80dM2aY_0 person CpF-80dM2aY_1 person CpxxxHYsJy8_0 person Cp0lT2opaL0_1 person CqANE5ByBvY_0 person CqDjHjvw8T0_0 elephant CqDjHjvw8T0_1 elephant CqVeLNnA0vk_0 horse CqZz9FnLLjk_0 knife Cqkhrld_7LU_0 person CqzahbOVzO4_0 person Cq02-pFNn6w_0 motorcycle Cq02-pFNn6w_1 motorcycle Cq4KAVAWq7g_0 person CrAxPJajbcs_0 airplane CrCNqDd18fw_0 umbrella CrUmEDCjFtU_0 person CrUmEDCjFtU_1 person CraDHWuN4Q0_0 person CrgMhrCYmOo_2 motorcycle CriTKYemGmo_0 person CrmzwYKpLAY_0 umbrella Crn24ZKAP1k_0 person CrsjxpJoY5Q_0 person Cru8KBJqhng_0 person Crz3l2CEDzA_0 person Cr0SWcS1qX0_0 cow Cr_B3I0QPEQ_6 airplane CsM_GTD0TZE_0 person CsPLGd2dgl0_1 airplane CsTntmE8EWs_0 person Csa542XNEXo_0 person CsfkuwD6-nA_0 person Csh_4yR8bFk_1 truck Csh_4yR8bFk_2 truck Csii4vkefsM_0 boat Csii4vkefsM_2 boat Csw3kLrhjoM_0 person Cs38JY7Gqjo_3 skateboard Cs-Vx_ym23o_1 bicycle CtC2yC9NGTk_0 bird CtD4wnIU0Pw_0 bicycle CtF9IxfLhaQ_1 person CtF9IxfLhaQ_2 person CtF9IxfLhaQ_0 person CtHIoS1lGKA_0 person CtLVK2j48gA_0 person CtO5dmTdzYQ_0 person CtPEAoFPnE4_0 person CtQPPKpIEIc_0 person CtTcyoZvRvU_2 skateboard CtUPPSKU8cE_0 bus CtVUqIFqqr8_2 bus CtYDJRkhtpg_1 umbrella CtYDJRkhtpg_5 umbrella CtfPPnpBKHs_2 bird CtipU0GHAEo_1 elephant CtjTAe-FFe4_3 elephant Ctkjh9fntpQ_0 bird Ctkjh9fntpQ_4 bird Ctkjh9fntpQ_5 bird Ctkjh9fntpQ_2 bird Ctkjh9fntpQ_3 bird Ctnjw80kgcw_0 person CtxK3wGlqx0_2 motorcycle Ct1QrXUgBGg_0 person Ct1QrXUgBGg_1 person Ct8S9nC7sfk_1 person Ct870xrnBGU_0 person CuDfCpgoIjg_6 boat CuGfRQMwYd8_0 cat CuHF9Hd0uwI_0 person CuIkNejeZrY_0 cat CuUJUrjEcc4_0 person CuWdZPYMLww_0 person CvDW2A8hD78_0 person CvRJwKt7FfY_1 skateboard CvVVS4SUiuw_1 train CvZaA28QUK4_1 knife CvajmAL3sjQ_0 person Cvda-hutmbg_0 dog Cvqylkq9fwI_0 truck CvxsoaCV1_8_0 person CvzsX_s6tek_0 person Cv2T8U0uQcQ_2 person 
CwAdBrBzIcA_0 truck CwBiMh4zHWQ_0 person CwFcmrnz1yw_0 elephant CwFcmrnz1yw_1 elephant CwFcmrnz1yw_2 elephant CwR2tJptu0Y_2 motorcycle CwVLRawns04_0 person CwVTSONqnVw_6 knife CwnHi50fuuQ_0 person CwnHi50fuuQ_1 person Cw22-zpE1UY_0 person Cw3iLs4yV4g_0 person CxFRYsUCyWc_0 cat CxH8vGqLVM0_0 bicycle CxH8vGqLVM0_1 bicycle CxH8vGqLVM0_3 bicycle CxH8vGqLVM0_6 bicycle CxJ7Uww1mSk_0 elephant CxN5CG94Q5Q_1 airplane CxN-YEErXFg_0 train CxPyIeBtRec_2 truck CxWaiU0rF9g_1 cow CxWaiU0rF9g_0 cow CxXdw0Cqr4Y_2 airplane Cxa8q3QXoRs_0 person CxgqklOxSfo_0 airplane CxgqklOxSfo_2 airplane CxnCTBBNWCY_0 person CxnCTBBNWCY_1 person CxoZT0--IBo_0 person CxooWldim98_0 person Cxs-xZDDZWw_0 person Cxug83tjWyc_0 horse CxzJV_HYpAc_0 airplane CxzJV_HYpAc_1 airplane Cx0XeFKQ06o_1 train Cx7ZY8oqOmE_10 bicycle Cx7ZY8oqOmE_6 bicycle Cx7ZY8oqOmE_8 bicycle Cx9efnltcUY_0 person CyE1kuECzfg_0 person CyH0woBc0zU_0 boat CyI7nyp65bI_0 person CyI7nyp65bI_1 person CyLLTzV_lAg_0 cat CyOXSqLm7ao_1 person Cyb4-vF1WMM_0 airplane Cyedl__okwE_0 person Cyedl__okwE_1 person CynfaDsQ1AI_0 zebra CysFfEkdDT4_0 bear CytiPd_Wbkg_0 airplane CytiPd_Wbkg_1 airplane CyvInNqvQyE_0 truck Cy002CigJRQ_0 person Cy_hvqOd0RY_0 knife CzFRG22Jmvs_0 cow CzHeIzQZUEg_0 person CzNFSb4N6p8_0 person CzQ03Z7Dv5U_2 skateboard CzQ03Z7Dv5U_3 skateboard CzQ03Z7Dv5U_6 skateboard Cza2-_wwpd4_0 person Cza2-_wwpd4_1 person CzcwXF0Z1TQ_0 cow Czt8McI8UTE_0 person Czze2Jy6Ook_0 cat C0Tk6QryTA0_0 bus C0Tk6QryTA0_1 bus C0a9pkujXQg_1 person C0lvs-UEqKs_0 person C0pOQ36uosU_0 person C0pOQ36uosU_1 person C0qbh7OJTHI_2 skateboard C0tGKqnFyZA_0 person C0xTDmlUYSA_0 person C0xZYHsXNws_0 person C0xZYHsXNws_1 person C0xjvq51pVA_0 horse C0xl46ieUxg_0 skateboard C0zUOQoeQrA_0 person C0zrmcMf8D4_0 bird C05P4mCw-xA_0 bear C1DCcNlUQDk_0 boat C1DX9TjKTrE_0 bus C1MfcNYih9c_1 person C1RCXQFjvvc_1 person C1RCXQFjvvc_0 person C1bdSMUVy2Q_1 truck C1bdSMUVy2Q_0 truck C16ZlJRDfUc_0 bird C16_rFYBwUA_0 person C17jwrOnSCI_0 horse C19rR4b8CSQ_0 dog C1_gk-bIL6Y_0 airplane 
C1_tauCAYjs_0 person C2GvHXU8mIc_0 person C2HZBTrCAf8_0 horse C2Hcs2itPTc_1 elephant C2H_P7MX3zw_0 bus C2H_P7MX3zw_1 bus C2IJYHPWHJM_1 cow C2K7zu49SKw_0 person C2K7zu49SKw_1 person C2LdkQMjxJk_0 cow C2ROFMcXam4_0 cat C2S4CV9mnC0_0 truck C2VjZHe3ID8_0 person C2r9VGslxTE_0 person C2v7hcs3Ax0_0 zebra C2zRn25TBOo_1 airplane C2zRn25TBOo_2 airplane C2zRn25TBOo_4 airplane C2zRn25TBOo_6 airplane C23ZGYnWhgo_0 person C26HiGgIjYg_0 person C2-glFtt9Vw_0 umbrella C3LbuiUjzvo_0 cat C3LbuiUjzvo_1 cat C3LbuiUjzvo_2 cat C3Qu-KUydyg_1 cow C3UX9hrlLeE_0 person C3YcvZKgCgY_0 person C3terpXzPm4_0 person C3z1zbkmwdU_0 bird C30B6KXg9vs_0 person C3399zrSQ6A_0 horse C34_EkCWJaU_0 motorcycle C4HzsadhLW0_0 boat C4QHknuNLYI_0 person C4RAj-omUMo_0 person C4W_g9eheB8_0 skateboard C4XGGPoj4q8_0 person C4dV8SPq6Mk_0 person C4e-5QS1FmU_0 umbrella C4e-5QS1FmU_1 umbrella C4irKghQYTE_0 horse C4jghf6KKYI_0 skateboard C4vFHmzTY-s_0 cat C4xJ3_Wrrn4_0 train C4yVuAqcr0U_0 train C409K0fAxiM_0 person C42397qio9c_1 skateboard C4317zxtzKA_0 person C4-k1XW5O3U_0 dog C5DAyL_gEQU_0 cow C5GJx1VFRm8_2 cow C5HT9La1jDY_0 person C5JobuZa590_0 skateboard C5MJ8fSfmLw_2 bear C5dPwnswp8Q_0 cat C5jo-fCBqmA_0 person C5jo-fCBqmA_1 person C5jo-fCBqmA_2 person C5pop0SvnOM_0 person C5r41vkLsKE_0 person C5sXGZRLfmU_4 truck C5sXGZRLfmU_6 truck C5umaWklWFQ_0 boat C5ybfGh51LM_0 cat C55z9Fe6H7A_0 dog C56Bp4toMG8_0 person C6NYuB7zIzs_0 person C6NYuB7zIzs_1 person C6XCgppHkHA_0 bus C6Yy8uEd0bQ_0 person C6aB6M0DHrU_0 person C6cOmWIisxU_0 person C6eN6sMtuXY_1 boat C6gNbZUU7xg_0 person C6ia-W4TV1U_0 horse C6nHtSy67OY_0 cow C6n6ECY5h84_0 cow C6qWzx58kxo_0 elephant C6qWzx58kxo_2 elephant C6rqmPvlIlI_0 person C6upTeuDG4E_1 skateboard C6xv6Wmy97M_0 horse C62nD-_VXpM_0 horse C62nD-_VXpM_1 horse C66OM90TFXI_0 train C66OM90TFXI_1 train C66z-I_UHqQ_0 airplane C6_p7BXwCTA_0 elephant C7CB2A_bxa0_0 person C7COsB9pcOQ_0 person C7CXGBdoJWo_0 cat C7KZnM_0j8s_0 person C7QYoT22ZYo_0 train C7W0oxkg-nU_0 bicycle C7kKR6pqYzw_0 horse C7to6tRsC9U_0 person 
C72k6hv1NPM_1 cow C72k6hv1NPM_0 cow C7-sqpILAXM_0 person C7-sqpILAXM_1 person C7_HhvBNDSw_0 person C8ETc2K6ef0_0 train C8G_kcqjspU_0 knife C8IE7aLZvIA_0 person C8IUB4Opf44_0 person C8IUB4Opf44_1 person C8PqOHn0izQ_6 bird C8Zex-ptYyk_0 person C8daRmtyPo0_0 person C8fcFW4HKGs_0 airplane C8mEWe-TWYs_0 knife C8n1dTEDWvk_0 skateboard C8ukXeoRjbI_0 cow C9Zq_rDHwgg_1 cow C9dD6oS_Zs0_0 person C9je005HOlA_0 bus C9jqFBMRyPs_1 person C9vG5qPPhzE_1 train C9wgqGACPso_2 elephant C95TX0IOPa8_0 skateboard C97oHqKqdBk_0 person C97t3TGT2oc_0 person C-AoVBwcBUw_0 person C-FX5hgFDd0_2 person C-Q9RDsPyOw_0 person C-Q9RDsPyOw_1 person C-S34-Drg7M_0 cow C-TWHpbtVNY_1 person C-WsGZQoLx0_0 boat C-cL2hzThKI_3 airplane C-cL2hzThKI_6 airplane C-omy9mzD7E_0 person C-q9nO8X1rs_0 person C-seg-BCK0U_0 bird C-v3Ttrvuo8_0 airplane C-38hraIyOs_0 person C-47EdafspI_1 airplane C-54wttM4AA_0 person C-9LBJqCMm0_0 train C-_ebeJtjyE_0 person C_BX3dg-lc4_0 person C_DOGAVETwk_1 bird C_EMJm-Z2I8_1 bird C_EMJm-Z2I8_2 bird C_EwPB6zgIA_0 person C_EwPB6zgIA_1 person C_GnC_IEwJM_0 person C_GnC_IEwJM_1 person C_HBU7EUsoE_1 person C_HBU7EUsoE_0 person C_IjqR1NOxw_0 person C_POS7ndKw0_0 truck C_PXq5TsPRQ_1 train C_TfufSsuEU_1 person C_VePcGhr10_0 knife C_aP0fKyudQ_0 horse C_aYcFttRC8_1 person C_aYcFttRC8_0 person C_cUky_0p2Q_0 cow C_uGdKk79X0_1 person C_ykabkQ2U0_2 person C_2EFIuyDSA_0 person C_2p_N8Kvpk_0 person DAJkfl5W8Vc_0 horse DANymtBuoIs_0 dog DAOBGjTf7xI_0 person DAQ9-YTrpp0_0 cat DAU6UNdxbRI_0 person DAn4fH-1Ucs_0 person DApkEgrJX0Q_0 person DAqHnZA6tBQ_0 truck DAtSTeTmg8I_1 horse DAwdyKiZyzM_0 person DA1bsx2RsGA_0 person DA1bsx2RsGA_1 person DA4LF3u2VTI_0 car DA5X-ADHM1w_0 person DBFMXaS9LRg_1 umbrella DBLaZSSthxo_0 person DBR0l2rW6Ew_0 horse DBVbRonJkb8_0 person DBaAVcI4Ftw_0 person DBaAVcI4Ftw_1 person DBmVOTuCJ8Q_0 person DBvOm1qnWrA_0 cow DBySPDEqsO8_0 person DB1Cvyyike0_0 airplane DB3lsf7fD84_0 dog DB6TJh9r1Dw_0 person DCE8Dg_ycjo_0 truck DCHv6sxfCAs_0 person DCPk1uyVNlU_0 person bmHyfvCZWsg_0 
elephant bmHyfvCZWsg_2 elephant bmHyfvCZWsg_3 elephant bmLLdC88ohM_0 train bmMB6Mr1uKI_1 person bmPhh5NpV7U_0 person bmQbHpw-4fY_1 bird bmUFMo3pjyo_1 airplane bmhSkbKIg0U_0 cow bmhSkbKIg0U_2 cow bmhSkbKIg0U_1 cow bmhfPSKCY8I_1 dog bmqPIwMWGj4_0 person bmuIwo4T6rk_0 cow bmvh7yxyWcY_1 horse bm2eU4uLgQE_0 skateboard bm8MRDfmerA_2 person bm8MRDfmerA_0 person bnOUoCjxIvA_0 bird bnWQnn3a2xE_0 cat bnZwZd6xdHY_0 person bnc1LyPUCLg_0 train bnfN43NoRbA_0 person bnqbJR2oSPk_1 person bnqbJR2oSPk_0 person bnsuTEBQy44_0 person bnw6G0Prvc0_0 bus bnyALwWqo4Y_3 cow bn8epY7auRE_1 person bn8epY7auRE_0 person bn9y-iIDoUU_0 person bn9y-iIDoUU_1 person boHeJDDjRf4_1 person boIKCyPzxr8_0 bicycle boNYwNYmh1E_0 cat boVVWwoXNDw_0 truck boZ6xZrNpzc_0 person boadjC5Lci8_0 person bocql7vYA4o_0 bus boja3N4XQVo_0 person borBr_AiOmM_0 person bornws-twE0_4 airplane bosTHwpZ8Ao_1 dog bo7P3hYkeog_0 person bo9sUjViaHQ_0 person bo-qyHCKssw_0 bird bo-qyHCKssw_4 bird bpI4nUgSqbE_2 person bpI4nUgSqbE_0 person bpI4nUgSqbE_1 person bpJNbivFLKE_0 skateboard bpdgYRz5hPs_0 person bpiM4FHf540_0 person bpjVhXyB4M0_0 airplane bpjVhXyB4M0_2 airplane bpsMni7yj3M_0 truck bps3HXPsekI_0 bear bpu9NYWxcEE_0 skateboard bpyH8PRkBQM_0 person bp1zW8j_ajo_3 bus bp26IdTs4XE_0 person bp3rDJju8n4_0 person bp3xwI_FfOI_0 elephant bp6K7EUtORo_0 cow bqBtysMz94c_0 person bqEmBkEnR1c_0 person bqGkchWbZYE_0 car bqJcZwUB1Go_0 person bqPKigpT9AY_0 person bqQk37pcpVA_0 person bqaeUBH6J3Y_0 person bqhQG8t_2XA_0 person bqjcNzWyaC4_1 airplane bqoG__OO_5g_0 person bquLxAXnaww_0 truck bqwFWjwCZas_0 truck bq6n9q-Qpv8_0 person bq6870eY1a8_7 bicycle brDq8RFzVTo_1 truck brIIDuCmk-E_0 person brLbzZeRz1o_0 person brLeJHMfMXQ_0 horse brNR68fKeMk_0 bus brWg7FAeBEA_0 person brZj8bv9oxY_1 person brhA4NqjrgQ_0 horse brh4hrmrs0Y_1 skateboard brpbaoTNe4s_4 bicycle brpbaoTNe4s_0 bicycle br3e--6oH8Y_0 airplane bsGmFJGua4w_0 elephant bsR9KXIHlCM_0 umbrella bsVBX8u9pW8_0 bus bsXpGvnXpmk_0 cow bsa-G_HEllM_0 person bsbzpk_ejJk_0 person bsbzpk_ejJk_1 person 
bsgdfqE8ySk_0 person bspbqjb3wAg_0 person bsv_swJ9_KY_0 knife bs2FVeXKiYQ_0 person bs3u00S0eu0_0 person btI7FYFXsfI_0 person btL1Ptjq7pM_0 motorcycle btMmnZdL_uQ_0 person btO34shZMZo_0 horse btSyjckocDA_0 person btVQJbFp8Dw_0 cow btdt4lysW6U_0 dog btihrVidTTg_0 cat btk27mnJY_A_1 person btrdQ6N7QJc_0 truck btrdQ6N7QJc_1 truck btsT4XRF0nI_2 cat btul_U3BMKI_0 bus btvg47tz3Ps_1 person btvg47tz3Ps_0 person btz7EwI5rYY_0 person bt75khQG0w8_1 bird buFiFNHj41w_0 person buOqwfPnqkI_0 cow buRfiT3Mq6Q_0 bear buSgd-PrRmA_0 elephant buSgd-PrRmA_2 elephant buSgd-PrRmA_6 elephant buSgd-PrRmA_8 elephant buWf8ffXWTs_0 person bue8SUcqigE_0 cat bugTv6zkE0Q_0 person buh8d20UxNw_1 airplane bulc7gZ_YQY_0 boat buqR3s7EZeQ_0 person buq0_IIvQqc_0 person busJdrzEeJU_0 truck buyJwHRaSYc_0 person buyJwHRaSYc_1 person buzd3FYmwQQ_0 bus bu6QE_qf8fw_0 skateboard bvLQLfRAI9s_0 person bvW_ZJYSOLg_0 person bva98_iD8pI_0 person bvc6dUfKFpM_0 skateboard bvg-QHsENSc_0 umbrella bvnuyMz5Pk4_1 person bvnuyMz5Pk4_0 person bvqPJIDHXHI_0 person bvqPJIDHXHI_1 person bvwJ75OkrTk_0 person bvwJ75OkrTk_1 person bvwwPOK7lN8_0 skateboard bvw4raRDAys_0 person bvxAWBUG1zk_0 dog bv6ASjMljew_2 person bv6ASjMljew_0 person bv6ASjMljew_1 person bv7NOTxSDhg_0 person bv7lroHoMyE_0 person bv8CHN4kwyM_0 person bv9J7oplKjY_1 bird bv-ps8hofSY_0 person bv_rrakMnsY_0 elephant bwB-cfh8UFY_0 cat bwIBXBulTRg_0 person bwM3RKdZAd0_1 airplane bwM3RKdZAd0_2 airplane bwSSE1XeKkg_0 person bwSSE1XeKkg_1 person bwTJKRhesM4_0 person bwZEDD10b44_0 person bwd7bbxG4Kw_1 person bwjUOg-CI1E_0 horse bwotbTZHoPA_0 horse bwotbTZHoPA_1 horse bwv4Q2VqV5A_0 bus bwv4Q2VqV5A_3 bus bwwud6bxEeY_3 elephant bw1HepCVmL8_0 person bw3c96BQrRU_0 car bw3c96BQrRU_1 car bw96DHOgI1I_0 airplane bw_opOTzI6k_0 dog bxRX_05rH9Y_0 bus bxXWi1nvXjI_1 bird bxYeOYlqDPc_0 cow bxaC_opt7IU_0 truck bxjIDI2ZkO4_0 cat bxnu-AITJt4_0 person bxoclb4AFb8_0 person bxsI00qOi6c_0 person bx0h8tvY6kw_0 person bx6BVBAcBtM_0 person bx6BVBAcBtM_1 person bx7PtvZe6O8_1 airplane bx7-RzWnIe4_1 
truck byDPGQJdn1s_0 person byQIRt1JF9I_2 dog byQIRt1JF9I_0 dog byQIRt1JF9I_1 dog bycJD4U6rIs_0 bird byehVoG0_eg_0 person bye0FepI8wg_0 bird byi-4Qx3vx4_0 person bykN9ap_QTw_0 bird byvddKaL_kw_0 person DCRIRGz2xhc_0 person DCRIRGz2xhc_1 person DCUcxHDfYiE_1 cow DCUvhnZnRGQ_0 horse DCXrBMEdS4E_1 person DCrv8CyK9zM_0 bus DCx698xXxjs_0 person DC0PPRyXlD4_0 person DC4ZTdVoj2o_0 boat DC5fRZmUZV8_1 airplane DC8lKdla6rE_0 person DC8lKdla6rE_1 person DC_Kd2iaw9U_0 person DDZILIDFFXc_0 elephant DDd8CfnxkYM_0 person DDgtm9B7Yj0_0 train DDhlugZ-vro_0 person DDhlugZ-vro_1 person DDjUzAM4mLE_0 bus DDjUzAM4mLE_1 bus DDjUzAM4mLE_2 bus DDjUzAM4mLE_4 bus DDoBBLQQ1Mg_0 train DDtWIKexWpM_0 skateboard DDw2iF2W4HI_0 bird DD4YGjlBsHc_0 boat DD844YVVMXE_6 bicycle DD844YVVMXE_0 bicycle DD844YVVMXE_1 bicycle DD844YVVMXE_3 bicycle DD844YVVMXE_4 bicycle DD844YVVMXE_5 bicycle DEHHjz2xiz4_0 person DEI-qJD08Pc_0 person DELUfY3m37k_0 person DEVUyfQt_G0_0 cow DEVUyfQt_G0_3 cow DEVUyfQt_G0_1 cow DEXhh5rt_24_0 motorcycle DEXhh5rt_24_1 motorcycle DEZHoMWiFBQ_1 person DEau5L3A9S0_0 person DEjPKQLASJg_0 umbrella DEtj0Fb-Jbo_0 skateboard DEuYWYNXbw4_0 truck DE3kl7rbakE_0 skateboard DE6z5oB-0vo_0 elephant DFBlkKPYtl0_1 cow DFBlkKPYtl0_0 cow DFI7_dtUb0U_1 giraffe DFI7_dtUb0U_3 giraffe DFRmdyjR_Dc_0 giraffe DFb4KWUX31Y_0 person DFpZ6f1iWT4_0 person DFwPVEPK4-Y_0 cat DFzgqOHlnAk_0 person DGC_pivLAEE_0 person DGMfSMlhL4w_4 elephant DGMfSMlhL4w_6 elephant DGMfSMlhL4w_13 elephant DGMfSMlhL4w_17 elephant DGM9CDF3ks8_2 motorcycle DGM9CDF3ks8_0 motorcycle DGM9CDF3ks8_1 motorcycle DGbZYKPp7XI_0 person DGc9VSWQUyQ_2 person DGc9VSWQUyQ_1 person DGp5vBVf28g_0 person DGsQAjKXPBw_0 cat DGs0ZHnAtkg_1 person DGs0ZHnAtkg_0 person DGvsndSWlBw_0 elephant DGx5aC4h8wg_0 horse DGygUuHcJhs_0 person DGygUuHcJhs_1 person DG8TJBoerZ0_1 person DG8TJBoerZ0_0 person DG93jIsco3E_0 person DG93jIsco3E_1 person DHB_RgHOHdo_0 umbrella DHB_RgHOHdo_1 umbrella DHLK8xDGwL0_2 knife DHLg5KzzoOM_2 cow DHLg5KzzoOM_0 cow DHPWnuYI2qA_0 person 
DHSGQLguGZ4_0 truck DHdFVfp7SvM_1 horse DHl_QoiyZ2I_1 person DHl_QoiyZ2I_2 person DHl_QoiyZ2I_0 person DHqrGwHgnAA_0 person DHr77uGYi-g_0 dog DHsorh6ngMI_0 umbrella DHs1KtWx2n4_0 person DH0OVsYB2vs_0 person DH5nSZZ6uJE_0 umbrella DH_wEdP1Glk_2 train DIFEQ3rorSw_0 person DILtO1oyoCY_0 person DIOuJC_mv_k_0 person DIO8l6DAJX0_0 person DIO8l6DAJX0_1 person DIP8d1YC6vM_0 person DISU2i6bJqs_0 cow DIaTXSXAfJM_1 person DIaTXSXAfJM_0 person DIpJyhb8gzw_3 motorcycle DI7rj5AAYEE_0 elephant DI801ysby74_0 knife DJD4Xlf0eNg_0 person DJKFzJe6KAk_1 skateboard DJKokwprK90_2 skateboard DJLSHLPE0po_0 person DJQ8goQ4xyo_0 person DJV-ft_10HY_1 person DJjjrdYts2s_0 elephant DJ4oQ03HqyE_0 bicycle DKBIz_MLIpw_2 knife DKC58UBq-0w_1 airplane DKEmSml-t4c_1 person DKEmSml-t4c_0 person DKHCjzNZE3U_0 elephant DKHCjzNZE3U_4 elephant DKICHseWnGQ_0 person DKJ3As_9Mlw_0 person DKKsGGUWero_0 person DKLxBVm3HHk_0 airplane DKMUARFnh2Q_0 person DKShwn6Xk8w_0 cat DKZ21QA0lBM_1 person DKcpPg_tEUU_0 skateboard DKj3fFeAaL8_0 person DKq7d2C6gOI_0 motorcycle DKxIadOj4D0_0 horse DKyckH3XY8Y_0 bicycle DKydJWySeUw_0 car DLKE31mt2Qc_0 bird DLLrkv1aF-k_0 train DLMDzB4XBPg_0 person DLPmEX5pwY0_0 cow DLT57E3vm98_2 truck DLct7_2tyWI_0 person DLd6kxxgSUM_0 person DLkx4w5oteM_0 person DLmCj6q5vD0_0 person DL3V2mhMX7M_0 skateboard DL3eQSTbZ9Y_0 skateboard DMB6Mr7lTSI_0 person DMEXGsc-PaU_0 person DMFEU87_IrU_2 boat DMR4kX1M_zk_2 elephant DMR4kX1M_zk_1 elephant DMTP7OyjdJ4_4 bus DMT_n1VJG80_2 bird DMbwyGKLF4c_0 person DMb-AjUXKe8_0 giraffe DMiFC67o2P0_1 horse DMiFC67o2P0_2 horse DMiFC67o2P0_3 horse DMn1JpU6MBE_0 person DMn-kaSNd5Q_0 person DMuLn7wJTcc_0 person DM7c57qvjgs_0 person DNAMMWkSfLY_11 umbrella DNAjFU24eK8_0 boat DNB4bgEP-8Y_0 person DNGlLqzJF6Q_0 person DNGlLqzJF6Q_1 person DNOZeC0gZzs_0 truck DNXuVh_X_qY_1 person DNXuVh_X_qY_0 person DNhOrRaOe2M_0 person DNul7ILzxkQ_0 person DNul7ILzxkQ_1 person DN0xWDfCAM0_0 motorcycle DN1ujoUaAKU_0 person DN1ujoUaAKU_1 person DN4TuB3csDg_0 person DN4e8ljPm1g_0 bicycle 
DN5mGCGzOOY_0 person DN7FitWe9k8_0 person DN8yb60bxNc_0 person DOAU-JodN0U_1 airplane DOAmtFxCuKA_1 person DODU9JghuAA_0 cow DORauVZJhAU_1 person DORauVZJhAU_0 person DOhLqHOLbQY_0 person DOiUy3AGiKw_0 person DOiUy3AGiKw_2 person DOoTpSSHVho_0 truck DOoTpSSHVho_1 truck DOsVwDV787M_0 bus DOuULWa1RKM_0 person DOvC_-Yrn5k_0 cat DPAEt1AqwbQ_1 car DPCyQOQdLHE_0 cat DPFO_O_f3hc_0 cow DPIm8x0i2yo_0 motorcycle DPJ7ZSWY2Qs_0 skateboard DPXJpAVtRfM_0 train DPXJpAVtRfM_1 train DPZi4DZaTmk_0 person DPZi4DZaTmk_1 person DPelBJ73uaU_0 bicycle DPo9M61p8gI_0 umbrella DPvxwOvedrQ_1 knife DPz3CG4lD2Q_5 truck DPz3CG4lD2Q_6 truck DP2q1TrqjAE_0 person DP2q1TrqjAE_1 person DP6ZB5PxNfc_0 person DP-JZPR9HFc_2 elephant DQDV1Wr7qo8_0 bear DQOglBZHFCs_0 bear DQZiSQmMBnc_0 bird DQcCfbTKP1s_1 person DQcCfbTKP1s_2 person DQcCfbTKP1s_0 person bywgcqNg6RU_2 car by7PLb7MqM0_0 motorcycle by_OJvQqlKE_0 person bzKVRbSQpZE_0 knife bzLdvZQAWgA_0 person bzO5MBTTrdQ_0 person bzRELZo9WMU_2 dog bzRELZo9WMU_0 dog bzZgsynjAGk_0 cow bzfE3U02_44_1 person bzfE3U02_44_0 person bzimWzymgu0_0 person bzquVP0NUms_2 truck bz5Ht4jyT0k_0 bus bz66OedbeoI_0 person b0C_2T7-IfU_0 cat b0GlXXGkfRQ_0 person b0GlXXGkfRQ_1 person b0HXAfyZ7Sk_1 person b0Q3EfK70fg_2 airplane b0Q3EfK70fg_4 airplane b0Q3EfK70fg_5 airplane b0Q3EfK70fg_6 airplane b0a7ewqE8S4_0 dog b0nOQfZSaUo_0 person b0nt17hBmDw_0 boat b0qXUUs3-WE_1 person b0t8uuynzIM_0 train b0xQRq8njAI_0 cat b0z1nalEX08_0 truck b0-UOt-DT1A_0 person b1ETK4nP9ag_0 dog b1EnXvOZQbQ_0 truck b1Gd5IWJBRI_0 person b1R3uk0VLc4_0 person b1SyeZsSk80_5 elephant b1SyeZsSk80_3 elephant b1UAPTD4s74_0 person b1UpjRRBrTw_0 cat b1cpAYk99_U_0 person b1cpAYk99_U_2 person b1cpAYk99_U_3 person b17OiOMReIs_0 person b1-WFxZ7Lcs_0 truck b2DqNP9s4t0_0 person b2Tm_7DUimQ_0 person b2Y6KLIX5vE_1 motorcycle b2Y6KLIX5vE_0 motorcycle b2azzMxEH84_0 motorcycle b2fq5Ba1L8M_0 person b2fsE3wZfWM_1 person b2m2gaVpjNE_0 person b2qNS9qjYbE_1 person b2tlrwd_LIg_0 person b28pEbOSeUs_0 dog b2_dSc2NxNI_0 person 
b3KP0d-WX38_0 bicycle b3KP0d-WX38_1 bicycle b3KP0d-WX38_2 bicycle b3R6fHlRZu4_1 bicycle b3R6fHlRZu4_3 bicycle b3R6fHlRZu4_4 bicycle b3SsKosfjOA_0 train b3SsKosfjOA_1 train b3SsKosfjOA_2 train b3UOZHA5jRI_0 cat b3Z1Ay2o1zQ_0 knife b3bkNCYQbwc_0 cow b3p-fFVYM4E_2 train b3p-fFVYM4E_4 train b3p-fFVYM4E_6 train b3tgGsan2vc_0 truck b3x6f5xFPTQ_0 horse b3x6f5xFPTQ_1 horse b3x8Gwk4V8o_1 person b3x8Gwk4V8o_0 person b323CLKf_vM_0 person b34Cdm6l5_k_1 airplane b34JUq19S0E_2 motorcycle b34JUq19S0E_0 motorcycle b34JUq19S0E_1 motorcycle b344je6lVYA_0 airplane b35ihWGyz_4_0 cat b37tPdAEkEw_0 person b39uBVwcm48_0 motorcycle b4E8uT19QkY_0 bus b4E8uT19QkY_1 bus b4FBbr4Pud8_0 person b4GXrkSKAdA_0 cat b4HAPQ_xX5E_0 person b4HAPQ_xX5E_1 person b4KwBIif5OY_0 cow b4KwBIif5OY_2 cow b4KwBIif5OY_3 cow b4KwBIif5OY_4 cow b4UXSjdnqZ0_0 person b4Xn8--nfvI_0 person b4aEJNvYqtU_0 bear b4j8lkkY_lE_0 zebra b4tTUDVt6Gk_0 person b42WUwHAKPs_0 boat b455pPKgTj4_0 person b5D9lQq3uf8_0 bear b5IshxZjL7o_0 motorcycle b5NxbNaAo_8_0 person b5R1HVvc040_1 train b5S8Db1Gu7I_1 bicycle b5S8Db1Gu7I_3 bicycle b5T_VSM7nbg_0 motorcycle b5nwFyniymA_0 dog b5ud9dsnS1c_1 person b5ud9dsnS1c_0 person b51dSWD8MF4_0 elephant b59pPUKW_78_0 car b5-eXPHW4Mg_0 person b6AoStVIzkw_2 person b6IE2imnfp4_0 person b6MtzhRufn4_2 skateboard b6MtzhRufn4_0 skateboard b6RIavVJ660_1 person b6dVZMAHwro_1 airplane b6gsIu7Pxbc_0 dog b6ndIInoIzU_0 boat b6xUAyNCbdY_0 person b61MghNCCTI_0 person b61MghNCCTI_1 person b65S2P2Pfms_0 person b66BE9WdQP0_2 bicycle b7HqfhRNtAQ_0 cow b7H_n_w2eFQ_0 person b7Igw_OO-P4_0 person b7LHlx86tk0_0 train b7RYkf4oXv0_0 skateboard b7WQe48-0NI_1 giraffe b7WQe48-0NI_0 elephant b7WiE1a8IAM_0 person b7go-l8jA5s_1 boat b7hJ62ORLHc_0 person b7iLQoOKVrM_1 horse b7ivqvv6s6A_0 motorcycle b7mawJlPASQ_0 person b7u0NZEc8OI_1 person b7ycKg8GLHA_0 person b71SThzfrDg_0 bird b78PYqyYWZA_0 person b8LqaxvNRHw_0 person b8LqaxvNRHw_1 person b8VoRclgULc_0 cat b8aWJIa4RFI_0 giraffe b8es8BWiC5c_1 person b8g4M9Yov8M_11 bear b8g4M9Yov8M_3 
bear b8xtOCMwjJM_1 bird b8x1qHT8nvE_2 boat b8yA8bHlrtQ_0 bus b8yqEFXS8Ck_0 horse b82N91HYnUo_0 knife b9O_mJTNj2A_0 train b9SLHObDJzQ_0 horse b9Y5tpPv-LQ_0 car b9iCmG9fIHc_1 motorcycle b9melHkIeV4_0 bird b9oiO21MJh0_0 horse b9oiO21MJh0_1 horse b9u4WV9ft4s_0 motorcycle b9wwfAu5DCs_0 skateboard b96WdT0DXKk_2 bicycle b96WdT0DXKk_0 bicycle b96WdT0DXKk_1 bicycle b98Gs0d8AKo_0 motorcycle b9-xiVm1Xck_0 skateboard b9-2bW13faI_0 person b-Cp0i6fBOU_0 person b-Cp0i6fBOU_1 person b-S7G5A0MNI_0 person b-T0AS7CuxI_1 knife b-VYy9eEU6w_0 person b-W1PY33nQg_0 person b-hT8zKObfM_0 person b-hqwYjKCH8_0 truck b-i49sLOjBo_0 person b-i49sLOjBo_1 person b-mQajOHUAA_0 person b-mQajOHUAA_1 person b-mQajOHUAA_2 person b-ncxt38EFw_0 person b-wiIOBccF0_1 person b-x--HjbnpM_0 knife b-5K7RwiHdw_3 boat b-8ARNgk-Tw_0 person b-_FeNpM_wI_0 person b_B3oYiBWi4_1 skateboard b_KBD-NL4Vo_0 train b_ZVDwMrcEU_0 airplane b_exMPY7gnM_0 person b_fR7aS10Z0_0 bear b_h4xugql44_0 umbrella b_kksCK6cbw_0 cat b_n776bwyJo_0 boat b_n776bwyJo_1 boat b_vDLf3193s_0 bus b_1TwBIgwKE_0 car b_7EvlxDWFc_0 truck cAARR6q3Qq8_1 skateboard cAARR6q3Qq8_0 skateboard cAFqK_6ltXw_0 cat cAJsxlkMG_s_0 dog cAJsxlkMG_s_2 dog cAJsxlkMG_s_1 dog cAKfCLDFg34_1 person cASL6wZ33vA_0 boat cAYIECe6Bvs_0 truck cAnDryag2FA_0 truck cAqs3d9KNzk_0 person cArYvJEUdOg_0 horse cA0HCmGOK84_8 horse cBAG9pjaV70_0 cow cBBDfwkH23A_5 horse cBBDfwkH23A_2 horse DQk3Xvbv57I_0 cat DQqBXfTgqTE_0 train DQ04rtHIqHQ_0 elephant DQ7GZOJxra8_0 person DQ-vQygnOx0_0 train DQ-vQygnOx0_1 train DQ-vQygnOx0_2 train DQ-vQygnOx0_5 train DQ-vQygnOx0_7 train DQ_yyvagS0g_0 truck DRMoOpmUgn8_0 person DRO4MalcQFk_0 person DRSSiSNzV7Y_0 person DRXxJArWrQA_0 person DRaIGIiQXd0_1 train DRaX3P2ysBk_0 person DRhRKwI26n8_0 bear DRhRKwI26n8_1 bear DRseWxukwaI_0 person DRsoi5DxAJk_0 car DRuDqkZ0zfE_0 person DRuDqkZ0zfE_2 person DRuDqkZ0zfE_1 person DRxLQ6we5YU_0 horse DRybt0Cgr_U_1 bird DR0QGL0n_wM_0 person DR4mzyMklY8_0 skateboard DR82KhNzs1w_0 person DR-AMnnLCCQ_0 cat DR_jo4aSqn0_0 person 
DR_jo4aSqn0_1 person DSAbzYpUW5w_0 cow DSB9X3bgG2A_0 person DSCt67aveiw_0 truck DSCt67aveiw_2 truck DSEt02E1kJE_0 person DSM_BlK-ggg_1 person DSM_BlK-ggg_2 person DSRGbK9rPbo_0 train DSWlLGL3xj8_0 horse DSZkEwhJEI4_0 skateboard DSaSooZZeAg_2 bus DSn5-dKW_P0_0 person DSoRmFNRxiE_0 person DSoRmFNRxiE_1 person DSqy2MlVOxE_0 person DSq0q8dCuCw_0 truck DS5z-K8Cpzs_0 person DS-V_NKOawo_0 knife DTBhYAFcQ94_0 skateboard DTFg8SeWhbE_3 skateboard DTYiSIRTXW8_0 knife DTZkCYvGZ9E_0 person DTm5L6IAHC4_0 person DTnIC_Q8YoY_1 boat DTs2uXh47Xw_0 person DTtejx1VYBs_0 person DTvjWj60ixI_0 person DTvzQwX0KRQ_1 horse DT4KxrhD89E_0 person DT7TSCbFXek_0 person DUAhVOWkluQ_0 person DUAhVOWkluQ_1 person DUBzIIKht_w_0 person DUBzIIKht_w_1 person DUB3OOi7dQc_0 person DUHEv94Tyno_0 person DUHEv94Tyno_1 person DUHEv94Tyno_2 person DUPQ3fPhomY_0 person DUQa7q5NTQI_1 horse DUZhPq4FiJM_1 person DUb6-VQcokc_0 cat DUlYPwiuBrw_0 truck DUlYPwiuBrw_1 truck DUmKu-rc7jI_0 person DUwVOy7IYvA_0 person DUxGnuYB_GI_0 cow DU1ww3ryP7s_0 person DU4acd1_vuI_0 person DU8jvzO9tEA_0 zebra DVFfZw4HW3E_0 train DVFfZw4HW3E_1 train DVK9BrG_Y_8_0 person DVOFKTeh9BY_0 person DVgCgSDZVw0_0 person DVjOMylPUfU_0 person DVlEnd5Ra2Y_0 person DVm_-u6oWwA_0 car DVqsCPYrMrg_0 person DVqsCPYrMrg_1 person DV4GPAloBks_1 person DV4GPAloBks_0 person DV79-MpnE1Y_0 person DWQ0kmCIT0E_0 person DWZNfCg0W8o_0 person DWjj9U_lr30_0 person DWoRZEAFpUI_0 person DWqyeu4eovM_0 horse DWuaB5j6-CQ_0 person DWwGWBcxL0k_0 person DW1iqzQEWkE_0 person DW4OTTF7Jc4_0 person DW8G3A0trOk_9 bear DXEqDJWN72E_0 person DXEqDJWN72E_1 person DXI2AmrILgw_1 cat DXa15hEKLAc_0 truck DXgs-pfW-0M_0 train DXpyVrXMs1w_0 person DX5AP4s6u0k_0 bird DX867I2CNRk_0 airplane DX-PbjeeB6o_1 giraffe DYJJBRoUlnU_0 knife DYUiMLisOzs_0 person DYbb8_mMeLs_0 horse DYhTdNMuv5g_0 knife DYkV2TPfOBk_0 truck DYlrCUMDv_g_0 cat DYpBOmbclGY_0 person DYqIQv97tuE_0 person DYvHdc4rnxk_2 person DYvHdc4rnxk_1 person DY0ggbU0cIk_0 person DY3h0Y3ijmo_0 elephant DY3h0Y3ijmo_2 elephant 
DY6eQdk8jaE_0 person DZESlirYB3I_1 train DZGEjl9U78c_0 person DZIFKtO6y2Q_0 person DZIFKtO6y2Q_1 person DZMd9NPNnLE_0 person DZRZg1gGn1g_0 bus DZWsGelqCPg_0 person DZXldsAgY7o_4 skateboard DZYjfZMMVAE_1 person DZgbeXD-bZg_0 bear DZqs7ie6HPU_0 person DZ3JlgmRHQ8_0 person DZ4G9EBImOM_1 person DaMdWu7CyRE_0 person DaRYBq6zsmY_2 elephant DagKzwyphZY_0 person DapmUIRDw3o_0 airplane DaqVTidNtg0_1 person DatNYbTqxlw_0 person Daz5kZBXn5c_1 elephant Da10JheIcaw_0 person Da25bjhf1WQ_0 person DbAZPBnTh3U_1 person DbGX12xMbWM_0 person DbNOHXsDP5I_1 boat DbSGsjNmQ8A_0 cat DbXz_8anwSM_0 person DbZGV4ixs2E_0 bird DbdZugU9GWk_0 bus DbeCxvMCD-Q_0 person DbfJ2s7qQJ8_0 truck DbivV-It_rM_0 person Dbmwr1_ObHM_0 person DbnhReILFSs_0 person DboUAm-F7Rg_0 person Dbpte835xwc_0 person Dbqj1XCvcGw_1 cow DbrGY3BalZ0_0 skateboard DbrGY3BalZ0_3 skateboard DbrGY3BalZ0_2 skateboard DbvkTKJjRj8_0 person DbwEevYFGrg_0 person DbzakdG34mg_0 car DbzakdG34mg_1 car Db3OG025sz0_0 person Db74WjMmf-0_0 bear Db74WjMmf-0_1 bear DcAxPsNVe28_0 train DcFWetycnqY_0 person DcKjrocJ8iM_0 person DcKjrocJ8iM_1 person DcOl0Ec1kuI_0 person Dca5CTtFQZ8_0 motorcycle DcexSE28IOA_2 person DcexSE28IOA_0 person DcexSE28IOA_1 person Dcfs-bFQcxk_0 person Dcj-1vKe6iI_0 elephant DckRd1CpSm0_0 skateboard DckTHE_Pn5Q_0 person DcknQtmjIDA_0 elephant Dclr-tDJMO8_0 person DcpuJSx5z78_0 person DcpuJSx5z78_1 person Dc3yhv5mfN8_0 person Dc4EXPP0fqU_0 cat Dc9dWfPxIEM_0 bicycle DdGvFcujfxo_0 person DdHWfz7kw4I_0 person DdJuIi7LexI_0 bus DdKvI-6rMII_1 person DdNpi-Pmvgc_0 person DdNpi-Pmvgc_1 person DdNpi-Pmvgc_2 person DdOk9lG9b1k_0 knife DdUa-CozM14_0 person DdUa-CozM14_1 person DdYyeGgXLKw_0 person DddB5joJQC4_0 airplane DddRHyvYqFI_0 person DddRHyvYqFI_1 person Ddf4T9I0sdI_0 person Ddz7VVJXgHs_0 person Dd2qrXASEzk_1 person Dd2qrXASEzk_0 person DeCtt_QZqjk_0 person DeCtt_QZqjk_2 person DeFuoRV0yCw_0 person DeFuoRV0yCw_1 person DeHiMvczAD4_0 person DeIpwOsUzjw_0 person DeVZ83g93sE_1 bird DeViLrLvD1Y_0 horse DefHSc2VTOo_0 person 
DfGzSVv2ELQ_4 horse DfGzSVv2ELQ_1 horse DfGzSVv2ELQ_3 horse DfS7lvAcDQc_0 umbrella DfS7lvAcDQc_12 umbrella DfT_7BUGNQA_0 person cBI2gZhpA-8_0 person cBMnKBVcoOE_0 person cBMnKBVcoOE_1 person cBQJU95uwwM_0 person cBQJU95uwwM_1 person cBSbDKv-Z_o_0 car cBb6VPKgF1M_0 knife cBeH0xcCCWE_1 person cBhDn0TkAdc_0 elephant cBhDn0TkAdc_2 elephant cBhDn0TkAdc_3 elephant cBhDn0TkAdc_1 elephant cBlqBEElvDI_0 person cBpFzTn_uOo_0 person cBvZAwlCN4M_1 horse cBvZAwlCN4M_2 horse cB1RhnpteUg_3 airplane cB9XRu3bb_0_0 person cB_RQN9IXg8_2 skateboard cCA7llOU4HQ_0 person cCEUd1IZ6OQ_0 person cCEUd1IZ6OQ_2 person cCMe4KdqzeI_0 person cCaz75u-bCM_0 motorcycle cCfInBOvqkk_0 person cCfVriTflG8_0 person cCnjh5F8dvM_2 boat cCvpQCZ33xQ_0 train cCwB7O-yg4Q_1 airplane cCxZRIxh_yk_0 cow cC2UgNbG7Rs_0 cat cC3-bziiNKk_0 cow cC3-bziiNKk_4 cow cC4nZNGoC-g_1 horse cC4nZNGoC-g_2 horse cDGz5cnIzK0_0 train cDIc8cs3igI_1 person cDL0YZ_vXOk_1 person cDaR5WdXvIo_0 dog cDfSk2g6wRM_0 dog cDg-vYWO3AI_0 umbrella cDvCYN97QYU_0 dog cDvWWER9oeI_0 person cD_EAISZcwM_0 person cD_zwwrcvkI_1 person cEAwCEnfITY_2 horse cEFLP7rdZSU_0 person cEIAg54WPCs_0 skateboard cEOHFcu3Uic_0 person cEOqnkbgfMQ_0 person cEXYVwmcpSg_0 person cEdeOfPvcQ0_0 person cEomNeUqQUI_0 umbrella cErRs5qv8mc_0 elephant cEyCX-t8Jlo_0 bird cEyCX-t8Jlo_1 bird cEzC3hwdO_o_0 person cE7AS1hrlYA_1 person cE7AS1hrlYA_0 person cFBoLads7vA_0 person cFHTt7uFxH4_4 umbrella cFOk-AMS2Aw_0 motorcycle cFOk-AMS2Aw_1 motorcycle cFkmNa2nYEk_0 person cFoUf9UmoZ0_0 person cFq4fzO00qE_0 cat cFtfKwaxphA_0 person cFuoJPf6prU_0 skateboard cFzjl_SiNhg_2 dog cFzjl_SiNhg_0 dog cF0SM2Lf82s_0 person cF7uQwB8sEg_0 person cF9YklqKEp0_0 cow cGBOBTCgzP8_3 horse cGBOBTCgzP8_4 horse cGCbcyeQqG8_0 person cGCbcyeQqG8_1 person cGC4pGWPOUk_0 person cGC732t-itM_0 person cGEvxRn1UtQ_0 person cGNmKg25XMs_0 boat cGUXUioIa4o_0 person cGVaIIV18ug_0 person cGcyxMp1ZQc_0 person cGcyxMp1ZQc_1 person cGdeftwBWL4_0 person cGiVzhQI2a0_0 person cGpNQ9Vk-5E_0 person cGtaJVgvTJg_0 person 
cG1_sZqy7lU_0 person cG2fL1nRZmE_0 person cG5TxH-1Sf4_0 person cG65cBtyj20_0 cow cG7BBtumZnQ_0 dog cHCYX0EqsfE_0 person cHQLun1YTiM_1 person cHQLun1YTiM_0 person cHSjCxvPumA_0 motorcycle cHWE72lnzZo_0 person cHYcXW7HAkA_0 person cHaBQgTFdr4_2 knife cHjKy80ojXM_2 bear cHkm25QAG8A_0 truck cHnV0yZTha4_0 car cHpaD5PtHnM_0 cow cHv3ulnF1fo_0 person cHyjhzLIeO0_0 person cH2A35uULdc_0 person cH2g9vV4SyM_0 bird cH27awicc50_0 person cH8zYhvzdb8_0 person cICrfFzHoZs_0 person cIFXOWG5Dd0_1 person cIF9coXttVs_0 person cIIlWssV9Sk_0 person cIJSKwcTQ10_2 bicycle cIJSKwcTQ10_3 bicycle cIPlCULXXHQ_3 elephant cIPlCULXXHQ_2 elephant cISwax-t_78_0 person cIVGJQrNkT8_0 person cIV9T5ZQmdI_0 person cIh9baL5Hzw_1 person cIjMwiaApEc_0 person cIvqOdvwX6w_0 person cIwDGqmKrfY_0 person cJH4RK9aVR0_0 elephant cJJDfdbopiQ_0 person cJSjHpF7ILg_0 airplane cJUj9q6wgis_0 person cJfW0Gfkzrg_0 knife cJjaVdNaUko_0 bus cJnihDxg0wg_1 dog cJtGcHMJlMA_0 person cJ0hAba-pck_2 giraffe cJ0_u3Ta6kU_2 skateboard cJ0_u3Ta6kU_0 skateboard cJ2f7qDBm7M_0 horse cJ41GQMsJIA_0 dog cJ6BfbrgwDM_0 person cJ7Akre7-Sc_1 cow cJ7ZHI-8gU0_0 person cKO8G1ZXQgo_0 person cKdank8BDik_0 person cKgqIdOoBmE_0 person cK4yj3jgWek_0 person cK5MabT7iIA_2 train cK5MabT7iIA_0 train cK5MabT7iIA_1 train cK9R8KdVuIE_0 person cLKgng5yuC4_0 person cLKgng5yuC4_2 person cLKgng5yuC4_1 person cLPSEK3_jEE_2 horse cLPSEK3_jEE_3 horse cLPSTXefj2Y_0 person cLY_N1jEC8E_0 person cLg1pn5Oh1k_0 person cLlL2uHDyBw_0 bird cLnQAhX42Eo_1 horse cLnQAhX42Eo_0 horse cLn0Kz_p2U0_0 train cLrXQvFZ-y0_0 knife cLvgs19Vm18_1 person cL2jFa-Zd_M_0 person cL4k6bdNmbs_0 boat cL6G_y5LoDo_0 motorcycle cMGnmOyYWcM_1 person cMIyGPpW9Xw_0 person cMJhk7y1Nng_2 bird cMJhk7y1Nng_0 bird cMJhk7y1Nng_1 bird cMOULCqujvs_0 cat cMRhR707ZfA_11 bear cMRhR707ZfA_13 bear cMeXNjQUwe0_0 horse cMg1O__kPFA_0 horse cMwsAfZMG1c_0 person cMwt7xBZ9i4_1 person cM6-id-uhMg_0 person cM6-id-uhMg_1 person cNLuZxPpWho_9 elephant cNLuZxPpWho_14 elephant cNLuZxPpWho_1 elephant cNLuZxPpWho_4 elephant 
cNLuZxPpWho_8 elephant cNLuZxPpWho_11 elephant cNLuZxPpWho_13 elephant cNalYSGXOkM_0 person cNnMvF7oiUo_0 horse cNr9rjOJ0ps_0 person cNxEreBWMRc_0 person cNxEreBWMRc_1 person cOD8xhwGfME_0 person cOD8xhwGfME_1 person cOYK17trE9k_0 person cOYK17trE9k_1 person cOZOzY6XDLU_0 person cOalncX8fwg_0 airplane cOalncX8fwg_1 airplane cOalncX8fwg_2 airplane cOalncX8fwg_3 airplane cOalncX8fwg_4 airplane cOkVxYbnFRs_0 person cOkiG4LRtQU_1 truck cOp33oi4C8E_0 skateboard cOzNmIBhiMY_0 person cO1F_0l1vSU_0 person cO1MbnbgUbU_0 dog cO3WA2g_UeM_4 bear cO3WA2g_UeM_2 bear cO5xsG3ud_0_0 train cO7nCAZ-uLk_0 person cPBvSHKPNvk_0 person cPdRddyxsVA_0 cow cPdjr1zTQQ4_0 person cPeGSXSLepg_0 person cPkbg5bdpcE_1 person cPkbg5bdpcE_0 person cPn5c5t2g6w_3 skateboard cPqAK1E1Ajo_1 dog cPqAK1E1Ajo_0 dog cPsXS3_4zOk_0 bus cPu-riLrt1c_0 person cPu-riLrt1c_1 person cP-gl2IN_AI_1 person cP-gl2IN_AI_0 person cP_nenKIU4g_2 bear Df70QgKA_Hc_0 person Df70QgKA_Hc_1 person DgSwJVCLkYM_0 person DgcSsQKaX7Q_0 person DgoFmJFWpUw_0 bear DgtiaphLkMc_0 person DguiMPx8nn0_2 person DgvI1azs_0E_0 airplane DgwM5b-eKvc_0 person Dg2sU0bmBho_0 person Dg8r8QlJw80_0 person DhAkswxLuAs_0 person DhJZwbql4dc_1 person DhLD44-KIUU_0 person DhYbvvwSsEA_1 person DhYbvvwSsEA_0 person Dhd-0-xOF6I_0 cow Dhl-jIQaam0_3 person Dhl-jIQaam0_0 person Dhl-jIQaam0_1 person Dh6APdqkNZ0_0 person Dh_6tF8ndZs_0 person DiAj24Xsadk_0 person DiDELcBJWh4_0 person DiPjO5frbNc_0 person DiQ-VgXIDMo_0 person DiVX_-kQv0k_0 person DiVX_-kQv0k_1 person DiWi-oWT9EI_0 boat DiXsD6VHEr4_0 person DiZ4OCT30AM_0 person Dia6QIxORbM_4 airplane DihnxPkojnQ_0 giraffe DihnxPkojnQ_1 giraffe Di41WoS7T1M_1 bear DjAQs68BiwA_1 giraffe DjB4dpC4TVs_0 horse DjD15NlLBYI_1 truck DjD15NlLBYI_0 truck DjK1R_LBqgM_0 person DjMnoAbMiIU_0 person DjMnoAbMiIU_1 person DjQF34GUthk_0 person DjS-0VOep0Y_2 person DjXtIIwfITI_0 person Djb2blFeoNM_0 person DjdAxUWgSdk_0 knife Dju4Bl2fx88_0 bicycle DjyldIzPJbA_0 horse Djy5UE0Ofa8_0 person Djy5UE0Ofa8_1 person Dj7DVsCVqqY_0 cow 
Dj9npayKJqk_0 elephant DkAG7dFDk94_0 person DkC_iJTIrYc_1 person DkC_iJTIrYc_0 person DkF-LqA7wSk_0 bus DkNY4yun6ek_0 boat DkPYbKRQBE4_1 motorcycle DkTfU9q9U_I_0 cat DkTqTY04y30_0 person DkTqTY04y30_1 person DkbRBY4ZlFY_0 bicycle DkbRBY4ZlFY_5 bicycle DkbRBY4ZlFY_6 bicycle DkbikYoLycQ_0 bus Dkmab-wxSy4_0 person Dkmab-wxSy4_1 person DknRMqifZFE_0 skateboard DkpZP7RtrJM_1 bus Dkqy-okNDVM_0 person DkrkY6blx3U_1 person DkrkY6blx3U_0 person Dk0wXCp-USs_0 boat Dk1QPiNji5I_0 skateboard Dk4V0c6Yzbs_1 boat Dk47lOWl3NM_2 cat DlCMYyDhSVY_1 person DlCMYyDhSVY_0 person DlDFQ88ui2A_0 person DlDJpNWKuPM_0 knife DlFJTfO-mc0_0 cat DlG-VsdsPCk_0 motorcycle DlTE01-45gQ_0 airplane DlX2Yvp20gY_0 person DldXGda7zfE_0 person DldXGda7zfE_1 person Dlg5BFm20wI_0 person Dlg5BFm20wI_1 person Dl3fDWG23zU_0 person DmG9v9xVPbg_0 person DmIeMGzqZEc_0 cow DmJ9x-DFdqA_0 person DmJ9x-DFdqA_1 person DmLGGv6YNEo_1 bus DmL_6_a_54g_0 bird DmNmgatXwU8_1 knife DmSRZp63qTo_1 truck Dme3Rfsqbz8_0 person DmiucPhqXMg_1 bus DmiucPhqXMg_4 bus DmlMgF-BuRo_0 person Dmt8pgQG3M4_1 skateboard DnLVGRyXAR4_0 person DnN9tjwPn-0_0 person DnR4VFNo44s_1 airplane DndaJVRuOoo_0 person Dniy3zze90s_0 person Dniy3zze90s_1 person Dnj_fhGXHC8_1 bird DnkUzsPqjE8_1 person DnkUzsPqjE8_2 person DntJ297deXI_1 person DntJ297deXI_2 person DntJ297deXI_0 person Dnx6TlTvRfI_0 person Dn80jV69sbs_0 person DoEWhY2BkZo_0 person DoOq_FhWze0_0 person DoPKGr2HJwM_3 bird DoRoLk97UqY_0 truck DobAdZVysXc_0 cow DohloSZ6YdA_0 person Domgj6ptFOs_0 bus DpH2eSmcTk4_0 bus DpJA_qYLobk_11 bicycle DpJA_qYLobk_0 bicycle DpJA_qYLobk_2 bicycle DpJA_qYLobk_5 bicycle DpJA_qYLobk_6 bicycle DpJWhFnF2Fo_0 dog DpR63uhHTjo_1 horse DpWw1SaCdTQ_0 person DpbGsvglx7Q_0 elephant DpbGsvglx7Q_1 elephant DpimIW1T2Sw_0 person Dpp32dLn0hQ_0 person DpvuhymOiUM_0 person DpwjQ_KcYAc_0 person DpxoJ_GWJA4_0 giraffe DpxoJ_GWJA4_3 giraffe DpxoJ_GWJA4_4 giraffe DpxoJ_GWJA4_1 giraffe Dpz-s6E9VWg_0 person Dp2pGcutqDQ_0 person Dp2pGcutqDQ_1 person Dp4XaG6247k_0 person Dp5KRKUJBGE_0 cow 
Dp6qJvgV4fQ_0 person Dp71z8eyq7o_0 bus DqBNoutsr4M_0 person DqBNoutsr4M_1 person DqDElT9H4Tg_0 boat DqESUtRuhPw_0 dog DqVUeH6XI2Q_0 person DqegnRXQd5Q_0 airplane Dqi5KTmt04s_0 bus Dqy6NbRkVPE_2 skateboard DrAnw0S9Pmc_0 person DrCKp4YB7rI_0 person DrE7aW7O0eQ_0 person DrFxlXYC6-o_0 person DrGCtlmxxVc_0 person DrPpkd-UxFY_0 cat Drc0Grdb_LU_0 cat DrgjySu3e-c_0 motorcycle Dr9XXUA4UKc_0 person Dr9XXUA4UKc_1 person Dr--We7lD3I_0 person DsA5QOOIZJw_0 person DsP87b0IuoU_0 person DsZ6Cf42EdQ_0 person DsiAcCUi8iE_2 bear Dsm48Msjw6k_0 bird DsxyH6AKBd0_0 truck Ds0GIUe1AFo_2 person Ds0GIUe1AFo_0 person Ds0GIUe1AFo_1 person Ds3E7n1kRQk_0 train Ds44yYfSEr8_0 bird Ds8xwquSVkw_0 skateboard DtKSEQhjq2I_1 cat DtQGDwZ1PIU_0 truck DtQGDwZ1PIU_2 truck DtSpyLMbD9o_1 motorcycle DtU93_s53sI_0 train Dtc3hZBmn9Q_0 person DteEg93cINc_0 person Dtf2WRyd4OA_0 airplane DtgUpKmdw_g_0 person DtuRiD_E6HU_0 person DtyatJX8J1A_0 bicycle Dt1MDqN3TCs_1 elephant Dt1PLFoRvoM_7 airplane Dt1PLFoRvoM_0 airplane cQAr7IVeBrU_0 person cQC7jBc1pC0_0 person cQIviFGN-_M_0 train cQOFvBNN9to_0 airplane cQOFvBNN9to_1 airplane cQPP6SqX-uk_0 truck cQbqByuUnW8_1 car cQgUGmyvkJ8_0 train cQttS-GIM5c_0 person cQttS-GIM5c_1 person cQw1wXvFnLM_0 person cQ29m5z8Cnk_1 cow cQ4aR8OLr74_0 motorcycle cRGrqg7y9tE_0 boat cRVqyVvxjHI_0 train cRczdkzrJ-w_0 cat cRnDFinbH-s_0 bird cRrjU515FKg_0 person cRvAv1Nn-WQ_0 cat cR6qM7wjtDw_0 knife cSDafQMsYwc_0 cat cSJ2ISog6Pw_0 bird cSJ2ISog6Pw_1 bird cSLerMX3IBg_0 person cSNwXF8OcR8_0 cow cSO-70KCypM_0 skateboard cSVIvCYuDtU_0 cow cSdBaGsGWKk_4 bird cSdBaGsGWKk_9 bird cSdBaGsGWKk_1 bird cSdBaGsGWKk_3 bird cSdBaGsGWKk_6 bird cSdBaGsGWKk_7 bird cSdUwiTGXPc_2 motorcycle cSor-u6VHHw_1 dog cSqMDH0-sDs_2 person cS398dAyQ9k_0 cow cS-QgqiUgLQ_0 person cS-QgqiUgLQ_1 person cTGOQnmi7bo_0 person cTLa1dxk76g_0 person cTUTNgp9rZ4_0 person cTUTNgp9rZ4_1 person cTayBCWq6xo_0 person cTiETDBrGv4_0 skateboard cTiETDBrGv4_1 skateboard cTk8pacLUcc_0 bus cTmv-vp89sY_0 elephant cTmv-vp89sY_1 elephant cTsipIh7xF8_0 
cow cTvxGA-EvvY_1 person cTzz_ZCUpxc_0 person cT4Y0HSeBgg_0 elephant cT5UlPnc5MQ_0 person cT5UlPnc5MQ_1 person cT7LjXG7ByI_0 airplane cT7LjXG7ByI_1 airplane cT7LjXG7ByI_2 airplane cT7kZP5B_2s_0 bus cT_US5II64I_0 person cUEWtKzcAsM_2 airplane cUEWtKzcAsM_1 airplane cUM5ajI3KJg_3 horse cUNExkBml18_0 person cUSRVmcbXxI_0 person cUS9QgCXcPo_0 person cUWmN_HuZiA_0 person cUYlfMGqB_8_0 dog cU7JEUo5qdM_1 person cU7sT9UHs7s_0 person cVCqOzgt2vI_2 train cVCqOzgt2vI_0 train cVM2h5qbyUw_0 elephant cVXIaONp5o8_2 person cVYqiMXSh9g_1 person cVbcrOx7768_0 person cVfH0tFh5Kc_0 person cVfWBtl-qK4_0 truck cVq5VnfZtNw_0 person cVr16pInr5k_0 person cVsZMfMaxSM_0 person cVtyGQKWFcI_0 motorcycle cV0a2ScBxpE_0 person cV0a2ScBxpE_1 person cV1mBGRlLe8_0 bird cV1szYodba0_0 motorcycle cV8BGLBROa8_0 person cWBCCAo3pUM_0 bird cWBTkrImlLQ_0 train cWBTkrImlLQ_1 train cWGCbw5I6cI_0 skateboard cWIDcoPB3Rg_0 person cWKf_KANUSM_0 person cWRO27zzxF4_0 person cWaVXNQ5cvg_0 person cWb-i8hj8uc_0 person cWcJrAQuNA4_0 bird cWtIT6V98zc_1 person cWxELKsh43s_0 person cW2hQE3lS9k_1 person cW4fmuV2JuU_0 skateboard cW7OrsSn-m8_0 person cXP1Lit5Pmk_0 person cXS9VytLIjM_0 cat cXT5_AFSI8Q_0 person cXUdqfIp-Hs_1 person cXUdqfIp-Hs_2 person cXWgDE6boPQ_0 person cXZt2UZe6QQ_0 motorcycle cXaAcHkHUzU_0 person cXsRP67GHA0_0 person cXsRP67GHA0_1 person cX0yQ5KIAKw_0 person cX3mnglolLE_2 elephant cX3mnglolLE_3 elephant cX6lyv1DI80_1 airplane cX-s4BNxb0c_0 person cYHq8xoYMO4_1 bus cYVLbgGxJMM_1 person cYnyDXx580I_0 person cYpas0B5zEo_0 cow cYvyTVEqiEU_0 giraffe cYwkpA75A8Y_0 person cY1cmlwRnaE_2 bicycle cY1cmlwRnaE_1 bicycle cY6HDOEiINs_0 skateboard cY_INarfLQ4_0 person cZA_Yoq3vy8_0 person cZB5MQY5kVA_0 skateboard cZDoXwn5lv8_1 person cZPvtKaqRxc_0 person cZU2LAWtwUM_0 knife cZZT6OJ6xGk_0 horse cZZT6OJ6xGk_1 horse cZe888DWA8M_0 person cZgt8s4mARc_1 person cZugy4cYVng_0 cat cZz6eOuSV9Y_0 person cZ155yARalk_0 person cZ155yARalk_1 person cZ7siEIFHlI_0 cow caAnHYU-Gwk_0 horse caGQ2b4L930_0 person caGzwv3HLKU_0 skateboard 
caLKu0yKW0Y_0 dog cacCjMLNpIg_2 bird carYHHE3y3A_3 knife cavT34ZvciI_0 elephant ca4_gKs6MN0_0 bear ca8aNafTzeY_0 person ca_weHSJH80_1 train cbRztq6KZn0_0 horse cbVll1hxlDA_1 person cbVll1hxlDA_0 person cbvbRxOMJ-A_0 truck cb6YFX4CVqc_2 airplane ccIWh5JBil8_2 bear ccIWh5JBil8_0 bear ccQ7JnYrTL8_0 bird ccQ7JnYrTL8_1 bird ccRdzj5Zi-U_0 person ccR-h9z3bRI_1 knife ccR-h9z3bRI_2 knife ccVJXErLdOo_0 dog ccWTUq_mvsU_0 elephant ccWTUq_mvsU_1 elephant ccaCWXJ0jKY_0 person ccaYdn2p4Uk_6 knife ccaYdn2p4Uk_10 knife ccfTQmE0zsA_0 person ccfTQmE0zsA_1 person ccwFXG9D98w_0 person cc0S9924O-s_0 skateboard cc76qcSHNMM_1 dog cc76qcSHNMM_0 dog cdBO6xYUmzE_0 person cdBO6xYUmzE_1 person cdKEh34fsYk_0 person cdNWg2zU6bY_0 person cdOQ7lTQJBw_1 cow cdOQ7lTQJBw_2 cow cdSG1fcxNAA_0 person cdS-7_Egk88_0 person cdW8PgwFm6o_0 motorcycle cdZqtqh5PwE_1 person cdZqtqh5PwE_0 person cdZ1ODMJYKM_0 bird cdbmvoa89QU_3 train cdbmvoa89QU_4 train cdbmvoa89QU_5 train cdf-C-P2bW0_0 elephant cdkSgKIMQEM_0 truck cdkSgKIMQEM_1 truck cdoGDD6m8Og_3 person cdpYTik8eL4_0 person cdruQqCvfrI_0 truck cdxkCeoDX6Y_1 person cd80Ii4FB1Q_0 bird ceH46gqMWak_0 person ceIoRNo5FBk_0 person ceIoRNo5FBk_1 person ceLI06w8-Yo_0 person ceVkcz1wysc_2 dog Dt5UnNOUlZA_0 motorcycle DuMGrFowOWE_0 airplane DuUmKpZym5U_4 boat DuV6ahfZ_yw_5 knife DupWsV-iiys_0 knife Dur1W4FemFs_0 person Du7sKt25RiA_1 knife Du8hVxuK10c_1 airplane Du8hVxuK10c_2 airplane Du8hVxuK10c_3 airplane Du8hVxuK10c_4 airplane Du9r_1zpPkA_0 person DvEWbWxGJvQ_0 bus DvEykMsNibg_2 bicycle DvIS9FV5pag_0 person DvIS9FV5pag_1 person DvKLYYQzmas_0 person DvNTMqUwwWo_0 person DvR9Ctfk8lg_0 person DvWCGbG9LT4_0 car DvWDBQ9eMNQ_0 elephant DvWDBQ9eMNQ_2 elephant DvuQOS7UVI0_2 elephant Dv1e0Y8A8yg_0 cow Dv4azGPr4YI_0 truck Dv7eGdF004Y_1 person Dv7eGdF004Y_0 person DwJntGNV4Gw_0 person DwWzbtiIs7k_0 skateboard DwhCZK1eUPw_0 person Dwi-kq9Gcsw_0 zebra Dwi-kq9Gcsw_1 zebra DwlOBOv0IC8_1 bicycle DwlOBOv0IC8_0 bicycle DwvclcpHQNY_0 horse DwzuhLu_Jew_0 bicycle Dw2QHLXWmos_0 truck 
Dw7BXQFtH60_0 person Dw8lXatl4wE_2 person Dw8lXatl4wE_0 person Dw8lXatl4wE_1 person DxAMNpw-4qg_0 person DxB962sZJ_c_0 airplane DxB962sZJ_c_1 airplane DxB962sZJ_c_2 airplane DxFjGsjegtk_0 person DxHhkA1fVdA_0 person DxPOOsSCJpc_0 cat DxU9ZTI7KzY_0 bird DxXEapsjhOg_0 cow DxYW3ZMCXUw_0 person DxegJbsalCo_0 person DxegJbsalCo_1 person Dxl8-fknJjM_0 bird Dxl8-fknJjM_1 bird DxmdjAoDhkE_4 knife DxpMePWSgjs_0 person DxsdKCCUvCY_0 person Dxw3Y-UB0jk_0 airplane Dx0fgXYBRV0_0 knife Dx4a9ZiekrQ_0 elephant Dx4a9ZiekrQ_1 elephant Dx5VMmCltKo_0 person Dx8eIjF--eo_0 person Dx8eIjF--eo_2 person Dx8eIjF--eo_1 person DyFNZgEaw24_1 bird DyZHVNsbZeE_0 person DyceiTbkpMw_0 bicycle Dyd1Aj3RO3I_0 cat DyfyfDI4jqk_0 person DytAOZD9DLU_1 person Dy1-ch56AMc_0 boat Dy5kD11Wnbk_0 person Dy5kD11Wnbk_1 person DzAi_cumPY4_0 person DzCPCgkI8XA_0 motorcycle DzCPCgkI8XA_1 motorcycle DzFhvnd07Ck_0 train DzKdERTAA8U_0 cat DzMXxF7XRaI_0 person DzW2oC31Gcs_1 person DzXDPH8p-6Y_0 motorcycle DziXgWdCrvY_3 horse DzkCtRPiI-Q_0 cat DzlPtZXxtpU_6 elephant DzlPtZXxtpU_4 elephant DzlfBATujA8_1 horse Dzp0BrJSMBU_0 person Dz0d79BMerc_0 motorcycle Dz34hVhjpzA_0 person Dz7kWPDxgbg_1 bicycle Dz73CrM7pH8_0 person Dz8_y0iOjLM_0 skateboard D0DtV2eD7cs_0 knife D0HGjOZ5XWU_1 elephant D0O-T4E2DVo_0 cat D0R59ANL6o4_0 person D0TQLmGtPm4_0 airplane D0TTR7qCVXQ_0 person D0WAC7ByU0M_0 person D0Yx5cLcrqk_0 skateboard D0mf15dFGhk_0 person D0pcdPd6hwY_0 dog D0qo2f2Cshk_0 person D0xc1K3BQnQ_1 bicycle D0zhUpZhZi4_1 airplane D04tMZ7n3YM_0 skateboard D09x5ezi5hU_0 elephant D0-sW80X3kI_1 elephant D1Ct81qiyT4_0 truck D1Ct81qiyT4_1 truck D1DYQay-d_E_0 cat D1IQfkEa2-8_0 truck D1KUzeiWmUE_1 cow D1XPuPzMvv4_1 bus D1cTj9Fy4yE_0 dog D1dWoFMnKhc_0 person D1f92BE9HmI_0 person D1ktXwG0_jM_0 person D1plKiNFzvI_0 cat D1tZzoBOWfA_0 person D1yVIEgFGrY_1 airplane D10WSuM8eqU_0 person D19A7AUqZJ0_0 person D2CXHzxp1TU_0 cow D2Iqqb3RP6c_0 person D2Iqqb3RP6c_4 person D2Iqqb3RP6c_2 person D2Iqqb3RP6c_3 person D2KcVzav3YU_2 airplane D2KoBI6R7W8_0 train 
D2Qw63hsi1E_3 bear D2RT-qUSw_U_0 dog D2RZP8Y6VT8_0 dog D2Ri5Wy9XPQ_0 person D2RkdlTKlsE_0 person D2VABHjSM6E_0 bus D2VABHjSM6E_2 bus D2co1ZGkwCs_0 skateboard D2rbERtPxNM_0 person D2t36StaDcc_0 elephant D2t36StaDcc_1 elephant D2wSgbAelUc_0 cat D2yQaYJDNvs_2 bicycle D2yQaYJDNvs_0 bicycle D24GJS9nKC0_0 person D3EIh6pBTdQ_0 train D3F3xWCoWD8_0 person D3IDGSQSrFY_3 giraffe D3IDGSQSrFY_4 elephant D3IDGSQSrFY_5 elephant D3IDGSQSrFY_7 elephant D3IDGSQSrFY_8 elephant D3OvvA5jYlM_2 bird D3OxudXglSM_1 cow D3XqhAXefSA_0 person D3Zg90Ib5GI_0 cat D3b-w5J-wR0_0 person D3tuGaFbdbE_0 person D36Pwfuad5E_0 horse D4CWBceBJEk_0 person D4OMvYw25w0_0 bus D4aL-0UevEY_0 person D4do8kCWydY_0 person D4do8kCWydY_1 person D4goZXgzVC8_0 person D4oLradsvXE_0 person D4qq5Olmh24_0 person D410FuTGoPI_0 bicycle D4_2g_M4CXM_1 person D5GNIcodIw0_0 bird D5KLVLNs7-0_0 train D5KWKhPhqWE_0 dog D5OtHFsiXiI_0 person D5UGpkiG-CQ_0 person D5hYrAC2iIg_0 person D5jUPc4nQO0_0 person D5kSwHOWPBU_1 bird D5kSwHOWPBU_0 bird D5n4B-O8y8g_0 person D5tLtHWe0Jk_0 person D5uTmoMYXDE_0 cow D5x402SaAk8_0 truck D537kaRoYEk_0 person D552mK5tfLU_0 dog D59Eb3u0iPs_2 person D59Eb3u0iPs_0 person D6EDJA1bO3s_0 zebra D6G1X8WFAA8_0 person D6LDq6Q1Aic_0 person D6NzaXWZGEA_1 person D6UsriFwkjQ_0 person D6XIhwBoaik_0 person D6XUUDKA1CA_0 person D6d20KAVyzk_0 person D6f2wdAt_Ug_0 person D6kIRV5rEPk_0 person D6qXaD6WnVQ_0 bicycle D6zUwxeZ1zU_0 person D7c2tRlXz5k_0 skateboard D7dAkMkQf4I_5 elephant D7kHPyS4Gw0_0 person D7r_HLTwhWY_0 person D71B5jrYOig_2 elephant D77yNiFrtmw_0 person D78FDAi2log_0 skateboard D7_S2hp6aKI_1 airplane D7_S2hp6aKI_0 airplane D7_tUVFGy2o_0 person D7_zjfakeYM_0 dog D7_zjfakeYM_3 dog D7_zjfakeYM_4 dog D8GQWYiVK1U_0 dog ceczRgI6HDM_0 boat cev1umQFsVA_2 person cev1umQFsVA_1 person ce8j1r_CDH8_0 dog cfD9yGF5XmY_0 car cfFAjaziwn4_0 person cfWqngaDvvg_0 person cfWqngaDvvg_1 person cfex3QJFkTY_0 dog cfex3QJFkTY_1 dog cfpiw6KGB70_0 dog cfyY4mfwN7A_0 airplane cf0a6xp7r9s_0 bus cf3VOLwZdKY_0 dog cf6daxmvx6M_1 person 
cf6kCO9JdOM_1 person cf6kCO9JdOM_0 person cgAiH_9c5DU_1 bird cgD7Gr2Y-c8_0 person cgQ_34JYUkU_0 car cgT26vQK-4A_0 person cgZo7nUeCNE_0 bus cgjjdvXBsFI_0 person cgj_bzL4vsQ_0 skateboard cgmkRlhxVQ8_0 person cgmkRlhxVQ8_2 person cgmkRlhxVQ8_1 person cgxIrs3ySiA_0 skateboard cgyRQ1a79c0_0 train cgyRQ1a79c0_1 train cgzHPxfb-R4_2 person cg4GIYiUNiI_0 person cg9Y2DTUiDQ_0 cow chc30sNO6KA_0 person chl-Wa4_hic_0 person chrXgx4NWck_0 person chrXgx4NWck_1 person chwYzLEqKp4_0 person chyVy1kdL5M_0 person ch_yUR9RHIM_0 dog ciEhviIYSFY_0 bicycle ciFKNPdVskg_0 airplane ciUZ2LoiaCs_0 person ciZNBF9RdaA_1 knife ciZNBF9RdaA_0 knife ciZNBF9RdaA_2 knife ciZNBF9RdaA_4 knife cifpYBLq6dM_0 person cit4hdvCIp0_0 motorcycle ci83tdO3GuM_0 horse cjAhjjWOj24_1 cat cjL-hMHdmN8_0 person cjdImYwFXEI_0 person cjlPNeNKoSo_0 car cjmps6UKu_Y_0 person cjtjQu1YoTc_0 person cjuRQJf1_qs_0 horse cjvMLM_Uzbw_0 person cjye6t7P2XY_0 person ckIaNsLDst8_0 person ckJHbJCefVc_0 bear ckY7Izfnggc_0 person ckfgZsmJEbs_1 elephant ckyL1lkCzU8_0 person ckzaUAcrtY4_0 person ck6hJJVJfvQ_1 person ck6hJJVJfvQ_2 person ck6hJJVJfvQ_0 person clCQhmV8nf8_0 person clL4lyl6J7I_0 person clO2SRgOzAk_0 person clQ98CON1pE_0 person clUGOwaYaPg_0 cat claqhrkmhPg_0 person clmsmTFOSLo_1 dog cl410aCQA8k_0 train cl6C5KiOEHQ_0 train cmAN1SqRkDM_0 person cmGz-63gi5Q_0 train cmHjbUBM4q8_0 elephant cmKnHqPGlTw_0 person cmV1BLuEvpU_0 cow cmeGuaSUg34_1 car cmqxX05lPiI_0 person cmtruoCpSec_0 person cmwRk4-z_BQ_0 person cmwzhxa6Kd8_0 boat cm7Xd_WXZAs_0 person cnAC9g_fYUY_0 train cnAC9g_fYUY_6 train cnAC9g_fYUY_1 train cnAC9g_fYUY_3 train cnAC9g_fYUY_7 train cnAC9g_fYUY_8 train cnAC9g_fYUY_9 train cnJKH5dTKyI_0 skateboard cne8MAKWcjo_2 person cne8MAKWcjo_1 person cnoIwn3cQ7Q_0 bird cnplEeb8Iuk_0 motorcycle cnp30cLXzq8_0 skateboard cnrSdMSCW6w_0 truck cnrSdMSCW6w_1 truck cnrSdMSCW6w_3 truck cnryAbqs0sM_0 horse cnryAbqs0sM_2 horse cnt7MyeNlHA_0 person cnvzLGyGalU_0 cow coBLne1vSV0_0 person coDrWV3qbQE_1 car coIhjdND3yY_0 person coVT-MPjIsc_1 cat 
cobC6BjJahk_0 person codE_-LtIRY_0 boat cofwfK4F5ac_0 person cohdkT2S_oA_0 skateboard coh6clK_Q6A_0 person comEv_WJ4Uc_0 person cousEghehEo_1 person cousEghehEo_0 person co17Vvf3bag_0 knife co17rRdOvwc_1 motorcycle co5rBTsE2i0_0 knife co7SR4bgOM4_0 knife co9DJtEU4eg_0 person cpEYJnyJ9XM_0 train cpLmgivniko_3 knife cpLmgivniko_2 knife cpO5pHTOelo_0 cow cpQ9HawKR-Q_0 airplane cpQ9HawKR-Q_1 airplane cpUTjBksgdA_0 person cpmMEngbDHE_3 person cpmMEngbDHE_0 person cpmMEngbDHE_1 person cpnZFfnjGYs_0 car cpre_wIt0hs_0 train cpre_wIt0hs_1 train cptcOzotQ0E_0 person cpuYK9y7zu8_1 boat cpxkLEREnwo_0 cow cp4ttild7EA_0 train cqEdqz5F7tg_0 cat cqOLpxxqIBw_1 person cqOLpxxqIBw_2 person cqOclzkqkVg_0 person cqO2VRSBGGg_0 bus cqRNPM3jgNs_0 cow cqS_ZvZF4Kk_0 person cqS_ZvZF4Kk_1 person cqez5FuSf44_0 person cqf4Vh7Vy9M_0 person cqkZZqtr3z8_0 person cqkZZqtr3z8_1 person cq3TwUTSBFA_0 horse cq84vJoKj0A_0 person crXlnYSuCuw_0 person crgSyPjbLBw_0 person crh-ncEjMd8_0 umbrella criMO4N0K5E_0 person crmw_2KCRlY_1 horse crmw_2KCRlY_0 horse cruWABLWvD0_0 person crzo7x07GTs_1 elephant cr02TlSWnkI_6 elephant cr5ddm3njdQ_1 bird csGJS_sNJx4_0 person csKSGFZyk04_0 horse csTChnltOdg_0 cow csiWQna-zcg_0 skateboard csl1NFlhS0I_0 person cswk8vZ6th8_0 person cs16RhEpmu4_1 person cs16RhEpmu4_2 person cs3PfcpDro8_0 cow cs_yLDexfXk_0 person ctAtCH6V1Dw_1 person ctAtCH6V1Dw_0 person ctCQsTBheHg_1 person ctJATSvGLTo_0 elephant ctJATSvGLTo_4 elephant ctJATSvGLTo_1 elephant ctJATSvGLTo_2 elephant ctK8CQu6Nvg_2 boat ctLUri8cnqU_0 bear ctNE8tj4Z18_0 truck ctOTsI_RZps_1 person ctOTsI_RZps_0 person ctPfu5shFA0_0 person ctPfu5shFA0_1 person ctRpeLVhC50_0 bicycle ctWUEkluOFo_0 truck ctWrHmTAoxw_4 dog ct24BXc-tWg_0 person ct8_KhvMuHo_0 motorcycle ct_TbfWVBQc_0 person ct_TbfWVBQc_1 person ct_TbfWVBQc_2 person ct_TbfWVBQc_3 person ct_vznHYblc_0 airplane cuHFcWEuUNo_0 skateboard cuQ5swAtzfk_0 person cuRuiFR7bNY_0 person cuU3htRHPgM_0 person cuWjLEIrs8k_5 bus D8btdwmdRNU_0 knife D8sBFUu104g_1 knife D8urBZQXl6o_0 person 
D8wVRKGVcLw_0 dog D804JptI7_4_0 motorcycle D8-J5NgmOQg_0 person D9J-SuKzTU4_0 bicycle D9RlyV_QhoQ_0 bear D9WsxKDzM80_1 horse D9WsxKDzM80_3 horse D9WsxKDzM80_5 horse D9XDsr6tkug_0 dog D9XDsr6tkug_1 dog D9XwHuLUv_E_0 car D9ixoNe1mQ8_0 person D94_XdBnfjQ_0 horse D97nupvam-4_0 person D97wkVsbfJk_0 person D97wkVsbfJk_1 person D98TSSeEEXc_0 person D9-PVz9eRtA_0 person D9-PVz9eRtA_1 person D-DNyYPMTvE_0 car D-EA0oKq0qI_0 cat D-UToJ9lT9w_0 person D-YgpB48Efg_0 person D-YtknfK7cQ_0 person D-a0sdpLGlI_0 umbrella D-gTVzHdFAE_0 bus D-gxEOUdm98_0 person D-jl7sUktcE_1 person D-pfJT6Nyfo_0 person D-pfJT6Nyfo_1 person D-u2wEUntuI_0 person D--GMbo7meg_0 person D_FozyNGP_g_0 person D_OvU_wvmsg_0 skateboard D_QDxlwnenM_0 bird D_TbGwH_U4I_0 person D_XHitiDPXI_0 person D_XwOiOHuZU_1 person D_XwOiOHuZU_0 person D_g7kf5F2CE_0 motorcycle D_kMPno6xDw_1 person D_r43ev6HHs_0 airplane D_uO4kxnCwM_0 train D_vXQa4wYoY_0 person D_vxl0ffX4U_6 bicycle D__WGD95lSY_0 cat EABbbYMrVPo_0 person EABxiYRLhro_1 knife EANBKNPscIU_1 dog EANBKNPscIU_0 dog EATgn3uQFCc_0 truck EAecqVilQ60_0 airplane EAh-eJriiEM_0 cat EAlTNLBenas_0 elephant EAmeB0UClfE_0 person EAoS9E3JQM0_0 knife EApLpwcDY04_0 cow EApLpwcDY04_1 cow EAvGskBbSsI_0 person EAvUn45orps_0 person EAvhz7EUrHs_1 person EAvhz7EUrHs_0 person EA2Zq7j78Zw_2 horse EA33eNV3TsM_0 bus EA4Pppxm9q8_2 airplane EA9IwJGPZFo_0 person EBBWzGDSfhQ_0 train EBCEcy1RAZU_0 bear EBDSyGzaeVM_0 person EBDSyGzaeVM_1 person EBGwUwk8_KI_0 motorcycle EBL5WSEhHwQ_0 cow EBTH0ShVz5s_1 horse EBYJEkaJizQ_0 truck EBmABlnU3Ns_0 person EBpvJEz7GAs_0 cow EBqxBh52uek_1 person EBrNePUYA80_0 cat EB0XdJ6nl5Q_1 bear EB5sThk9G-k_0 person EB7yZ9myXmo_2 horse EB7yZ9myXmo_1 horse EB-GUW188Kc_0 person ECDxDS-R1ZU_0 train ECEv0inW5Cs_1 dog ECKwTK9kBHk_0 cat ECLYb63wsdY_0 person ECT7_2qKJJw_0 person ECUpMJzxafs_0 person ECXdLGCGSRU_1 person ECdvMn526ho_0 skateboard EChWuqD2kxc_0 person ECofUr-jIIU_0 person ECpmJNOAfZU_0 person ECuo32_WqfU_0 person EC0Q7uMrJh0_0 cow EC1pupdSC2Y_0 person EC-RADUn0SA_0 
skateboard EC-RADUn0SA_1 skateboard EDBYWaa97hs_0 person EDUY2xl1Jkw_0 cat EDYGYkJTUAw_0 person EDZ9Cu6WUAU_1 horse EDcpyGbwAVs_1 train EDcpyGbwAVs_2 train EDqFOrLwfpE_0 elephant EDqFOrLwfpE_1 elephant EDrX2_SzLF8_0 elephant EDtN3eOjUXg_1 motorcycle EDvdnYUw9b0_0 person EDxj4RwQr7k_0 truck EDxj4RwQr7k_1 truck EDxj4RwQr7k_2 truck EDxj4RwQr7k_3 truck EDxj4RwQr7k_5 truck ED-QWlNA_QI_1 person ED-QWlNA_QI_0 person EEFgTj2V6IY_0 person EEMkBuPFopc_0 person EENkey7gvFA_0 cat EENyo-VOtiA_0 person EEQVWkmTS6A_0 person EEQVWkmTS6A_1 person EEfWTq58rX0_0 motorcycle EEfWTq58rX0_1 motorcycle EEiUwF9ID5k_1 elephant EEiUwF9ID5k_0 elephant EEiUwF9ID5k_2 elephant EEiUwF9ID5k_5 elephant EEnpnVNwpgk_0 person EEnpnVNwpgk_1 person EEn1JwzcH7Y_0 person EEtv5FqPqG0_0 motorcycle EEx5nPfhJdI_0 bear EE5owiH92Io_0 bird EFHnwo5U2Bc_0 bird EFHnwo5U2Bc_1 bird EFTcDwwNw_M_0 person EFd6XVMNdEk_0 umbrella EFpWVH06Tf4_3 motorcycle EFpWVH06Tf4_1 motorcycle EFpWVH06Tf4_2 motorcycle EFryCLs5aWc_0 person EFwar_GkK6Q_0 cow EF0hPkNXnoA_0 skateboard EF1htFUPo80_1 bus EF23dhLqzKk_1 person EF23dhLqzKk_0 person EF4KGrH7s08_1 train EF4KGrH7s08_2 train EF4KGrH7s08_0 train EF8PHVKHaq8_0 person EF9VafNyS20_0 person EGCQIKdLkIU_1 train EGHYSrxI1Ek_1 person EGIhtnFv2f4_0 person EGI5Yk7IU8s_0 boat EGOtOZyUpk4_0 train EGOtOZyUpk4_1 train EGOtOZyUpk4_2 train EGZ7-ChFJQI_2 knife EGd19Lwe3vM_0 person EGgvoXoby8c_0 person EGgvoXoby8c_1 person EGiEfcahLzY_0 person EGsRldGZ4Bc_4 truck EGsRldGZ4Bc_5 truck EGsRldGZ4Bc_0 truck EGsRldGZ4Bc_1 truck EGsRldGZ4Bc_2 truck EGvzZJ10zwQ_0 train EG7cF7KMqs8_0 motorcycle EG-A5-_1i-o_0 car EHD613XdEQc_0 person EHMEQV26qfk_0 boat EHUgk_5vbps_0 horse EHafuO8IcpI_3 bird EHcP0uDfEyE_0 umbrella EHft6kH6siE_0 person EHft6kH6siE_1 skateboard EHtU4jYmFWw_0 elephant EHvP9Bwmq7M_2 person EHvP9Bwmq7M_0 person EHvP9Bwmq7M_1 person EHv9RwkIPXM_0 skateboard EIIC6lIbxO4_0 cat EIRbrmP8N9U_1 elephant EISmAs76j_g_0 train EIUHtk1IdtA_0 cow EIcGpS1nsXk_6 elephant EIcGpS1nsXk_4 elephant EIdaSifBFgk_0 person 
EIdaSifBFgk_1 person EIe7fhZxKpQ_0 person EIe7fhZxKpQ_1 person EInkqD_T5Os_0 train EIwa8hvMQ9g_2 bicycle EIwa8hvMQ9g_0 bicycle EI8OMIBxEOo_0 person EI-G2_K6zus_0 person EJE1AAlhjcQ_0 person EJE2EqHSaLA_0 airplane EJJefx2O7lo_0 person EJJ0aK1Mefo_1 bird EJMke8tdD9c_0 person EJMp6Gszq8M_0 person EJM15lQ1nds_0 bus EJM15lQ1nds_1 bus EJNv-W_Wh3s_0 airplane EJNv-W_Wh3s_1 airplane EJOO-gnqZOQ_0 person EJQZBc87T7Q_0 person EJTbpxYS19w_0 person EJdJUArfCgA_0 person EJdJUArfCgA_1 bicycle EJ2XL046J4A_0 person EJ3IJ7_jx0s_0 knife EJ3IJ7_jx0s_1 knife EKDm7Y7dQ-g_1 bird EKETFVqhfZI_0 person EKOgJfGpWw8_0 horse EKPKBwGLkg0_0 person EKR2BQWkMTI_1 person EKf-TzUsoG8_0 person EKsbh9eVG0w_1 airplane EKv1nvgLQLc_0 motorcycle EK2VY_FFN04_0 person EK56Obpu5ME_0 elephant EK56Obpu5ME_4 elephant EK5-ZuOavbM_0 train EK5-ZuOavbM_1 train EK5-ZuOavbM_2 train EK7wRGel2vk_0 person cuXky9bc80o_1 elephant cuXky9bc80o_3 elephant cuXky9bc80o_0 elephant cuYker921kg_0 person cuZPt_f2GfE_0 person cusvncJOcwQ_0 horse cu0Z8d-ioZA_0 airplane cu_YsyYcbL0_0 cat cvBKWYZidIs_0 person cvFAAQuXQR8_0 person cvJuXsDfcUY_0 train cvUktXqTBBA_0 car cvUktXqTBBA_1 car cveuhB6Z_D8_1 bicycle cveuhB6Z_D8_6 bicycle cvfI6ccn-J4_0 person cvgZ-1Uaigk_0 person cviAzkIEA00_0 skateboard cvlOlYpovm8_0 person cvyLalOdUEY_0 person cvyTQ9oFD8s_0 elephant cv9PMwKXLoA_0 person cwBgT8f3504_0 person cwB99KCLazI_3 person cwEuIwecOZA_0 car cwHQZi15U3s_1 bear cwHQZi15U3s_2 bear cwKndGwjXho_0 person cwKndGwjXho_1 person cwPtR7LsWag_1 person cwPtR7LsWag_0 person cwTq-wB6R3U_0 skateboard cwe2t4eoAs0_0 person cwf1OksNfQ0_1 horse cwjK5oxoq5Y_1 person cwjK5oxoq5Y_2 person cwmY9UYaukc_0 person cwnltT3Eelo_2 bicycle cwp0G17bk0I_0 truck cwp8Oe0F6y0_0 truck cwsLz_ppMx8_0 truck cwsx0Rs732s_0 person cwyDOlWxH00_0 bus cwzHLMKmpWM_0 horse cw054hU6MdM_0 person cw4vlk-0siU_1 boat cw45Y0beNG4_0 bus cw55i8mKHnE_0 person cw55i8mKHnE_2 person cw57dOs_v5A_0 bear cxAcLoLkk2g_0 person cxJp5-r_mjQ_0 person cxLrrWl89wo_0 person cxMcoeT1INo_0 person cxQENdEkIVQ_0 
skateboard cxSj2n8O4Vk_0 person cxUXpTWO4iY_0 train cxbTIQtmtLs_0 person cxiI7jApblc_0 boat cxkH0GxPEqU_0 motorcycle cxm8wGi_pl4_0 person cxsitsK8l9w_0 horse cxsitsK8l9w_1 horse cx0cCIp1KeU_0 person cx0cCIp1KeU_1 person cx0tj_0g0-k_0 person cx2bUajKTrw_0 person cx4EC6uXkkY_3 boat cyBgPXda4lw_0 person cydwQgvjXlk_1 person cyd0m3k4Iv8_0 cat cynwjNSXfDs_0 elephant cyz45rMhH9E_0 person cy4xwLUwDN4_0 person cy4xwLUwDN4_1 horse cy4xwLUwDN4_3 horse cy5IjIQ0UNQ_0 motorcycle cy58Sr7mA_Q_0 knife cy6woAEQ0aU_0 knife cy8BRHRLKa4_0 train cy9CeQwHsws_0 bird cy9kq-lD2Q8_0 skateboard czD_BiifXv4_0 knife czLen_XZrRo_0 horse czUjYoRVVYw_1 horse czec9DaQ1sQ_0 person cze3sm-N48s_0 person czjU6Q4s1jc_1 person cznO_APZ6xQ_0 bear czpxbOFiY_Q_1 person czpxbOFiY_Q_0 person cztHS4laeBQ_1 bicycle czto2OaEIww_0 person cz0dXFpjC6o_6 bear cz0dXFpjC6o_4 bear cz0dXFpjC6o_5 bear cz5kAZB6n0k_3 bear cz6eGvs1xNE_1 motorcycle cz8sE1Vn4Gw_0 person cz83QPHVLnk_0 umbrella c0GrJULqad0_0 person c0GstZDjoNM_0 person c0IYOMYovRo_1 person c0J3zJ8n3SI_0 horse c0LibLcues0_0 bear c0MEfCeuV5U_0 bird c0MdSWVdmqY_0 bus c0PyfX2HFqE_0 person c0TJZWOz78g_1 dog c0XKBQNwSlg_0 truck c0aHKGTYgeo_0 person c0bZsiE4obs_0 horse c0jq_aReY5M_0 motorcycle c0kH2qIyG7E_6 horse c0kH2qIyG7E_4 horse c0lBfqi79wQ_0 cow c0lDR6ABjTE_2 person c0nRMc9KiiQ_0 dog c0nXpd7yJsk_0 person c0o_nv0BL6Y_1 bear c0pzN4lVApI_1 person c0qkbu5wLF8_0 elephant c0qkbu5wLF8_2 elephant c0wve_629pA_0 person c0yrclVs1YA_0 cat c02KdAN0Hwg_0 bird c04Vd9VQao8_0 person c04ixznYflE_1 giraffe c07Yqknz4KI_0 train c07k0EtqcVs_1 car c08cFHAOc7I_0 train c0_M6VhGXOo_0 person c0_M6VhGXOo_1 person c1JGF-ltiJ8_1 bicycle c1JGF-ltiJ8_2 bicycle c1PUETYl8Lk_0 airplane c1QAgByBiYE_0 person c1WZ6dEz6kw_0 airplane c1XMeGkSwJQ_0 person c1XfiRiOTb0_0 horse c1a_E7CZsVk_0 person c1djg96PnM0_1 person c1djg96PnM0_0 person c1hBqL_LWE0_3 bird c1j8TlZsEmQ_0 boat c1laLoj4fM8_0 person c10eOkpL080_0 person c10eOkpL080_1 person c2B7cQwr4Pk_0 person c2EIdJJnku0_0 motorcycle c2E5_n_bZKc_0 train 
c2Kh-3yj9Ak_0 person c2MTwEvPGGk_0 person c2MUYY-qPhA_1 bus c2MqPrUNXQ4_0 train c2UDI136z20_0 elephant c2UDI136z20_4 elephant c2UDI136z20_5 elephant c2UDI136z20_7 elephant c2YlmT-aFE4_0 cat c2a9uwUCFK8_0 cow c2dk3AjUcYs_0 person c2gJYqYcsZg_0 person c2luSxdPZ6A_0 person c2m_PmRSEmw_0 elephant c2qJhOvlIUU_0 airplane c2xTBZttrzA_0 person c22HGSTHjBA_2 knife c22HGSTHjBA_1 knife c22yvcXZcM0_0 bird c2_qHguvZbI_2 bear c2_qHguvZbI_0 bear c3E9z6F-Txk_0 train c3J2U0kR6Hg_0 person c3TisGCbmoU_1 person c3Ur6j05SgQ_1 bicycle c3YFgnDBuXw_0 person c3bCGnwqGxc_0 car c3eo0_ftrn4_0 cow c3pP__Uybq8_0 person c3wt1MUbgD4_0 person c3wt1MUbgD4_1 person c37EOoRHd7E_2 truck c4A01X82TfI_0 train c4FmSUmvYbo_0 person c4FmSUmvYbo_1 person c4Hh2XdTBGY_0 cow c4ICOFVvcTs_0 person c4e-qA4esVY_1 person c4iCXPdqm6c_0 elephant c4jbOCZyGsQ_0 person c4k8Yk1x3H8_1 person c4k8Yk1x3H8_0 person c4xRJS9_5Fk_0 train c4xRJS9_5Fk_1 train c40Mwg88VJI_0 person c43ihGsR1eA_1 person c5AKIs1XUhc_1 bicycle c5AKIs1XUhc_2 bicycle c5AKIs1XUhc_3 bicycle c5BYdZTaBgc_0 person c5CmxgLHcxA_0 bus c5Fw-Fi4daE_0 cow c5GANV8PlSM_0 person c5GIQcIJ9Tc_0 truck c5GOwfkZXFk_0 person c5GOwfkZXFk_1 person c5Q2ZeMDx3o_0 train c5TlkWtFymE_3 dog c5WT0W8SfGg_0 cow c5WT0W8SfGg_5 cow c5WT0W8SfGg_1 cow c5WT0W8SfGg_2 cow c5WT0W8SfGg_3 cow c5WT0W8SfGg_4 cow c5cooFy7-SM_1 elephant c5hEygqOXOU_0 person c5oiA5xy15M_0 person c56nid2YSes_6 bird c56nid2YSes_0 bird c56nid2YSes_1 bird c56nid2YSes_2 bird c56nid2YSes_5 bird c56nid2YSes_8 bird c56nid2YSes_9 bird c5_dNG2vWXg_0 car c6EIognIYWs_0 bird c6ZQRNXfcZA_1 person c6a4xySAJ0o_0 truck c6niMRNXDeo_0 person c6qKbpvd-iw_0 person c6rbqnU4LXs_2 motorcycle c6rbqnU4LXs_0 motorcycle c6s839WnVhE_0 truck c6yBOD3Wo5A_0 person c7B-3x-3V34_0 person c7ILC5wYs8A_0 person c7KoGv5Ha7k_0 person c7PMPnuPjp8_0 person c7RFexe2Ba4_1 bicycle c7RFexe2Ba4_3 bicycle c7RFexe2Ba4_0 bicycle c7RFexe2Ba4_2 bicycle c7SMRurbkY4_0 bus c7bKlPVR5pI_0 boat c7hVbIhp0Wc_0 person c7jWXqWoMz0_4 bicycle c7s8weR8lEY_0 person c7v4ZFCK-A4_0 
person c70kaPblMLU_0 cow c74hYNtpwdA_0 dog c75cllxWxZE_0 person c7_op6G05l0_0 airplane c8B4ZVLv364_0 person c8Cl-5olqWk_0 motorcycle c8Gaja-xUeQ_1 person c8I3JAxoLTs_0 bicycle c8I3JAxoLTs_1 bicycle c8I3JAxoLTs_3 bicycle c8LHqWmKrJU_1 airplane c8LHqWmKrJU_2 airplane c8Mo16hH7qs_0 person c8UrmdREAO8_0 person c8Y7MJRWFqE_0 cat c8Y8y9BsPHw_0 cow c8b9qqF9Xvw_0 person c8b9qqF9Xvw_1 person c8ezNTNUXqc_0 cat c8wbvQnndJc_1 bicycle c8wdGQw1jB4_1 bus c8wdGQw1jB4_2 bus c8y3bmW0X9s_1 cow c8zphqgYcJM_0 person c80SYyKXCCw_0 person c8_fHVnrzZ8_2 elephant c9EDbgCRGP0_0 person c9GKsfyRkmE_0 person c9IdrMV-Y_Y_0 person c9Q9LPaqyug_0 umbrella c9SbfXgAoO8_1 airplane c9Somjq2gLs_0 umbrella c9WDXLFtYLU_0 bus c9XaEHVxu4M_0 person c9Y9a6KVWRE_0 bird c9Y9a6KVWRE_1 bird c9ZWCwVv6Q0_0 person c9dPiEkCwR4_0 motorcycle c9gCDztKasg_0 elephant c9pYz2lTh3I_1 person c90ldeMSfL0_0 cat c94gzpjmj24_0 person c9_87BKOW1I_0 cow c-CCw_cyicE_0 cow c-G0LV4kyY0_0 car c-T9ITcEW9c_0 person c-T9ITcEW9c_1 person c-ZnwBvVFGE_0 person c-gH6T1q-sk_0 person c-pKAy_3arM_0 person c-uOjPSq-10_0 cow c-vwn6zqogs_0 person c-vwn6zqogs_1 person c-4uPwFKBdY_0 person c-_iMD-ihnE_0 motorcycle c-_94CuEo_M_1 person c_SQI7NirwY_0 person c_THUYYi_-k_0 airplane c_YojhaB5pI_0 motorcycle c_jNM33kJuA_0 person c_rUQgBtHY4_0 person c_rUQgBtHY4_2 person c_rUQgBtHY4_1 person c_wkIYzEEDk_0 dog c_6OcDyZ93k_0 bus c_9GO2BbPz4_0 horse dAQu2GQSyrY_0 cat dAS6SqC7TCw_1 elephant dAVIZQJ5Af4_0 person dAqurx13i7I_0 knife dAynVVxxb_o_0 person dA7mx3mrJeA_0 train dA_ZtitJeMA_0 person dBDSqZ8rirA_0 person dBGKqrEvsIE_0 boat dBGKqrEvsIE_4 boat dBKexOUQSQA_0 cow dBKexOUQSQA_1 cow dBKexOUQSQA_2 cow dBKexOUQSQA_4 cow dBKexOUQSQA_5 cow dBKexOUQSQA_6 cow dBOrrvJDv54_1 skateboard dBPu5iVlw1Y_2 horse dBSryinfjiI_0 person dBS9maEElcw_0 person dBUpfcdFDUQ_0 bicycle dBWeUQd06l4_0 person dBWeUQd06l4_1 person dBiGneGqmh0_0 cow dBk2FwZgrtk_0 cow dBq77lvujCk_0 bird dBuvGegR_vA_0 person dByVvpTlwL4_1 knife dB29dsCcN9s_0 train dB43vSgLY2M_0 person dCG24UL_NeM_0 person 
dCSF80Y6lso_0 person dCSF80Y6lso_1 person dCZ9suBocXk_0 person dCgz-7OgwMQ_1 person dCl8hSleXYQ_0 cow dCoi3rXWgbM_0 person dCqdvmS1jts_0 person dCqdvmS1jts_1 person dC9rTC3kzsI_0 cow dDADJZV4i74_0 horse dDA5p5TJ03g_0 person dDB84W_zVOI_0 skateboard dDB84W_zVOI_1 skateboard dDE3p8Gs878_0 elephant dDGiQLFJtPs_0 bicycle dDIbBZtEJ2w_0 knife dDLgQQ2XRc8_5 horse dDLgQQ2XRc8_3 horse dDLgQQ2XRc8_6 horse dDO-RlSt3Gw_0 person dDQ58wciink_0 cow dDZYTPEd9KE_1 airplane dDacKPH4sOw_0 car dDacKPH4sOw_1 car dDcBtNpmCeU_0 person dDgcHWpKMeo_0 person dDkaPLEvAwM_0 horse dDkaPLEvAwM_1 horse dDkaPLEvAwM_2 horse dDqe9sBGR24_0 bird dDx0MqaKT2w_0 person dDx0MqaKT2w_2 motorcycle dD-AlVwxf-g_1 cow dD_Ew85jXzk_1 train dD_PbxvCBcA_1 person dECTTSpEUKg_0 person dEW9ZwvMsDE_0 cat dEc5fHlEXCo_0 truck dEuzpQL0tNo_7 elephant dEuzpQL0tNo_1 elephant dEuzpQL0tNo_2 elephant dE7OwbOHsu8_0 person dE7WsfeVkI8_0 person dE7X93gdVPQ_0 cat dFCUyBTrvNM_0 horse dFCu7E6aYM4_0 person dFCu7E6aYM4_1 person dFEo5YKHAcA_2 skateboard dFEo5YKHAcA_0 skateboard dFMPz16FOzE_0 motorcycle dFZSSPvMBqE_0 zebra dFZSSPvMBqE_1 zebra dFa7TcQRCUU_1 bird dFbZxetmjCQ_0 skateboard dFkNDweVNFU_0 cat dFpJq9s5fec_1 bicycle dFpJq9s5fec_2 bicycle dFsDjjWW00Q_0 knife dFth5-8MEhM_0 person dF7OkxFt3I8_0 person dF_aGgW1jcM_0 person dGE7t6KgXHc_0 person dGFrWX61Zk0_0 person dGS01inQU1U_2 person dGS01inQU1U_0 person dGS01inQU1U_1 person dGZBUkIXMpo_0 person dGZ_pzDrl70_0 person dGdh_BHleU4_0 boat dGh51ZQ9QAg_0 bird dGk8D_De-2E_0 person dGk8D_De-2E_1 person dGpbPaorWys_1 bear dGq1bpRxbiA_0 person dGyR5TWO-p4_1 person dG0CtnphYzg_0 person dG5mjfvTY7c_0 boat dG7DSOtetMY_0 knife dG9J5UpxeyY_0 person dG9J5UpxeyY_1 person dHCgtjlT_Lg_4 horse dHCpH8dTwfw_0 horse dHF9NIqrx6Q_0 car dHGIXivupi4_0 person dHGIXivupi4_1 person dHGIXivupi4_2 person dHJkOetpjQw_0 bus dHO6vTrB66w_0 person dHO6vTrB66w_1 person dHVDjpivOKw_1 person dHVDjpivOKw_0 person dHVgQCO07SU_1 person dHVgQCO07SU_2 person dHfs5GT-YpY_0 cow dHg1Xorklm0_0 person dHimuOjriUc_0 cow 
dHnk6ulSNSo_0 person dHnsZs2Riqk_0 person dHnsZs2Riqk_1 person dHsD3F8dTpc_0 bird dHvlIrb2Q-k_0 person dHwR5d4xGEk_0 knife dHwR5d4xGEk_1 knife dHwR5d4xGEk_2 knife dHwR5d4xGEk_3 knife dHwR5d4xGEk_4 knife dHxmY1bGbNc_4 bird dH89qyunr6s_0 person dH94i4xFlZU_1 elephant dH94i4xFlZU_6 elephant dH94i4xFlZU_0 elephant dH94i4xFlZU_5 elephant dH94i4xFlZU_7 elephant dICl73jYZ3M_0 person dICrafh45_I_3 airplane dIDxqrhmBE4_0 truck dIDxqrhmBE4_2 truck dIEZ2kfTzzY_0 boat dIJk0w4SnH8_0 bird dIVtaleUNWI_0 person dIVtaleUNWI_1 person dIX81Ov0fUY_0 person dIZM-9d8bSQ_0 person dIZM-9d8bSQ_1 person dIm0Sv_iE2E_0 motorcycle dIqYGVVgYsU_0 person dIzMmAGaF6U_1 skateboard dI93uXfSaRM_0 bird dJB-DXpgq2U_1 bird dJKAhixNM9Y_1 truck dJYNs94fv_0_0 person dJgqX3uy6z4_0 person dJg4R9cpbjI_0 person dJisrPH71tE_0 person dJi_dOrUZnw_0 person dJjrFTy9H3c_0 person dJkzzYh6BkY_1 cat dJnRg-1zO1g_3 knife dJqGj0FeC9I_0 cat dJvoaqZjIDw_0 person dJ2B9A0mYl0_1 dog dJ2kWscI-tc_1 dog dJ4PR9zme-s_0 person dJ6S9bSEYDc_0 cow dJ8J7WOLZtk_0 skateboard ELDxjZXMtCg_0 person ELLTxQ47f90_1 person ELLTxQ47f90_0 person ELNgTt9Jswc_0 train ELOZutiZKMM_0 person ELOZutiZKMM_1 person ELPpy9ABb3s_1 elephant ELTeW4X2mGY_1 cow ELbg8i93W8I_0 person ELbjX2Ya0_o_0 dog ELmktutrkDk_0 person ELqA6fb0un8_0 person EL8H94Lycf8_0 person EMAVfcO6JFE_0 person EMKcTJp7ehY_0 person EMOpCv3vVfE_1 skateboard EMP7p3FNxZU_0 person EMU8vGL7ZFQ_0 person EMb28oLn66k_0 airplane EMgh3pwtnXg_0 person EMiRla730lM_1 person EMiRla730lM_0 person EMmg9OKgyBE_1 boat EMmmZ6ADzfI_0 skateboard EMngQ4YMTv0_0 motorcycle EMorunu9Ik8_0 truck EMqd3lVNUxg_7 bus EMuGAIADn3s_0 person EMwcDTRPPMw_0 airplane EMyQWQ_Yobc_0 dog EM0yGxKJWqY_0 elephant EM1R3HXt7DY_0 person EM1z9o601v4_0 knife EM3tBaIyR0o_0 motorcycle EM5e1snhsCs_0 person EM-k8ZAva6k_0 person EM-zjCQyGAc_0 dog ENAr6j6fcWU_0 bird ENCHiWUV4dk_0 person ENI-JuSPNQA_0 motorcycle ENSEWig-4ZM_0 knife ENXXFcrrxGM_0 car ENc0uxXKsaI_0 person ENkqstdLKl4_0 person ENk4JRIbEaE_1 person ENnPjtPjU6c_0 person ENtoAci6OwQ_0 
cow ENvdCzm4whM_0 truck ENvdCzm4whM_1 truck ENvdCzm4whM_2 truck EN0Klsi-AKY_0 bicycle EN4IIJjhBeI_0 zebra EN-QCSvtEd0_3 elephant EN-4SsZnn-k_0 person EOEXVXG1TDk_0 person EOVNlasJhIo_1 person EOdHjLYopi0_1 bird EOedzXaVI4U_2 bird EOe3CfOT53g_0 person EOmVKXeoKBc_1 airplane EOq-3ZRn0SQ_0 skateboard EOt6j5ecODw_0 train EO7NccQDQyM_0 cat EO8Dpvy4oXs_0 zebra EO8mQrkIZuY_0 person EO_DwtyWh0s_3 person EO_DwtyWh0s_0 person EO_DwtyWh0s_1 person EO_DwtyWh0s_2 person EPOXqdKNjKg_2 giraffe EPU630RSI5c_2 person EPU630RSI5c_0 person EPWmdYKJaXk_0 bird EPycDWf2vY4_0 skateboard EP_ezteElzk_0 person EQBFPIdI8gY_0 person EQC8eEghvs8_0 person EQNSjjkyRBg_0 person EQNSjjkyRBg_1 person EQTee9qqTZs_0 person EQVCizuJQFY_0 umbrella EQdEm5HuPG4_5 train EQx1XHc0mRM_1 motorcycle EQzXCoQRbas_1 train EQ5rBLoiT78_0 bus EQ9-lbsee1s_0 person ERCvzMzkDhg_0 skateboard ERGwo6vIXdQ_0 person ERJR-zQYyH4_0 person ERR-qjVJ3lY_0 person ERVp_cX1juc_0 person ERev6rrd5XA_3 motorcycle ERyyYMb2fFk_0 cow ERzh41uuxUE_3 bicycle ER0IdSeymeI_0 person ER0IdSeymeI_1 person ER03PLUBt4c_0 train ER03PLUBt4c_1 train ER03PLUBt4c_2 train ER03PLUBt4c_3 train ER53sUYwz1I_0 zebra ER6vMbAyQ6E_1 skateboard ER6vMbAyQ6E_0 skateboard ESDQMC_70Pk_0 bear ESInVf3ioiA_1 dog ESMdbpGXk4I_0 person EST4CUX19Eg_0 person ESokfN84OYk_0 elephant ESokfN84OYk_3 elephant ESokfN84OYk_4 elephant ESpwZsbwQGA_1 elephant ESpylyha7g0_0 horse ESt5TEXuGIM_0 person ESt5TEXuGIM_1 person ESwsyjITYGM_0 skateboard ETBia7K3ZHw_0 motorcycle ETBia7K3ZHw_2 motorcycle ETQTZgnfRK4_1 person ETQi93bP3YQ_8 elephant ETQi93bP3YQ_2 elephant ETTgj1pxvME_2 person ETWI4nXFANg_0 person ETcmjY7Jigo_1 motorcycle ETgN7EcVVQI_1 person ETmYIq5CF2k_0 motorcycle ET4xC8Wl_CA_0 person ET4yAsJTvlk_0 cow EUH3oSBX950_0 person EUH3oSBX950_1 person EULIYiiV-O0_0 person EULIYiiV-O0_1 person EULchAlLDfM_0 train EURUU5P5flo_0 person EUcHraiUCjA_0 bicycle EUcWvzarnb0_0 umbrella EUdNEi4myuA_0 person EUtfoblvHn0_0 person EUuCDfb8lf4_2 person EUuCDfb8lf4_1 person EU93Mw9WGkc_0 skateboard 
EVBHY1qGVos_0 person EVBHY1qGVos_3 horse EVElggpPSCM_0 elephant EVE2SBJ-2S8_0 person EVH8Ql7_pYE_0 person EVTW6Ka7-NU_0 person EViJ_JQcv5c_0 train EVmGPGaP6bY_0 person EVnnSfmb4go_0 giraffe EVn52FBjG9E_0 person EVn52FBjG9E_1 person EVxEEc26TWg_1 giraffe EWLiwu56oQc_1 person EWNd02yWiYw_0 person EWP0Hhxsf58_0 person EWQo_1YXfYM_1 person EWQo_1YXfYM_0 person EWTvjjpAUm0_0 airplane EWXyQ1tS3jI_0 elephant EWdNgXvr54s_0 dog EWfPRTjQO9k_0 dog EWgsivaLhl0_6 elephant EWgsivaLhl0_1 elephant EWgsivaLhl0_2 elephant EWi25l2D0cw_0 cat EWkndzLXvLc_0 bicycle EWuOSRFWTzg_1 elephant EW0Mgele6Gc_0 person EW0Mgele6Gc_1 person EW6FHYagN0Y_0 person EW98OEvTxM8_0 person EW-Zuo7ArI4_0 dog EXDDO7gLoL4_1 person EXDDO7gLoL4_2 person EXDDO7gLoL4_3 person EXDDO7gLoL4_4 person EXGwKMtyR1M_0 person EXHZgqkcXG8_1 cow EXJITC62tU4_0 umbrella EXSMz4HnWfg_0 dog EXaiYiUQrMI_1 dog EXfiGeKWKTk_7 airplane EXfiGeKWKTk_1 airplane EXiGyq1TD80_0 person EXiGyq1TD80_1 person EXkbZbo1n5U_2 elephant EXkbZbo1n5U_0 elephant EX817S50E5U_0 person EX-dqihLUwY_0 motorcycle EX-dqihLUwY_2 motorcycle EYCaJR9md8k_0 airplane EYEWPdaJuL0_4 bird EYEWPdaJuL0_5 bird EYEwLM8YTwc_0 person EYFMOBeF9UE_0 knife EYHtNGztiRQ_1 car EYKrEDelAdU_1 bear EYM1oXAmBq0_1 bus EYRf00qGMVU_0 train EYV6D6G6t2c_1 person EYZsYCSedGw_0 person EYd9lSK7Bbk_0 person EYhtY59whvs_0 person EYmWVBDEutA_0 horse EYnEMtlMaPY_0 person EYoj8D64YLA_0 skateboard EYuLodJTgYs_0 train EY2pZ9A48ng_0 truck EY2pZ9A48ng_1 truck EY2pZ9A48ng_3 truck EY25PJWD2j4_0 person EY36YeIgOYI_0 person EY36YeIgOYI_1 person EZWcsRlXIA8_0 person EZbOH9yEe-A_0 dog EZh1lf4yfCg_0 person EZ5Wa2duCoM_0 person EZ5Wa2duCoM_1 person EZ7d9ab31ys_0 giraffe EZ9-_7o9Vds_0 bird EZ9-_7o9Vds_1 bird EZ_xC5EBwvk_0 bus EaBdeSUjDYs_0 dog EaFSd7_S8kc_0 horse EaQ1P4QyRsY_0 person dKEVBoMMD2w_0 boat dKJz_EakSc4_0 person dKMb2S2SSfI_0 skateboard dKTgMjbnNPQ_0 skateboard dKiwficH2d4_0 person dKi4xI4vB-k_0 umbrella dKlCFQxk5Dc_3 person dKlCFQxk5Dc_5 person dKlCFQxk5Dc_0 person dKlCFQxk5Dc_1 person 
dKlCFQxk5Dc_2 person dKq4S1IVjlA_0 person dLFWcgSewxs_0 truck dLH8fBNk89Y_0 cat dLIld9ux7p4_0 airplane dLT61O_htwI_0 cat dLUCKkji5wo_0 person dLUCKkji5wo_1 person dLV2VJkpyMI_0 airplane dLbhzrFtNC0_0 person dLhVV7DMXkw_0 person dLoxdmLuphk_0 dog dLq5OW1xY54_0 elephant dLq5OW1xY54_3 elephant dLq5OW1xY54_2 elephant dLtQB9P_7BU_2 bear dLty27VgJcc_0 train dLvr7BjgsHg_0 person dLwXzYr8beg_0 car dL3dSZMnBko_0 person dL3vGWsRVCg_0 knife dMDGwTdSHIo_0 motorcycle dMJQi7oYiqQ_1 person dMS5hB4uWdk_0 bird dMWgiVqknaE_2 person dMWgiVqknaE_0 person dMZONdbNFbk_4 bicycle dMZONdbNFbk_2 bicycle dMdUZi9lxrU_0 cat dMiwR-DS6UE_0 car dMsIDwHkWNE_0 person dMulBz-N8oA_0 horse dM7lOj89YZE_0 person dM7-xh2kSmc_0 person dM7-xh2kSmc_1 person dM9u0c0qSV0_0 cow dNCm5MtFcp0_0 person dNEAY77it7o_0 person dNShS9OdIoA_1 person dNShS9OdIoA_0 person dNSlL572gMU_0 truck dNSlL572gMU_1 truck dNVvIPWEH1Q_0 person dNVvIPWEH1Q_1 person dNdTs9Qa1A0_0 truck dNeF_3qppZQ_0 skateboard dNj_77jiPcs_1 cow dNknNwahiv4_0 giraffe dNoz32bgN0U_0 car dNpQfDg_dIg_0 person dNqdMh44imM_0 train dNs2JO9SgGo_1 airplane dNs2JO9SgGo_2 airplane dNyMDstraS0_0 person dN1cn1CPEa8_0 person dODPVlzMR1A_0 person dOHuuTREVQk_0 person dOHuuTREVQk_1 person dOHuuTREVQk_2 person dOHuuTREVQk_3 person dOMW6BLHI2s_0 elephant dOMW6BLHI2s_1 elephant dOOQ32tmk14_0 elephant dORLSKDLr1w_0 cat dOUVBpTWHzc_0 person dOVzO5pkY2o_0 horse dOWhuaTBmr8_0 truck dOdX5nkOBoQ_1 person dOdYYCqd6i0_0 person dOdYYCqd6i0_1 person dOd-8kfbjz4_0 train dOd-8kfbjz4_1 train dOfNevz8wlc_0 bus dO2CbXVpSl0_0 elephant dPA7g60qlnk_1 boat dPJk57_DSuI_0 truck dPJ7_mdmjJo_4 truck dPJ7_mdmjJo_1 truck dPTnDrK0jl0_0 knife dPZPjPwJCnA_0 person dPiOaLH0K4Y_0 bear dPiOaLH0K4Y_2 bear dPma_hb-MR8_0 skateboard dPnxUa8yPbw_0 train dPpwBkl-F9k_3 bicycle dPpwBkl-F9k_0 bicycle dPp0no_eYOQ_0 dog dPqheqisvs8_0 person dPvgWsIPDr0_0 horse dP0jXsi0KUw_0 skateboard dP_-3SJLP1Y_0 person dQB4GI0Bgus_0 truck dQCFCRTz2rc_1 giraffe dQCFCRTz2rc_4 giraffe dQCFCRTz2rc_5 giraffe dQCFCRTz2rc_0 giraffe 
dQIQv4YkBaM_0 truck dQI-ReUS1hk_0 person dQM_-V4jSpM_0 cat dQNG1syFdKQ_0 person dQPdAoRj8vw_0 dog dQWw3losmfA_1 bicycle dQY2wbSJyOQ_0 person dQh9dmaqW3s_0 person dQh9dmaqW3s_1 person dQlybGW3tbw_1 cat dQnNTlCD_AQ_0 elephant dQnNTlCD_AQ_1 elephant dQoX3OkaI4M_0 person dQzWZhDVLYk_1 person dQ4hJadqL_w_0 person dQ62PlC9Frc_0 zebra dRBb5v_Fv3g_0 elephant dRDdBvl4olg_0 person dRHTO6H764g_0 person dRHYGXImEBk_2 person dRHYGXImEBk_0 person dRInM_HaQZs_0 bus dRVEs1099F8_0 horse dRcLZtR6KFs_0 person dRcrvTR9xIY_0 person dRiBVua-2Ck_0 person dRjzvcGshbA_1 person dRjzvcGshbA_0 person dRs8FcKuu6w_0 boat dRt8H1uQ5Og_0 umbrella dRt8H1uQ5Og_1 umbrella dR7jBT3cxr8_0 person dR8kCc9XNJs_0 boat dR-8FlykNZ0_0 person dSAODa472ys_0 bird dSAYK4yUlDs_4 person dSAYK4yUlDs_0 person dSAYK4yUlDs_1 person dSAYK4yUlDs_2 person dSAYK4yUlDs_3 person dSEv_R8nQik_0 zebra dSFMrnh2szI_0 cat dSLakvIEH9o_0 bear dSLmBYdUku8_0 person dSQTVC-RyAU_0 person dSWhe4RgQ_w_0 cat dSZBg-Vcr7E_0 motorcycle dSojBtCOkqQ_0 person dSx4IloBWZs_0 person dSzAX5l_fs0_0 person dSzAX5l_fs0_1 person dS0mBDDgP_A_0 person dS0mBDDgP_A_1 person dS8x0l5I7f0_0 boat dTDxzi0o_Qg_1 airplane dTMe2Vse97w_0 cat dTVBSXs5Me8_0 person dTVKs9m3eZU_0 cat dTm_DRCtjCo_0 elephant dTm_DRCtjCo_1 elephant dTrt1C_90H0_0 knife dTurjz-gJek_0 person dT6A3DwqZb0_0 boat dT8wudfW9gg_1 horse dT-INB6puFM_0 skateboard dT-INB6puFM_1 skateboard dUAtLBDfmBo_0 airplane dUAtLBDfmBo_2 airplane dUC_SF_mN_E_3 horse dUC_SF_mN_E_1 horse dUInMUIPtTs_0 person dUJH8d3CMU8_0 bear dUMLWt99A7o_0 person dUP4OTLrOA0_0 person dUW_G_--wI8_0 train dUXFUWivXPA_0 horse dUXFUWivXPA_1 horse dUbP54CBYd0_0 airplane dUm9A-1AoMU_0 person dUqrowFcbD0_0 person dUx_UfS9cQI_1 dog dUx_UfS9cQI_0 dog dU-bQRDInro_2 bird dU-bQRDInro_4 bird dVAMoKYgrwE_0 person dVKQhCF8o8w_0 person dVTHVxh6Tro_1 knife dVWAD4gOu-8_1 person dVd7OzbhOq0_0 person dViVbA7N_AE_0 airplane dVqPo7-p71Y_0 person dVtqTTZTFDQ_0 person dWCqnck4Um0_0 person dWFVX1psRZI_0 bird dWGkW13rQBY_3 horse dWGkW13rQBY_5 horse 
dWGkW13rQBY_8 horse dWVJFIzIKEc_2 bicycle dWVJFIzIKEc_0 bicycle dWVJFIzIKEc_1 bicycle dWXSWEaCId8_1 person dWdOl13DwwY_0 airplane dWdl9RdXrHo_0 person dWdl9RdXrHo_2 person dWd0sszZOXc_0 person dWesodD0ff4_0 airplane dWgfwKBrSiE_0 person dWgpYitSv0c_0 person dWkrnxWB1CU_0 person dWlDN9Hozgg_0 dog dWtqRwEurDU_0 person dW1oE_LHALo_0 elephant dW4DX7lQoGg_0 elephant dW5aU0U7K28_0 person dW53l1sR_zM_0 person dXEH9QiCyHk_0 train dXEH9QiCyHk_1 train dXKi3ZHjgWM_1 umbrella dXLyWGJxHnI_0 person dXOsaszlVY0_0 horse dXSuppGXFeI_0 elephant dXSuppGXFeI_1 elephant dXdFEix8vu4_0 train dXjUZeuzgaw_0 train dXkmG8AR82Q_2 airplane dXkmG8AR82Q_5 airplane dX6W4-sxsX0_0 cat dX9J6yDM5Q8_0 person dX-4XwYWv48_0 person dYGOSaGjHQU_0 person dYQMrQe1pSk_0 person dYRIEDyD9Qs_0 airplane dYRKwU2TJYI_0 elephant dYVcalOS1SE_0 dog EacR2o35-kc_0 bicycle EaeD7utPpTQ_0 person EakGzU5UgWI_0 person EakGzU5UgWI_1 person EakGzU5UgWI_3 person EamZ8De_WFE_6 elephant EamZ8De_WFE_0 elephant EamZ8De_WFE_2 elephant EamZ8De_WFE_3 elephant EamZ8De_WFE_4 elephant EavqjWy5gag_0 person Eaxszmfn7WA_1 person Eaxszmfn7WA_0 person Eay0MFBCdqY_1 horse EazzsVK1-pM_2 umbrella EbJV0e75xtk_1 person EbJV0e75xtk_0 person EbWt1hAb3LQ_0 person EbXzlcsBsfA_0 person EbYJAv5c_G8_0 person EblX3oKGsBA_0 skateboard Eb1n2o0YpOM_0 cow Eb3sGSIWtCw_0 person Eb7juFDG3Dw_0 car EcMh5TIKmzY_0 person EcNpsheyrIU_0 person EcNpsheyrIU_1 person EcWrNFz5J-o_1 dog EcpsBV2FEBE_3 horse EcsiLHpIvL4_0 person Ecu8VEIC2y8_2 elephant Ecu8VEIC2y8_1 elephant EcvYBldDm_U_0 person EdE8zCwJ56g_0 person EdE8zCwJ56g_1 person EdIfx7lQxEw_1 dog EdIfx7lQxEw_0 dog EdOvSD40Tb0_0 cow EdTkeITBkvY_0 person EdTkeITBkvY_1 person EdaY0DFamDc_1 skateboard EdfKMOIOHtI_0 person Eds-fi9s-O4_0 person Ed486SKW0kM_0 train Ed-ENhlS7Dg_1 boat EeCjxMzh5_A_0 person EeDhzR9I-Tc_0 motorcycle EeLllq2Zim4_0 dog EeMUemitsFU_0 person EeRqVkQ1Z7Q_0 car EeRqVkQ1Z7Q_1 car EeTRT4j5GcQ_0 person EeYRHJuK3wo_0 boat EeYqy9QZZTU_0 airplane Eeb2vPJsaN0_0 person Eee6rmiMYKY_1 car Eesk8VSxpIU_0 cat 
EetKMgVh0Pk_0 person EexaBL5jDL4_0 knife EexaBL5jDL4_3 knife Eeyjjk9-BvY_0 horse Ee7CW7lZfXA_1 person Ee7CW7lZfXA_0 person EfE6r-Iq5CM_0 person EfG_eBrAjdI_0 motorcycle EfHCZUHt0d8_0 person EfMCesQKyoE_3 airplane EfNSTkpl6dQ_0 person EfSMsLkasg8_1 person EfjC0VVD2Do_0 person EfvRGCuPoF4_0 person Ef1Tm3dKzbY_0 motorcycle Ef2GKdopP_A_0 person Ef7-yzJqZto_0 person Ef9YiYODEbg_0 cat Ef9q8mAPYZA_0 person Ef_N7JmICUU_10 bicycle Ef_5u21WLbs_0 cat EgDOCraAd64_2 train EgHVReOnDpM_0 person EgPKMlxhH0A_0 person EgPxUnCFS10_3 knife EgYCBIlDm98_0 horse Egf4iNTfanU_0 airplane Egf4iNTfanU_2 airplane EghxGvj6pTs_0 person Egl_1FgGUyE_2 bird EgpSSMkQOEE_0 bicycle EgxlP5S15uQ_1 motorcycle Eg6YUwqAQqM_0 person Eg7bJ46L4Cg_0 airplane Eg7bJ46L4Cg_1 airplane Eg7bJ46L4Cg_2 airplane Eg82FN1vC3A_0 knife Eg9-5uBMrpc_0 cat Eg-cp7jgFA0_0 person EhF73HJvEWo_1 train EhKAs4Z1JE0_0 person EhSaOGOPUns_0 skateboard EhbaW6F3U6I_1 person EhbuzBK5bes_3 giraffe EhbuzBK5bes_2 giraffe EhcmJOG2Jws_0 person EhfmC9Wa8xs_0 person Eho09eptX7c_0 person EhpwK0_8UJA_0 boat Ehpz_gcdCcY_0 knife Ehpz_gcdCcY_1 knife Ehpz_gcdCcY_2 knife Eh6FARrS1VY_0 skateboard Eh7f9wgtUns_0 bus Eh88_JdkWWs_0 person Eh-x-OzZxGo_0 person EiE9eIJ-Rv4_0 car EiLWN5T6wko_1 person EiNTdTOmvDU_0 person EiUbGE2f6fU_0 train EiUbGE2f6fU_1 train EiZG3M9_EMc_0 bird EiaYgqLcbqM_2 elephant EibdBvTND-I_0 person EibdBvTND-I_1 person Eine_0RExlI_0 person Ei1XBJFaUeI_0 person Ei1XBJFaUeI_1 person Ei6ZitRjwdA_0 person Ei7n3944Ovs_0 umbrella Ei9d8OX0ui0_1 airplane Ei9d8OX0ui0_0 airplane Ei9724H_wUs_1 person Ei9724H_wUs_0 person EjcMZ8Y0Oeg_0 boat EjgxtJaNIH8_0 skateboard Ej2wn6JRBzA_0 skateboard Ej7xV32Trwc_0 person Ej8UwQiT5jk_1 knife Ej8UwQiT5jk_3 knife Ej_zFc5qxRw_0 cat EkMGStKSilE_0 person EkMdmPclE3k_1 dog EkTrskvsL5c_1 horse EkWd3wPBEyg_0 airplane EkawSvsvh3g_0 person EkdP_pWa9s0_1 airplane Eke0rATHhX4_0 person Ekh_cm7q1y8_0 cow EklOuZWH-8Q_0 motorcycle EkyydrsMSkY_0 person Ek1DlGGsUdY_0 umbrella Ek4323MkRYo_0 bicycle ElJtz3uv-AQ_0 person 
ElLiin7Cda4_1 person ElLiin7Cda4_0 person ElNzy4USrLA_0 truck ElR4MuOUYKM_0 bird ElgmQr70py4_5 train Elrxptn-Zqo_0 person EluRnlB_s6c_0 train EluRnlB_s6c_3 train ElwZ1M6McHo_0 skateboard El2nzuCxrGk_1 horse El5fRl-4vko_0 knife El9Efl32L8w_0 person EmDjVcaznIA_0 zebra EmDjVcaznIA_1 zebra EmDjVcaznIA_2 zebra EmJeLKaG_hE_2 bird EmJk7hDSzaM_0 person EmJk7hDSzaM_1 person EmJk7hDSzaM_3 cow EmWzmxDjjOs_0 person EmkwHglcEKA_1 motorcycle EmlvoH2AxWs_0 person EmqEntvqLw0_0 airplane EmsMjm0VXJc_0 skateboard Em44RLa7Qp4_0 person Em_UT-f7q0E_1 train EnJkvPAMuaM_0 train EnJkvPAMuaM_3 train EnJkvPAMuaM_1 train EnL2FiVIuJg_0 elephant EnL2FiVIuJg_1 elephant EnS1Yte0Xzw_5 knife EnS1Yte0Xzw_2 knife EnUW7YSmli0_0 horse EnVtYzkXwjM_0 person EnbXP2xywwk_0 person EnmwKpZJTQc_0 person EnoNrjMNAC0_0 person EnrcDrbyUxY_0 person EnrcDrbyUxY_1 person EoaeqRc88HU_0 person EoallCLchmo_0 cow EodtHMtH9zw_0 person EojPQY8nQ2Y_0 train EouV6Ut4NP8_1 person EouV6Ut4NP8_0 person EouZIHzCFq8_0 airplane dYVtJPfJmf4_0 person dYgPc190feM_0 person dYgxCdKNrIo_1 airplane dYjCbeBAgYs_0 person dYmREF5dDkw_0 dog dYosdOz5mZo_0 person dYr1OKT1lCA_0 person dYyHudM-fQc_0 person dYyHudM-fQc_1 person dYzh49Wr9bQ_0 airplane dY9dlzr4w0Y_0 person dZFiRqMkFPc_0 person dZHJc_1os9Q_1 person dZHJc_1os9Q_0 person dZHJc_1os9Q_2 person dZMQgxFHQPA_0 train dZQ2o-4a5tU_0 person dZSQXDQcafc_0 knife dZUOCWwr2xs_0 knife dZaFo3C_1ts_0 person dZdvK41DxLI_3 car dZio0uN6DHY_0 horse dZio0uN6DHY_1 horse dZjnkqYO2lE_0 truck dZmG64W2CtM_2 umbrella dZmG64W2CtM_0 umbrella dZsXB4o-wdE_0 airplane dZzfVDrmMj0_0 bird dZzfVDrmMj0_1 bird dZ1vVETiQAQ_0 person dZ6ub2CEvbg_1 bicycle dZ6ub2CEvbg_2 bicycle dZ6ub2CEvbg_3 bicycle daBl0Q92zLE_4 bear daBl0Q92zLE_0 bear daIJjuHo2EQ_0 cow daMcE2oorrE_1 person daWo89I2Tuo_0 skateboard daWo89I2Tuo_1 skateboard daWywQD6R4g_8 elephant daWywQD6R4g_0 elephant daWywQD6R4g_2 elephant daWywQD6R4g_4 elephant daWywQD6R4g_5 elephant daWywQD6R4g_6 elephant daXEykL8UQ0_0 horse daZHZXfmY7k_0 cat daaHTdFcx5o_0 boat 
daaX2TXbYmo_2 airplane dadAGYt0vS0_1 horse dalHUNR5yAA_1 person dan-4YoB-Vw_0 person daoysu5sfUQ_0 person dapxBMe8Wz8_1 person daqWFFdK8Ck_0 person dawGJDtHlcs_0 person da4jNzO5wL0_0 person da61HPBGEwo_0 bicycle dbU6Fn_5bHI_0 bus dbXr-9m66-U_0 person dbdhdeVMuL0_0 bird dbhGB6XW3fM_0 horse dbxb42TzQ_g_0 skateboard dbysY1V2TwI_0 person dby-fBGIPRU_1 boat dby-fBGIPRU_4 boat db9i2fI8dv4_0 horse dcADt99ndxg_0 person dcADt99ndxg_1 person dcBMrHLTvPo_0 person dcEW4y5AI1E_1 elephant dcHcm85hd5s_2 bear dcH304rxwLY_0 person dcJN3WJZLOE_0 train dcLR55c41rg_1 horse dcLoVk60Gkg_0 cow dcLoVk60Gkg_1 cow dcLp5mtSkPA_0 cow dcO5id4LTVE_0 person dcO5id4LTVE_1 person dcO5id4LTVE_2 person dcO5id4LTVE_3 person dcO5id4LTVE_4 person dcUA_Wf8vrc_2 skateboard dcXdmOY1YCw_0 car dcXdmOY1YCw_1 car dcblbU5lyQU_0 person dcdXiEQkghM_0 person dcdXiEQkghM_1 person dcf4zn9wOjM_1 person dcj9u89LAu8_0 umbrella dcoFS0-09xc_0 person dcoFS0-09xc_1 person dcwbXzJsVDw_1 car dcxhSnf9sg0_1 horse dc1_WHDpL3w_0 person dc-BpV5fuQM_2 cow ddK4WXTyoWw_0 cow ddPN4QZuLBE_0 train ddPxOsA2Cro_0 person ddPxOsA2Cro_1 person ddW0MYEUWlc_0 person ddaqR7COVYo_0 person dddKAnk7-hQ_0 umbrella ddlPux88liU_0 person ddruq0KhCxM_1 skateboard ddsTE3NwHyM_0 person ddtNIDCxqCk_0 person ddw0wDJgJwM_0 person ddxQR-NB6E4_0 person ddzrzJEogWQ_4 motorcycle ddzrzJEogWQ_6 motorcycle ddzrzJEogWQ_0 motorcycle ddzrzJEogWQ_1 motorcycle ddzrzJEogWQ_2 motorcycle ddzrzJEogWQ_3 motorcycle ddzrzJEogWQ_5 motorcycle dd0CsqY6Fbo_0 airplane dd8a6btF_B4_0 person deDEnw72hQk_0 person deNoMwyFOO4_0 person ded6WOfO9O8_1 person deep6EOo6ds_0 person deihMrgBXEc_0 person delKGPVRJsY_0 person demxgFkqGxA_0 bus deqo50gGTBo_1 airplane dew_lb_L9hE_0 person dezAUC4KbJI_0 person de1f8qTDYUI_0 person de2HZ6DBOuM_0 person de4mcJTPj48_0 person de4mcJTPj48_1 person de7-gbLffxs_0 cow de8KeV2waGY_1 person de8V1ovs5eM_0 person de_fGa7Zxus_0 person dfAvID4lRsE_0 person dfAvID4lRsE_1 person dfDTR9mCUZI_2 dog dfEF6SMFbGM_0 skateboard dfKBB3-VicU_0 bus dfK1HsVc2B0_0 person 
dfh2lETTLZI_0 skateboard dfp4iVaXCpg_0 skateboard dfqLJxxdinA_0 train dfsTKKT5-UU_0 person dfseA2X5Cow_0 person df_PzyC0gTw_0 cat df_SYY4pb3I_2 cow dgGYa05XpYo_0 skateboard dgIsZXSKACE_0 person dgOQKwvhLpE_1 dog dgTYRveHMjM_0 cat dgYN1OH5oc0_0 zebra dgl2b2bRpq0_0 person dgtaJOOOtKg_0 person dgweyIjmmDY_0 cat dgyGZqXgvag_0 person dg6u7R87Gh4_0 person dhFII58PWhI_0 person dhIL9wRZMm0_1 horse dhIt9lg6Sbw_1 boat dhUG1gnTlso_0 dog dhZ-JmFNyak_0 person dhcVp1GmJyI_0 elephant dhcVp1GmJyI_1 elephant dhgs2glg_N8_1 person dhgs2glg_N8_0 person dhiYTV7DJLY_0 cow dhjeKi58cuU_0 cow dhkFVTvJ6ZU_0 cat dhy85XNJT3c_0 horse dh03d5vq1B0_2 dog dh1XFXciUf4_3 bus dh1XFXciUf4_2 bus dh6zZFXD0_c_0 elephant diDDNe-MVfs_1 elephant diMmgSNBO8k_2 person diRn1fE6zMg_0 person diSTaGHORrc_0 person diSZzd4jM0E_0 person diUCxWmV084_0 person diZ-mRLPpqI_0 person didB6Es7Des_0 person didTjworKXg_2 umbrella dif0t09rdZg_1 cow dioELry6bbk_0 airplane dix7GRytfcw_0 person dix7GRytfcw_1 person di1KJ0Mb5M8_0 dog di2TPYyIeWc_0 dog djIw9AQoU3o_2 person djLPrNtPSY8_0 person djLUJy1sWMg_0 cat djNzrBpqnnY_0 car djSxYfG99k8_0 car djaGBINLXTQ_0 elephant djh9QeYLg7M_0 airplane djiTvgkjTW4_0 train djiTvgkjTW4_3 train djiTvgkjTW4_4 train djiTvgkjTW4_5 train djiTvgkjTW4_7 train djlet5--ZW0_0 person djlet5--ZW0_1 person djpCG2oprrA_1 person djvQyzGNp7o_0 person dj2Qk--KIkk_0 person dj6yGGCBFWc_0 person dj8d91U-F_0_0 person dkQWD9hv4fo_1 train dkQbDCav3eM_1 person dkSetHNXnNY_0 cow dkb-6x7zo5E_0 person dkdCTCL5imo_2 truck dkdCTCL5imo_3 truck dkdCTCL5imo_4 truck dkiOcFZwrA0_0 bear dknj-Sv4HUs_1 person dkpsViIYlsI_0 cow dkw4aWG9l6E_0 bear dkw4aWG9l6E_4 bear dkw4aWG9l6E_5 bear dkxLcr2kvIM_1 horse dk3Nf8K3RzI_0 boat dk4gT0vHgeU_0 person dk6h_GL9OZo_0 person dk7QISqnWZc_0 bird dk7juEuA2is_0 bear dk7juEuA2is_2 bear dlAMvsjssrY_0 person dlDsSVM3JJ8_0 person dlG7MtSpAK4_0 person dlIG99k9Hoo_0 elephant dlIkYaty1Uw_0 car dlNMnGKJJjU_0 cow dlQ1Gr54T74_11 bicycle dlQ1Gr54T74_14 bicycle dlQ1Gr54T74_5 bicycle dlVOuZK_1bY_1 person 
dlVTSnDsl38_1 knife dlW_HPbVriI_1 truck dlW_HPbVriI_3 truck dlW_HPbVriI_0 truck dlW_HPbVriI_2 truck EpH59JsxI3w_0 car EpIb8r7uBqM_0 person EpJ_M6rB_PA_1 bird EpOaQjhIh_M_0 airplane EpP_TLXxb7Y_0 cow EpSURaF1BfY_0 truck EpT8zxDFPf8_0 cow EpVdzlk5GYU_0 truck Epd3r6iiqVk_0 bicycle EpeIZCFbjw0_0 skateboard EpnttpyYTAo_0 person Epoqtu0Pqe4_0 cow Ep8bd1STWKw_0 motorcycle Ep81Lk66O50_0 person Ep84L7WDoyE_0 person EqBJeYu5f_E_0 elephant EqBJeYu5f_E_3 elephant EqHBjvHkvf0_0 person EqJR5UZAlSg_1 car EqLYPeo9ZC0_0 person EqMqvcHp8Ko_0 car EqMqvcHp8Ko_1 car EqSYKCxmeDA_0 dog EqSYKCxmeDA_1 dog Eqh7XqsYl5M_2 person EqmnFPweBmk_1 boat EquATbp9uL0_0 person EquATbp9uL0_1 person EqvMMBAZP2o_0 person ErUllSQJNgI_4 elephant ErUllSQJNgI_5 elephant ErWUOje4g8Q_0 motorcycle ErX04vJ-JcU_0 cat Erf0FkqYsTE_0 person Ero36xFQKS4_0 truck Ero36xFQKS4_1 truck Er4yJXTWNNo_3 bicycle Er4yJXTWNNo_4 bicycle Er4yJXTWNNo_5 bicycle Er5D0fXZsjk_0 person Er9tboOA5k8_0 person EsEreMKZP7Q_1 person EsQ05q5ZZVM_3 skateboard EsQ05q5ZZVM_5 skateboard EsQ05q5ZZVM_2 skateboard EsYZbF7hCTE_0 person EsZV26-jxX8_0 motorcycle EsbWwOYbT8Q_0 train EskqA8x8mX4_2 airplane EsrUSkNrqWs_1 person Es0O5wtTZ2Q_0 person Es9GOUryI0U_0 person Es9Yq8uZ4fA_0 person Es-W0AxQ5Us_1 cow EtebDuK3fUY_0 person EtlKR9-Q2dk_0 person Etx8YkcrSF8_0 person Et0RRuaW-Rg_1 dog Et1PKq61KAk_0 person EuETmswYRrs_0 cow EuHJB5UXmZg_0 umbrella EuHvelij5ao_0 cat EuIGG3PoslE_0 person EuInxfWuqqA_0 person EuZnOeXR020_0 person Eua2VIbXEMs_0 boat EufXUqphYVw_0 person EumfsHXsVGk_0 person Eunz2V1RXXo_0 train EurWaA7qCDw_0 bear EuwjSGtSYlY_0 person EuwjSGtSYlY_1 person EuzDIk8ag30_0 person EuzVaAXsy4o_0 motorcycle Eu0nzh2HQNk_0 person EvDZK2cFYVE_0 motorcycle EvGoGf-YCA8_0 bicycle EvKPt0vynKY_0 truck EvN8x67_EQ0_0 person EvZF9DagIoQ_4 horse EvZF9DagIoQ_0 horse EvZF9DagIoQ_1 horse EvmcyDEPnoA_1 skateboard EvvbUe6FBSM_0 bird Evvij-hmE4A_0 person EwBKceBTBbo_0 dog EwBwIUrHR3o_0 person EwBwIUrHR3o_1 person EwDyryqt94g_4 airplane EwDyryqt94g_5 airplane 
EwKIz0qAvKQ_0 person EwSJeylFWsY_0 person EwUGFtWeyMA_0 person EwUeAvO5mrE_0 cow EwU8puKxN8Y_0 person EwU8puKxN8Y_1 person EwWCc9whfDI_0 cow EwYNowdS57c_0 person Ewet2EA1xX8_1 elephant Ewet2EA1xX8_2 elephant Ewet2EA1xX8_0 elephant EwozH_35SDg_0 person Ewq-V9jATzg_0 person Ew8lEc8Ufi8_1 bus ExCPGilpuMM_0 person ExCPGilpuMM_1 person ExCjkt_zXuw_0 person ExCjkt_zXuw_1 person ExJjWM_rAnI_3 airplane ExJjWM_rAnI_1 airplane ExPBVcERfwY_1 person ExPBVcERfwY_3 person ExPBVcERfwY_4 person ExT3xg9phtQ_0 person ExVHmko3jfY_0 horse ExW1ju88BW8_0 cat Exb1TjMi76I_0 boat Exc3W9o5-04_1 horse Exe2EizU9VQ_0 cow Exe2EizU9VQ_1 cow ExfZl3DY8JM_0 person ExfZl3DY8JM_1 person Exl9alp64lE_1 person ExqpcHBGBlw_1 person ExvcP05yrS0_0 person ExvcP05yrS0_1 person ExxZODpPkQQ_1 train Exz2WL2-kR0_0 giraffe Ex4__JMKkqI_0 person EyMzZV5iTEA_0 horse EyP_0uEuXVs_1 bear EybT7tq6XGk_0 person EymmgPoUyuM_0 person EymmgPoUyuM_1 person EymmgPoUyuM_2 person Eyn7IfnWm4o_0 airplane Eyn7IfnWm4o_3 airplane Eyn7IfnWm4o_1 airplane Eyn7IfnWm4o_2 airplane Eyp8nornJW0_0 bear Eyp8nornJW0_1 bear Eyrfi9lGdoo_1 airplane EyuKu6qMB6g_0 person EywYZ3Gjwuc_0 person EywnxH68jDU_0 cow Eyzwbz1ZxmU_0 cat Ey2TgrQ30Z0_1 bicycle Ey2TgrQ30Z0_2 bicycle Ey36TlCS4rQ_0 person Ey4BLGQL2Bg_0 bear Ey7eosaz0zU_0 person Ey7us0SSVAs_0 airplane Ey7us0SSVAs_2 airplane Ey7wIzCkFU4_0 person Ey7wIzCkFU4_1 person EzC0tuKaVGA_0 person EzEX4OUEHHQ_1 skateboard EzGa4SSPsbI_0 bicycle EzYjRjhff20_0 person EzZEWp1cluc_0 person EzeDITt3y5I_0 person EzeDITt3y5I_1 person Ezlyx_EudUQ_0 person Ezlyx_EudUQ_1 person EzuizVcVbSA_0 person Ez6I4TpzC5I_0 person E0K5Ll7wHUw_0 bird E0YZDyUoHTM_0 knife E00cOMpNw3o_0 motorcycle E01EgIBFxRk_0 person E038teDC3EM_0 person E0-Z0KM1UB4_1 person E1AwHXQ00ns_0 person E1MTmF3FAN0_0 bicycle E1NfSTmGCRE_0 knife E1ZhuBRYvKY_0 cow E1bNSKg9iv8_0 horse E1oEO09-bAw_0 dog E1pmsS_ufrs_0 person E1xPwEvYymk_1 person E1xPwEvYymk_2 person E1xPwEvYymk_0 person E1zxNG3Fglo_0 bird E17S76lXHfI_0 person E1_ETAQHwcM_0 person E2O5Y6VAhIc_0 
person E2O5Y6VAhIc_1 person E2Pobz5qoAE_0 person E2Pobz5qoAE_1 person E2Vqlq1BQYs_0 airplane E2WWQOKGeb4_0 skateboard E2aiCls-clY_0 person E2lj1iRVceA_0 skateboard E22IW-PgLfU_0 person E28Cad7vBrw_0 person E28Cad7vBrw_1 person E29-bZY3lEo_0 airplane E3NmlH6taDs_0 truck E3SKOBDl6u0_0 person E3enDSeq6P0_0 person E3tmvYSpQSQ_0 person E35M5UWMXeE_0 horse E35M5UWMXeE_2 horse E4Bl9c7JbYs_0 person E4DFW1SxJfY_0 dog E4DFW1SxJfY_2 dog E4TfSUdVt8U_1 truck E4pulnGY9X8_1 person E43SZ65LnfY_0 cow E45LqepDuqg_1 person E5BtXla2lCQ_0 bicycle E5CQkNJct6Q_0 motorcycle E5HB-EDNtE8_0 person dlZZzrMO6yY_0 person dlbAWAuByWk_0 person dlcovhFKigE_0 person dlh5RGS5Bzw_0 bird dlkVXsIhcZg_1 person dlo83yH621I_1 cow dl2g71ftw9A_3 train dl2g71ftw9A_4 train dl2g71ftw9A_5 train dl6ogvuxF78_0 person dl_fuQYhAP8_0 person dmDdRd6wULk_2 dog dmJ1DuWiAdM_0 person dmMz5FhGOCc_1 person dmVAi4WMi3M_0 person dmVAi4WMi3M_1 motorcycle dmVAi4WMi3M_3 motorcycle dmW77KHtuCQ_0 horse dmYSNG-7VCg_0 elephant dmfX7DsSS1k_0 bicycle dmuWxnAfMn4_0 elephant dm4rFNN7FZQ_0 truck dm-lOmiP2d8_0 cow dnAQ7q60f_g_0 elephant dnB0we4_DrY_0 cow dnB6auv8PBk_0 person dnFZkG7_E1w_0 person dnNh07bnI_s_0 cat dnUXo5nstys_0 person dnVV1s-LcAY_0 person dnY-4hOzYts_1 person dncQtuB_6qA_0 motorcycle dncxd1B2sLk_0 giraffe dnwqVE3lPyY_1 train dnwqVE3lPyY_2 train dn_r7u_5apk_0 skateboard doHOuG6wqXY_0 motorcycle doSDuIGLFXY_0 cat doTj5H8Uf1I_0 cow doUwj_z1x5o_0 elephant doX3oiADm_s_1 person domu9ia2Vo8_0 person dorx67yK7WU_0 bird dovn1QHCR7o_0 person dowbL0CZ5do_0 bicycle do1QIWrYeW8_0 person do5o5Dw0vPc_1 elephant do5o5Dw0vPc_4 elephant do7abiC5aZk_1 car do82ENX9cOc_0 person do-LmSJTPj4_0 skateboard dpDG64ULlUg_0 boat dpGCSoTITrw_2 elephant dpJWbIaQYoI_0 person dpJWbIaQYoI_1 person dpQP5r61_GQ_0 person dpUorqkSYZE_0 dog dpYYMgh5TS0_0 truck dpcwUs5srlc_0 horse dpi0u6pfCTM_0 person dpjLyHb9AyI_0 person dpkF3SwOunc_0 dog dpn6vUVXBuM_2 umbrella dptZbHZQYPM_1 dog dptZbHZQYPM_2 dog dpxGzRQqAaU_0 motorcycle dpxGzRQqAaU_1 motorcycle dpxGzRQqAaU_2 
motorcycle dpxVPiv62SY_0 person dp2cUWhnP0A_0 knife dp3Q_aTYeJ4_0 person dp_JQh45a50_0 person dp_1VrEUWbU_0 person dqCFYWRf9g8_1 elephant dqDLl7BlAAA_0 skateboard dqFRS9o1CSU_1 person dqOoL5LiXc8_0 boat dqQPbKE4UhQ_0 person dqTlCZzLk6A_1 cow dqWEwvhVNiI_0 person dqavRiIA-38_0 truck dqj-msAUvnc_0 cat dqzc4W6f-x4_1 person drAhAL_F38Y_0 person drAh2lmjDs4_0 skateboard drJGoPHMunk_0 person dreDU-1isrI_0 person dre_PgfS8yw_0 elephant drf5ijiEkUo_0 person drm2oJ3X1HM_0 person drqFwF60pgE_0 airplane drqFwF60pgE_1 airplane drqFwF60pgE_2 airplane drqe2hP0PKI_0 person dr3TumG_tlI_1 cow dr4dU5UDF-Q_0 person dr8s5VC9Fxg_1 person dsLbM2wZHrc_0 dog dsPwJ3J1ZKA_0 person dsTR1vv9XLE_0 person dsTR1vv9XLE_1 person dsUuAVsJSi4_1 motorcycle dstcI7MYsZ0_0 person dsyBSejpe-k_0 person ds1BJMsasQI_0 person ds6FmQYwgYw_0 skateboard dtDGbuCwBuY_0 bicycle dtHgnX0NtxE_0 person dtMbzXL9wO4_2 bear dtOFqz41TJ0_0 bird dtR2UeJbIvg_0 person dtWfbusf4Es_0 horse dtYdUj-d8fA_0 train dtZrB9iDzgQ_0 horse dtlUL4D7_NM_0 bird dtvZaXxNgKQ_0 person dtwUG12h74g_0 person dt8Tngmse50_0 bicycle duOX3z4IJSY_0 person duTvmDpj0sI_3 boat duTvmDpj0sI_2 boat duV82Wn9rXk_0 car duZYUVeDXEM_0 cat duaO7S-EH1A_1 person ducdg4KXQsg_0 person duoFWPZbeNc_0 person dupnmzaPsWA_6 elephant dutp3txJPTY_0 person duvuNqufLjs_0 cow du5hbB5w3UU_0 horse du96VR7vtOk_0 bird dvKKmu56UkE_0 person dvS2DSYGOGg_0 person dvbVbBosw38_1 person dvgf3R9k0uY_0 bird dvur4MZD_yc_0 person dvur4MZD_yc_1 person dv0ptUC-DIE_0 person dv6ymk8duso_0 bird dv_KURooPDU_0 person dwbRsYPV7Ag_0 person dwpopXTeeGc_0 person dwrYJ92znpw_0 cat dwy7k_gtEco_1 boat dw8kejnR7L4_0 person dw-2_KqGeYY_1 bird dxFrLHoW9jI_0 motorcycle dxGlDl4IukI_0 horse dxGlDl4IukI_1 horse dxViI6VXh6Y_0 dog dxn8VDPNvJM_0 person dxq9r-qrJ2A_0 person dxsQn1MuZRA_0 bus dx0z7DYxGSw_0 person dx4rtOOz7tA_1 umbrella dx6ucdpKZP0_0 person dx8nEHWD1xc_0 person dyAC2ey1DQU_4 bird dyJ83t1zgkU_0 skateboard dyPMbIsTtFs_0 person dyPt3VKGZPo_1 person dyR4vnjF5do_0 truck dyZixtbxEE4_1 person dym-lDsiSTM_0 
boat dyt8LtUqIMU_0 boat dyy3oxsiErU_19 truck dyy3oxsiErU_14 truck dy2J0aeX5eQ_0 truck dy3nkqKOjbk_0 person dy6zETD5NFo_0 cow dzEKq7fsVnQ_0 train dzNRDfnNbeE_1 person dzS2ClyakEg_0 truck dzXv_YFLPqg_0 person dzahMuEcbCM_0 cow dzeNnQOePGs_0 person dzhSVb26d7Q_0 umbrella dzoQb8C3vxE_0 person dzsHYOJpBbY_0 bear dzv-u3s_YtI_0 person dzyVndvBofo_1 horse dz3SP1rd9zE_0 car dz_ATSJBx6k_0 airplane dz_ATSJBx6k_1 airplane d0J7uodSxF8_2 motorcycle d0NY8eqs19s_0 motorcycle d0NtMMBjQp0_1 truck d0NtMMBjQp0_2 truck d0NtMMBjQp0_0 truck d0RIwZfoGNg_0 person d0ZEYzyD9Vg_0 person d0b8-K_6D68_0 umbrella d0hJditcWj4_0 person d0hQQC2i1Y0_0 person d0hQQC2i1Y0_1 person d0hdtlKidzs_0 person d0h9QWelhII_0 boat d0lVKBOzOQ0_0 skateboard d0qGN1A7XJA_0 person d0vHpkvShqg_0 giraffe d0vUARlHvjc_1 cow d0v47QFRyvg_0 person d0v47QFRyvg_1 person d00UKAQHK2A_0 person d02xOzIVP-s_0 person d04Dr38addQ_0 airplane d09H7U6x-Fc_0 cat E5OHeMbBp9s_1 giraffe E5RbbN1bPN8_0 person E5YibOn90Co_0 skateboard E5b9Yug5vbk_0 bicycle E5b9Yug5vbk_4 bicycle E5b9Yug5vbk_1 bicycle E5b9Yug5vbk_2 bicycle E5b9Yug5vbk_3 bicycle E5dBaFyBYX0_0 airplane E5me_giHEOE_1 person E5trQkGM3Wk_0 cat E5wZ4pk5X0I_1 train E59OnpOGBLU_0 skateboard E6Am4hIuXvk_0 person E6Avey2AVRM_1 person E6A8vfHTdOQ_0 person E6EtoMfo384_0 train E6EtoMfo384_1 train E6GvpwdOQrw_2 train E6GvpwdOQrw_3 train E6GvpwdOQrw_8 train E6JLxU918TE_0 bicycle E6XGO0hx4N8_0 person E6Y2QsetU0M_1 person E6s0XT5G7Eo_0 bird E6s0XT5G7Eo_1 bird E6uGh-cPDjI_0 bicycle E62w4NFSm5E_1 dog E64d0EH39M4_0 train E67ceZopcqQ_0 person E67ceZopcqQ_1 person E68IhhK04s0_1 giraffe E68IhhK04s0_2 giraffe E7BIM8cnCrc_2 train E7BIM8cnCrc_0 train E7F0Gt3Rea4_1 person E7F0Gt3Rea4_0 person E7LY2yKO0Jg_0 knife E7MvCesCxNk_0 knife E7dG4qPI_QY_1 knife E7eYGQjaVYs_0 bird E7hXPqOOiqo_1 boat E7hXPqOOiqo_0 boat E7qoCZ2e-vQ_0 dog E7rhwzBxMqY_0 person E7zwjNToyao_0 person E70FO7I2AQ0_1 person E70FO7I2AQ0_0 person E76moy2SQhA_0 person E8JYTxKfqmQ_0 boat E8OzYJ2gVAs_1 bicycle E8OzYJ2gVAs_2 bicycle E8OzYJ2gVAs_3 
bicycle E8RSSepY8tk_0 person E8R5lzlo5qw_0 motorcycle E8Xxr8SUaEY_0 horse E8h4YnZbJg4_1 person E8n_eTUwyhc_0 person E8pbsHhMGOw_0 person E842T5CgJfk_0 person E854nPMWssI_0 horse E8-Z9saoTjk_0 person E8_NjWtQtgI_1 car E9J2Brm4LSg_0 truck E9J2Brm4LSg_1 truck E9N59GTZ8uE_0 person E9R_qLxcZdY_0 bird E9S5Tk5r2wU_0 person E9ZjM9SY__o_1 person E9ZjM9SY__o_0 person E9sCn_XaSHw_1 bicycle E9sHGoiMmXc_0 person E9zmtafFrCo_0 dog E9-1FSPKZ7k_0 person E-DE7HZ04WY_1 person E-OdBMMpwlo_0 umbrella E-VRMpgKXIE_0 elephant E-VRMpgKXIE_7 elephant E-VRMpgKXIE_1 elephant E-VRMpgKXIE_2 elephant E-VRMpgKXIE_4 elephant E-YDPyDXtR8_0 cow E-h1XNBlqsE_0 person E-pnZZeRFyQ_0 person E-q9j7xipsA_0 cat E-seUZ3B-Ts_0 motorcycle E-zFmY_9LWk_0 horse E-0FMMDuLw8_0 person E-3jsRP7KHc_0 elephant E_En6n1IyBw_0 elephant E_GC0IeKtu4_0 person E_K6zdkr0mo_1 person E_Xi5uEIiec_2 bicycle E_e6E8T7on0_0 dog E_02tA9RLyw_0 umbrella E_7qbAkVDYE_0 elephant FAKE4Rfwdik_0 person FATjlgllzBU_0 person FATjlgllzBU_1 person FAdlwBJZk78_0 elephant FAeK9y98GL8_0 zebra FAiIhoJh5uQ_0 cat FAm6HgSzPTA_0 cow FAn11rZ-gsU_0 person FAqiar6B2U8_2 bird FAu0yvyjW-Q_4 boat FAu0yvyjW-Q_9 boat FAu0yvyjW-Q_1 boat FAu0yvyjW-Q_5 boat FAx0CsAigS4_0 motorcycle FA_K15dKk6k_0 bear FBAcUphtxR4_0 person FBIVWWIbq-8_1 cow FBIawPqElJ8_0 bus FBKIUCHqUQk_0 skateboard FBKIUCHqUQk_1 skateboard FBNFSYoMCNM_0 bird FBOWbksU5pI_0 person FBOWbksU5pI_1 person FBjp-C_Sbug_2 bicycle FBjp-C_Sbug_11 bicycle FBnFn5mY2R0_0 person FBwWw9c4KdY_0 person FBz0aAYDBFI_1 person FB8F1ku1XkU_0 person FCBsCwjCPWU_0 person FCQB6p_GcDY_0 person FCRAvY0glAI_0 airplane FCd1d_7Hfpg_0 umbrella FCkT11nk468_0 airplane FClLRpdDi9A_0 person FCnE02wQQk4_0 bird FCp7AKKYViY_0 person FC-ONjCL7tM_0 person FC_gwQU4yrs_0 horse FDJyHtHix-0_0 train FDYS2AyPJhc_0 person FDZBIlbFrk0_0 person FDej1TTCjP0_0 umbrella FDfaLuM3y5A_0 person FDkiv1x0OGQ_0 car FDq3yKNo4Qs_1 person FDvTPzckQKc_0 motorcycle FD3pT-lj2tc_0 cat FEM7OGFO_BI_0 elephant FEN0F0V1nhg_0 dog FEOAvRWKb-k_0 airplane FEU4yHFzkZs_1 
person FEU4yHFzkZs_0 person FEWZolQuMv0_1 person FEfYdrS3kFc_0 bird FEjcdYO4xPo_0 person FEoFDmI0pxI_0 airplane FEzBza78J4w_0 person FEzBza78J4w_1 person FE0DpZ9GXoM_2 person FE0DpZ9GXoM_0 person FE0DpZ9GXoM_1 person FE0Q5phKq3c_0 person FE0Q5phKq3c_1 person FE4gj8EYF9k_0 person FE51Dml-nZY_0 person FE7iv_llNT4_2 bicycle FE-JTPLk3fI_0 truck FFCtm1GZH_s_1 bird FFHJUeZ_KKE_1 truck FFHJUeZ_KKE_2 truck FFLxkwDj1b0_2 bus FFME8B_6LNA_1 motorcycle FFQl2DLyjdk_0 cat FFQl2DLyjdk_1 cat FFantnd2gLY_0 person FFantnd2gLY_1 person FFd_4DPNyRI_0 car FFijp_s0YwA_0 dog FFi3nSvA0WY_0 person FFjqbw4R9l0_0 cow FFm26XU-R7c_2 person FFm26XU-R7c_0 person FFm26XU-R7c_1 person FFndlV1rKas_0 airplane FFpyQ_5PU7M_0 bus FF9eHa3K8fM_1 bird FGO6y3WssIg_0 person FGQCxd5EAx0_2 airplane FGQCxd5EAx0_3 airplane FGQCxd5EAx0_1 airplane FGcS28ri5uY_0 person FGdEufjjhtg_0 person FGicL13npRI_0 person FGkNC4hzcfM_0 person FGkx6qk4oDk_0 person FGmjmDC1RoU_1 skateboard FGmjmDC1RoU_0 skateboard FGoutavzP5Y_0 person FGqrkJ3h0DA_0 skateboard FG0PrdHReB0_2 person FG5l2wX8ccA_0 horse FHAj71IwE7E_0 skateboard FHA6nVCnv28_0 person FHB5eraeYEw_1 knife FHJupOaUmtQ_1 train FHOLOunv9Ec_0 horse FHTc_V_05W0_1 bird FHT1DAZpJVY_0 cow FHZ-3pbJQrY_0 bird FHgO4zu5RGA_0 person FHu50D73Fzo_0 person FIA67WzAuNs_0 bear FIA67WzAuNs_1 bear FIB12MYkANg_1 bear FIDI0sZMPVU_0 person FIGhnuJWX5M_0 person FIGhnuJWX5M_2 person FIHYnB8Jrh4_0 person FIMbYQASgkk_0 horse FIQ1iL3jVkM_0 giraffe FIV4OFmfS_s_0 person FIV4OFmfS_s_1 person FInOWVIV_go_0 motorcycle d1N4NJqa_8E_0 person d1PqtOyYTY0_0 train d1PqtOyYTY0_1 train d1PqtOyYTY0_2 train d1Quy8k5O88_0 cow d1UWs3bPTsc_0 person d1UWs3bPTsc_1 person d1YYgiXq3tw_0 person d1YYgiXq3tw_1 person d1bzn92PO0c_0 person d1eo2OWc45Q_0 cow d1tf08A41eo_0 person d1ukwE8h4f8_0 horse d1wbMXvcgNc_0 person d1wlubAM1-k_0 person d10K79pdybE_3 train d14rOFFvTg4_0 person d14rOFFvTg4_2 person d14rOFFvTg4_1 person d165nDy63o8_0 bicycle d165nDy63o8_1 bicycle d17kaiZ5Ztc_0 person d2DRRd9l3TI_0 person d2RD5tyZt6c_0 
person d2TxcbWHoBM_0 cat d2WfBDEMf40_0 truck d2ZGi2fOtPY_0 person d2cDVorBK8s_0 airplane d2cDVorBK8s_1 airplane d2e49A9MnF4_0 person d2lSueNvuG4_0 horse d2ns5iCGj78_0 elephant d2sn_b1z1Vw_0 person d2wHwCwQymw_0 person d2zgNRFDpSw_1 bird d203fSHLzv8_0 train d21TfucuHss_0 umbrella d217pENbZVs_0 person d28DHw2okF8_0 person d3F_Gm514J0_2 elephant d3G8COtsJco_0 person d3MN8Sm5tiY_0 person d3MVAijPTjY_0 motorcycle d3P2bH2t8IQ_0 person d3Wdg9MPgLA_1 skateboard d3Wdg9MPgLA_0 skateboard d3duKA35FEI_0 person d3jP_YP-6EQ_0 person d3ro5gubiaQ_0 person d3ro5gubiaQ_1 person d3rzFaWiWwA_5 truck d3sHFgbvhIU_0 car d33yoN6QyYg_0 bus d36tDEgs-IA_0 person d4A2uUrnVWI_0 person d4Cumy6qZPY_0 truck d4DbIWORtjY_0 person d4DbIWORtjY_1 person d4GvMFc_Vqg_0 knife d4Le0GuzhaY_0 skateboard d4QkJdQwkCo_0 motorcycle d4VJot5IZek_0 person d4VJot5IZek_1 person d4WRTfC57h0_0 horse d4b9-LX5V1s_1 cow d4hB6abJCs8_0 person d4mhHPSo7C8_0 skateboard d4q-0AcOs78_0 person d4vhL4dar5s_0 giraffe d4vhL4dar5s_1 giraffe d45YTUkd_9M_0 person d47DPSbvftI_0 person d484zxSSkJM_1 person d4_lDGwny4k_0 skateboard d5Ao3JBz7WM_0 person d5B0EMjLeZE_0 person d5PBtpn_6JQ_0 person d5gDBPwofbs_0 person d5gDqlNLGmw_0 person d5hj8eaC5fQ_0 person d5jIlHa1Y6o_0 cow d5m8giMORSk_0 person d53_McJDtt4_0 person d55FAEl6kfM_0 cat d55rz05ynyg_0 airplane d6AkvjKCaE0_0 person d6AkvjKCaE0_1 person d6TWHVESLa8_6 cow d6TWHVESLa8_5 cow d6VCXnnHXGQ_0 person d6VCXnnHXGQ_1 person d6YTAD3T2i8_0 person d6a2EN1cB-4_0 person d6cgbxc35Ms_0 person d6mM21E4x-4_0 umbrella d6m3DUG5E7Y_0 person d6uLbEhrIvw_0 airplane d65wDJoMyA8_0 person d67YXl13SSo_0 person d6-bn34gHFc_0 person d7H5qLPNFz0_1 elephant d7cwZ3G7xSU_0 bird d7kWNGqyvRk_0 person d7mQdSSoZ2E_0 person d7m0BF65qro_1 person d7m0BF65qro_2 person d7n5m9UuhP4_0 person d7n5m9UuhP4_1 person d7yxmt8AvOM_0 person d7yxmt8AvOM_1 person d71rdGKeKkE_0 person d74EhPMCxb0_0 person d7-3m4Nz8fk_0 horse d8CJ5urtRlk_0 train d8HIJN0pULI_0 person d8XcNMVXCD8_0 bicycle d8b-SN3JEvk_0 person d8dPRbquLuM_1 person 
d8dPRbquLuM_0 person d8t8y3kLzgc_0 person d84iekZaJHc_4 knife d9JyT5Kko5c_0 person d9LvxSh5P-Q_0 person d9OaiymMq0w_0 person d9PCSJzZTy8_0 person d9Pj3WrvXXc_1 person d9S0dKjWhNU_0 person d9S0dKjWhNU_1 person d9S0dKjWhNU_2 person d9YlucRFs0U_0 person d9cSZXEb_5E_1 person d9dysX9rdmA_0 skateboard d9hh6urZ5FU_0 train d9kzobAaimY_0 motorcycle d9lIw5maa3M_0 person d9qijNyVVmU_0 person d95k-74VSVE_0 cow d-JD-mAXyIA_0 dog d-Mnc38YAmw_8 truck d-OQw6tKhuM_0 knife d-S3AmiMI1s_0 car d-e8mKtYWjk_0 person d-e8mKtYWjk_2 person d-e8mKtYWjk_1 person d-fv8fmGSlY_0 person d-hMPjLP2WE_0 bicycle d-hMPjLP2WE_1 bicycle d-hgDDQ3kwg_0 person d-h6ncywZ58_1 person d-h6ncywZ58_0 person d-oFe9Z0Obs_0 dog d-oFe9Z0Obs_1 dog d-rpsQgR8sw_0 person d-22m5Sq5OU_0 elephant d-5xdAZSjX8_2 skateboard d--9RMf5LCA_1 boat d_AudyfCYzg_0 car d_EP2nM4YMw_0 bus d_ElAbuvxGQ_0 dog d_ElAbuvxGQ_8 dog d_SB-LVXyi0_1 horse d_SmnRMWLD8_0 dog d_S0JCKcFCg_0 cow d_hsQ2L-klo_0 person d_nTA-SKHNM_2 knife d_nTA-SKHNM_6 knife d_ocJQiPpn0_1 skateboard d_vnePeLmwI_0 person d_2HhXHP8fg_1 cow d__UUbvo2t4_0 person eADPEBi8wWs_0 car eADPEBi8wWs_1 car eADqJI9JKq8_0 person eAFdLVF01GU_0 bicycle eANH6WnEpPs_1 person eAPcJi7CaBw_2 horse eAPcJi7CaBw_1 horse eARl2H_FaEU_0 cat eAXN0KAt66I_0 person eAXN0KAt66I_1 person eAYoRncVO74_1 person eAZbke5Perk_0 car eAfmOFI5jUM_0 horse eAsHKktPNSo_1 horse eAvDt4p-AvA_1 knife eA3lmhfjTuM_0 cow eA5hiUXY2_Q_4 airplane eA5hiUXY2_Q_6 airplane eA7FV9uQbYw_0 bus eA8fIAfGi5k_0 person eBB5vRA9JPE_0 knife eBHEKUkaBcI_0 bird eBLisw9b8i8_0 cow eBLisw9b8i8_1 cow eBLisw9b8i8_2 cow eBMqhmQr7vI_0 bicycle eBRcZ5KDeEA_0 knife eBgLKDW3lH4_0 person eBmdALv9WEE_0 person eBwWJ_geg4Q_0 knife eBy554vRg9M_0 person eB83_xIotrw_0 bicycle eB83_xIotrw_3 bicycle eB_ZHbAvx-c_1 person eCInOWr32gc_0 dog eCNG8qj36vs_0 cow eCSzfVb87kI_0 person eCUuH2vPeDI_0 person eCWhtTVetLA_0 umbrella eCeVtq40bcM_4 bus eCf8h359-j0_0 bus eClBvJnyYa4_0 truck eCmgHa6ThE4_1 person eC3Fwv7Uows_0 person eC-5SEhAGvo_0 cow eC_fRVwxsiI_0 person 
eDJamx945Ao_0 elephant eDJamx945Ao_1 elephant eDSAGlcfwKA_0 person eDSmePW-Vrg_0 person eDXqzj7vKFI_0 motorcycle eDX2HUt9ttU_0 person eDX2HUt9ttU_1 person eDuzDDESzU0_0 person eDwjZL3IGqM_0 person FIrviDrZriY_0 bus FI2T176uKi4_0 person FI4oF175yHo_0 cow FJCE3uzu0i4_0 dog FJL5lb3wBKI_0 airplane FJPRJ0A8BII_0 boat FJVcRzA_pdI_0 airplane FJdcStnbgU0_0 person FJl_FwYbg8s_1 knife FJmyu27Omwk_0 person FJsMdQrRgFs_0 train FJvHbRGgbXM_0 giraffe FJvSXVq8PPk_0 bicycle FJxbfz8q8Qw_0 person FJzU4eC5GiI_0 person FJ5jeLsVXys_0 elephant FJ5jeLsVXys_1 elephant FJ7oeGn4dBM_0 cat FKGFVLnchKE_0 skateboard FKGFVLnchKE_2 skateboard FKGFVLnchKE_4 skateboard FKGFVLnchKE_5 skateboard FKKoXDLhFjo_0 person FKMCYA2_RMs_0 horse FKMCYA2_RMs_2 horse FKMsbMSiqrQ_0 person FKTETXdoJjk_0 person FKVxjU1kTMM_0 person FKWzB37H8-E_0 cow FKdcZ0D4-K8_4 horse FKdcZ0D4-K8_1 horse FKhBf2FcrKE_1 person FKhBf2FcrKE_0 person FKnj73Wv84c_0 umbrella FKsZiccYt_g_0 person FKwKsWjLhiI_0 person FKzvgRVfOjM_1 horse FKzvgRVfOjM_5 horse FK0ezSvbg7o_0 dog FK37T3KvNUU_0 cow FK8OxK802HI_0 person FLF92L3WRrs_0 person FLF92L3WRrs_1 person FLQzeGFBo2I_1 bird FLQzeGFBo2I_2 bird FLTewjXG6Wc_1 person FLTewjXG6Wc_2 person FLTewjXG6Wc_0 person FLWAw0tGOo8_2 bicycle FLWAw0tGOo8_3 bicycle FLZeutEdtzU_1 horse FLqZVv798FE_1 person FLqZVv798FE_0 person FLq3zU7UtgQ_0 skateboard FLr23Hv4LfE_0 train FLr23Hv4LfE_2 train FLskMa3WD7M_0 person FLyV4pkEHUg_0 person FL1q74zVLvo_1 truck FL8ulwhcOho_1 car FL-QttmKDc0_0 airplane FL-73OGqifE_0 cat FL_DeYOGkaU_2 horse FL_DeYOGkaU_0 horse FMHc-oH_rOE_0 person FMTZga_deFY_0 dog FMig7WOUQyU_1 bear FMig7WOUQyU_2 bear FMv3NfETfq4_0 bicycle FMv3NfETfq4_1 bicycle FNCMx4Aum_M_1 motorcycle FNJmejn3KNQ_0 truck FNJmejn3KNQ_3 truck FNJmejn3KNQ_5 truck FNKJAi0Xbz0_0 person FNNdAL0qtWM_0 horse FNSpSfZSQfE_0 person FNbjJJgHt6c_1 person FNgfcu9JUHA_0 cow FNjDy-du_gs_0 truck FNv5k4sCs5k_0 person FNxfPhr1AZk_0 person FN1B1veyxCQ_0 cow FOAmP97Gboo_0 elephant FOAmP97Gboo_2 elephant FOL80Pq_HSs_0 cat FOXwGm4ddCk_1 
person FOacAsl9vUM_1 bird FOnRpTgHAdI_0 person FOyA2uyFS0s_0 car FO-yhRhInHQ_0 motorcycle FO_sYJabdgQ_1 bird FPBkLbjkE0I_1 person FPC9a1ebnRk_0 person FPFEZjz68RM_0 person FPHxPqZ9of4_0 elephant FPIVRAQI9Ao_1 airplane FPS-rWu8sfw_0 truck FPdj2aDA2Is_0 person FPd8NgysFbw_0 person FPhiHYzZrc8_2 bird FPmbKUp9Apc_0 person FPoBK2S6-kE_0 elephant FPpdaMeuTPM_0 person FP-joReSPjM_1 train FP-joReSPjM_4 train FQBe4ewvq3k_0 bus FQDYCsUTzLU_0 person FQIKRtrwRJU_0 person FQKMItJWON8_4 bicycle FQNa7v1nuHs_0 bird FQNa7v1nuHs_1 dog FQPeEa0PIhY_0 person FQQ5mFLQS_8_0 airplane FQTA_Rs2r4k_0 airplane FQa2-poPUOQ_0 person FQiI3CA-HsU_2 person FQiI3CA-HsU_0 person FQiI3CA-HsU_1 person FQnnRHyzLcE_0 boat FQyvUPmvsSo_0 bus FQ0G5VjpRO8_0 cat FQ09pTeRKXM_0 person FQ8nNpJodyM_0 person FQ_PnAPHimg_0 train FQ_YvOmwGng_1 skateboard FQ_YvOmwGng_2 skateboard FQ_YvOmwGng_0 skateboard FRBmAObAjLg_0 umbrella FRCsksZQW0g_0 motorcycle FRFZtNbUMfU_0 person FRFZtNbUMfU_1 person FRKbwt_HIJY_0 cat FRUF5D_Bg4I_0 boat FRZeTLb7R70_0 person FRcpw1KTh4w_0 skateboard FRh68K9peM8_0 knife FRs6gVga80M_2 airplane FR0IeE_jWVE_1 person FSCpm1kxTIE_0 umbrella FSJSVNwlHck_0 person FSSrkLtKRBk_0 person FSchPfgxMmk_0 person FSmTDuGYKRo_0 person FSrvVBrHdIY_0 person FSrvVBrHdIY_1 person FSs-_cK-4DE_1 bird FS8ZnDA42Xg_0 zebra FTHxfldxSrg_0 person FTlLAXuBE2M_1 person FTlLAXuBE2M_2 person FTlLAXuBE2M_0 person FTr8b641J_g_2 zebra FTr_sg-tAYA_0 person FTr_sg-tAYA_1 person FT7LfULOrmU_0 person FUNI1-oxWb0_0 person FUPer2xPyRM_0 person FUQokq7Dm_0_0 bird FUWPXNKt90g_0 skateboard FUcLObUwigo_1 person FUcQGevNVQs_0 person FUp8cy7p6kc_0 person FUt-f-8QJmk_0 boat FUzb9oSwhq4_2 horse FU63gEB5T14_0 person FU63gEB5T14_1 person FU-Gyo-nX8w_0 person FU-Gyo-nX8w_1 person FVGYeJ_eKRY_0 person FVGYeJ_eKRY_1 person FVSihamjW0c_0 person FVcaEg-4Saw_0 airplane FVm133076uE_0 person FVxqyMXxbTg_0 person FVxqyMXxbTg_1 person FVyZRq7FJUM_2 person FVyZRq7FJUM_0 person FVyZRq7FJUM_1 person FWAdovzWBpk_0 person FWCxpF5CAAo_0 person FWH6qzGM4Ko_0 
cat FWTx-_C46YA_0 dog FWVW97tTSiI_2 skateboard FWZANVS2JwI_0 bird FWbVfjbC570_0 train FWd_KJNB1hY_0 person FWeJwZsAuq4_3 knife FWiwkCVxsvU_0 airplane FWpcgznz11Q_0 knife FWqFrwl7d-g_0 airplane FWqFrwl7d-g_2 airplane FWuSKVVP9Gw_0 airplane FXPnVqm98h8_2 car FXbqlcQOm4U_1 car FXcjcGBH8uA_0 airplane FXdP8V2Fyag_0 bus FXdevKY06to_0 bus FXjUPTGnrIk_1 person FXjUPTGnrIk_0 person FXrzFKXFtUE_0 skateboard FXvqDQa0_pw_0 bus FXz3PiouB_s_0 truck FX7DATABx3o_0 person FYPRZ3A5Wug_1 horse FYQxEw6enVw_0 knife FYR_8E37mhY_1 boat FZJlwJ_5CIY_0 person FZJ0L36775Q_0 bear FZOwW_igs2Q_0 person FZUo3m0w40U_1 boat FZXz9ivLbZE_1 person FZfD0ASOr-0_0 person eD5a0lOEA4c_0 person eD5_C8Rnll0_1 cow eD9mxZpbjpo_3 knife eEBoNITml_U_2 airplane eEBoNITml_U_5 airplane eEKY2ZIJ7cw_0 person eEKY2ZIJ7cw_1 person eEUzIzmFpmg_0 dog eEZirBqUuUc_0 cow eErb9l8tm9Q_1 person eEwALO20qQs_0 motorcycle eEzaprIjPOA_1 horse eE7zgmIkklg_0 person eE_bJ6JguBg_0 person eE_bJ6JguBg_1 person eFDTDuBtPdg_1 elephant eFIUN94eOFY_0 skateboard eFKWB3vWXzM_0 person eFNnJotKCuE_0 dog eFQAqsrxJIk_1 cow eFQAqsrxJIk_0 cow eFYXRQfFBFk_0 person eFYi8GYHOwc_0 bus eFYi8GYHOwc_2 bus eFYi8GYHOwc_1 bus eFbHzEjDjsQ_0 person eFbHzEjDjsQ_1 person eFbOmylKLps_0 bicycle eFbOmylKLps_1 bicycle eFbOmylKLps_2 bicycle eFbOmylKLps_3 bicycle eFbOmylKLps_5 bicycle eFbmkhM4yvA_1 skateboard eFeLxXgEWb4_9 airplane eFeLxXgEWb4_10 airplane eFeLxXgEWb4_19 airplane eFkMiDqxNNg_0 person eFn7qz_Ik-g_1 bicycle eFsEtWFKOCE_0 person eFsEtWFKOCE_1 person eFsJVO58dOk_0 motorcycle eFsJVO58dOk_1 person eFtXO4KQyP0_0 person eF6vo2K3X7Y_1 horse eGANqnJQvcA_0 person eGEeIkSKn9I_0 person eGFxLRdHt9o_0 person eGIMcDTDuZI_2 giraffe eGKe_SHbpew_0 dog eGLaqISw-ZU_0 cow eGXX9n0KkAw_0 train eGavpqx_a-Y_1 person eGeSgNqD64Q_0 cat eGp90l6AeQM_3 horse eGp90l6AeQM_4 horse eGp90l6AeQM_6 horse eGp90l6AeQM_7 horse eGp90l6AeQM_1 horse eGp90l6AeQM_2 horse eGsO1ybeNmw_0 person eGulNc3Hz6E_1 person eGw-BT7HLw0_0 person eGx11vRzfMI_0 person eG420j0UncU_0 cat eG9ay7ouawQ_0 boat 
eG_gCk-NdFc_0 bicycle eHFxA8eOkKo_1 dog eHJOSAF8Ksc_0 boat eHMokGJS_8k_0 bird eHPZiFRZgH8_0 person eHS3e7Drwlw_0 horse eHYl5vL9urI_0 person eHYl5vL9urI_1 person eHZGFVBiNbM_0 person eHhu8cP6sYY_1 truck eHlKAc_jO3w_0 horse eHlKAc_jO3w_1 horse eHmn6jMH470_0 bicycle eHo7GgOz-4M_0 bicycle eHo7GgOz-4M_1 bicycle eHpMDoo4x9o_0 person eHpMDoo4x9o_1 person eHrYu8_xQuI_3 airplane eHuFhF5mn60_2 dog eHuHorwvDFE_0 person eH-lfDuzZRU_0 person eIbRJYX77lM_1 person eIceWO1K4hg_1 knife eIlLo4L0TBY_0 person eIm2mZqCQIU_0 person eItSvz_9tc8_1 horse eI5A6Q8wsk8_0 person eJGswWs5a_U_0 person eJJBtIMsces_0 cat eJNeGPvJZBs_0 person eJN7jtqxGc0_1 person eJO3ahTuQlg_2 knife eJTzEdYt2KA_0 person eJTzEdYt2KA_1 motorcycle eJZyuG0FB0M_1 person eJg7Dq1HzW8_1 person eJi66YisQnM_0 cat eJnTGfqwSKw_0 person eJntPRQdD6A_0 cow eJntPRQdD6A_3 cow eJxFV3LV_-o_1 elephant eJzkkZWgmiM_0 person eJ2omVOUJv4_0 person eJ4AprAxRh4_0 airplane eJ4AprAxRh4_7 airplane eJ4AprAxRh4_5 airplane eJ9q5sR4oiE_1 train eJ9q5sR4oiE_3 train eKBgCy3izjg_0 person eKCONra70xU_1 person eKGFKx5vbJw_1 bird eKGFKx5vbJw_2 bird eKJMggclbAI_0 truck eKYCRb3cMSc_0 cat eKcN648xBxg_0 cow eKdNbqJsxIY_1 car eKirxEVv1N4_1 giraffe eKpHpiZZSOY_0 motorcycle eKsu0SXh0Cg_0 giraffe eK5wkhSqhQg_0 person eLAIclbgwtw_1 motorcycle eLAIclbgwtw_2 motorcycle eLCZ9U490do_0 person eLK_O-E6TXY_0 cow eLLFV2_GBOs_1 cow eLLFV2_GBOs_4 cow eLLFV2_GBOs_5 cow eLLFV2_GBOs_0 cow eLLFV2_GBOs_3 cow eLRLhwJpaKE_0 person eLXWvZhL6g4_0 cat eLfUxNIWQn8_0 cat eLsJ-MoKt-c_0 motorcycle eLzEA8IlB5E_0 cat eL2OKu4DhkM_1 bear eL-v_R-bG30_0 skateboard eMJ8eEFu7lo_1 car eMJ8eEFu7lo_3 car eMN980Fn4Kc_1 horse eMQEyMimXFU_0 cat eMWM---NOF0_0 person eMcgmNHMY_g_0 person eMdVb5oIUWc_0 person eMgUOtsKC0w_0 train eMsSwXfIf7o_0 person eMv2h_s0LpQ_1 skateboard eMwSfQmonxM_0 bird eM5e2PBO5hY_0 giraffe eM-1RwyzQpI_1 truck eM-1RwyzQpI_4 truck eM-1RwyzQpI_5 truck eNDHGq_Vm3A_0 person eNEaC09BQF8_0 person eNG3je3HCHI_0 person eNG3je3HCHI_1 person eNIXfUjWW10_0 bus eNSkFxbG_L0_0 skateboard 
eNTeTVBDq8U_0 person eNVGmOIKNII_0 skateboard eNVGmOIKNII_2 skateboard eNYeXwUr7rY_0 skateboard eNbwp7DEy6A_0 dog eNbwp7DEy6A_1 dog eNlXrdcWYPA_0 person eNllsU_utBs_0 giraffe eN0ufEmLTDM_0 person eN3a3uFzNxw_0 person eOJorgJNcl4_1 car eOMSAOLQMc0_0 person eOMro57lp5o_0 bicycle eON5oS1ddkA_2 knife eOXMKiuur7c_0 person eOZ2mMo0l60_0 person eOe9DskHw1g_4 airplane eOe9DskHw1g_3 airplane eOhLZkf2gyQ_0 person eOj2KctQDKQ_1 bear eO0M1RCeWaA_0 dog eO9s3APOXdI_0 bear ePDBmIR0Mnk_1 bear ePEoVXrSERQ_0 person ePPnXOa8FII_0 motorcycle ePWPPUSuctk_0 horse ePWPPUSuctk_2 horse ePWPPUSuctk_3 horse ePaqZZz_gtY_1 horse ePgL4a_1DcI_0 person ePgqzaxKKo8_0 person ePhchRaBs-k_1 airplane ePhchRaBs-k_2 airplane ePjAF53eBSA_0 person ePkzyffCJhs_0 person ePli_zXbgF4_5 bear ePli_zXbgF4_1 bear ePli_zXbgF4_2 bear ePli_zXbgF4_3 bear ePli_zXbgF4_4 bear ePoC0Pj8xLA_2 person ePo6J3guHBw_0 person eQA0KwcbJlQ_0 person eQI72zFfl34_0 cow eQI72zFfl34_2 cow eQMmOyBJUaA_0 person eQOqA8LeUOU_1 truck eQOqA8LeUOU_2 truck eQOqA8LeUOU_8 truck eQS3V0HV61g_0 person eQTlUSSbOyY_0 person eQWRQaVSPT8_0 skateboard eQXSsw2MJGk_0 horse eQZEFoxVGuY_2 person eQZOAGlSYBc_0 person eQcocP3auyk_0 car eQfbBM_c96I_0 knife eQfbBM_c96I_1 knife eQi8AZ4DQO4_0 airplane eQjFi5iBL-c_0 skateboard eQl0Q82jNOY_0 cat eQmSzg2ZEpw_0 person eQmSzg2ZEpw_1 person eQoRdZR8_q8_0 person eQpbjnMSNLE_1 bus eQ1R5EruVgo_0 bird eQ1R5EruVgo_1 bird eQ2eWzgVggo_0 person eRAZ8LnDRN4_0 person eRBc8OmROx4_0 cat eRCMzS-dM8o_0 person eREzhoz4UA8_0 bicycle FZieBxFsZO4_4 bird FZieBxFsZO4_7 bird FZieBxFsZO4_8 bird FZieBxFsZO4_11 bird FZsDQUdCBiE_0 person FaINra3PYko_0 bus FaINra3PYko_2 bus FaINra3PYko_1 bus FanmFyCIvSc_1 skateboard Faxr0F1n4lk_0 person Fa8JS9CCs60_0 person FbC6M7cRN1k_0 person FbLE0CqDZ_I_0 person FbN-_RdBAoA_0 person FbRfH2tJCZg_0 train FbUasHXeVXg_1 person FbVrmfwHLD8_1 car Fba1mHso_c8_0 person Fbcl3O89qPI_0 person Fbryy4ItyRo_0 motorcycle FbsxP5HIH-w_0 person FbtbQbo3w6A_0 person FbtbQbo3w6A_1 person FbtbQbo3w6A_2 person FbzdX2M1spw_0 
person Fb9GVgZUQkk_1 bird Fb-bT-5HFvo_1 person Fb-bT-5HFvo_0 person FcAKq2q6WuI_0 person FcGoc7P1MnA_0 airplane FcHZFDzsW6U_0 person FcI2xE1s0tE_0 person FcJofbjqKR0_0 person FcNTnULQ914_0 train FcPxUMks1f8_0 airplane FcQ9ypCnsnM_3 elephant FcdE5l-9Cl4_0 person Fcfkxe_EegE_3 skateboard FckxSGw75TA_8 elephant FckxSGw75TA_1 elephant FckxSGw75TA_3 elephant FckxSGw75TA_4 elephant FckxSGw75TA_6 elephant Fcmq6FVlPrs_2 horse FcyT7NFOtOU_1 truck FczLlZB8PPQ_0 horse FdBcdDQa2Yc_0 person FdG3QrZtdYo_0 person FdM1BVOZnpc_0 person FdM1BVOZnpc_1 person FdYZH48B1gQ_0 giraffe FdYpikKc6Rk_0 person FdcxQx4sFow_0 person FdgWx-kasEQ_0 car Fdgw87Au0kg_0 person Fdp1t1Kk42s_0 person FdvgBe0Ix0A_0 person FdvgBe0Ix0A_1 person FdviMb1gxkI_0 elephant FdyA9CQ40Xo_0 cat Fd1Rn6HvibQ_0 bus Fd1ZmuLPSNA_2 truck Fd1ZmuLPSNA_0 truck Fd1ZmuLPSNA_1 truck Fd1ySlMqOEk_0 horse Fd6kpMD00LI_0 person FeAmji-BcLE_0 skateboard FeHGwC6UYlQ_1 person FeHGwC6UYlQ_0 person FefqZU-M3NQ_0 dog FeioRbELmKY_0 cat Fel-MqoIa98_0 person FenJI9gPekk_0 person FevOpclGxX8_0 person Fe0XVxKTD10_0 person Fe1ne3adKqs_0 person Fe1o0fdRyjk_0 person Fe1o0fdRyjk_1 person Fe1o0fdRyjk_2 person Fe_r1BcuOm8_0 airplane FfCfKve9svg_0 elephant FfGzM6IRg6I_0 person FfTyXxo_JLY_0 horse FfWtRI5MlvQ_1 person FfWtRI5MlvQ_0 person FfddIx2fdDE_0 person FfkcxMLN90Q_1 person FfkcxMLN90Q_0 person FfpScNxcfaE_0 person FfpuED53W2w_0 person Ff3kCsp4dss_0 horse Ff37VadXulw_0 person Ff37VadXulw_1 person Ff-s3k4nzl0_0 cow FgAW1wm55t4_0 umbrella FgBAfHhZDtY_0 cow FgCkJ9L956k_2 horse FgHkoen3Fbs_0 person FgHkoen3Fbs_1 person FgHkoen3Fbs_2 person FgK205YdiNI_0 zebra FgaH6B8Im-s_1 person Fgh-oweWR10_1 truck Fgh-oweWR10_2 truck Fgh-oweWR10_5 truck FgkgjnYWuvc_0 motorcycle FglWoBFeCGs_0 boat Fgqe5FVDM7w_1 bus Fgqe5FVDM7w_3 bus FgtxhgrL-1s_0 bicycle FhAkQ-D6j7M_1 person FhNe0p3NvAk_0 person FhS2OrbfOqA_0 horse FhTIUIB4MQk_0 person Fhdb7UXlKgw_0 person FhhQQi3XBRs_0 person Fhim9zq_3dc_0 dog Fhtl-JSkWvY_1 skateboard Fh1QSbERb_I_0 person Fh1jlYGKYy8_0 cow 
Fh2wm1SuBlM_0 person Fh5hapK4iY0_0 horse Fh-e1BaovqE_0 person FiAj5FRP_QI_0 bear FiAj5FRP_QI_1 bear FiAj5FRP_QI_2 bear FiGZEZ8BFeg_0 person FiLeL7fMtKI_0 person FiMl9o33Uaw_0 person FiQbZpev_LA_0 person Fim4ZNdANXI_0 horse FipIgAA0lFk_0 bicycle FirrKl6H41c_0 person FirrKl6H41c_1 person FivrGIBKDvo_1 elephant Fiz1rnLi2OM_0 person Fi4kJfnwDFc_1 bicycle Fi4kJfnwDFc_0 bicycle Fi7LPQxqu14_0 person Fi9uLLmtWaQ_0 person Fi_IAiAUqaU_1 horse Fi_IAiAUqaU_0 horse FjBRf4S85bg_0 elephant FjBRf4S85bg_1 elephant FjCz86a5wp4_0 person FjF5nRRKjKc_0 person FjMslXNPmHo_0 airplane FjRDB5KtmZk_0 cow FjUvDc65QJo_0 person FjZltjNG2NU_0 skateboard FjfP5wdsmM0_0 cat Fjo3Q6r1Unc_0 cow FjsVcnD_MIg_0 motorcycle FjvoIjZBqfU_1 person FjvoIjZBqfU_0 person Fj98ZrblH1g_0 umbrella FkAQLLdAAbk_0 elephant FkAQLLdAAbk_1 elephant FkFAVoUYxPc_1 skateboard FkOkAlvY34U_0 cow FkSrQgrkwxM_0 person FkSrQgrkwxM_1 person FkZy3LGoN9I_0 dog FkkUslZGIbg_0 bear FkvcJknwKuY_0 person FkzewHxki8o_0 skateboard Fk4XzK5XI6A_0 bus Fk4XzK5XI6A_1 bus FlD1RAiVpek_0 person FlD1RAiVpek_2 person FlD1RAiVpek_1 person FlEhS-F3ygQ_0 umbrella FlGO6UYJUzE_0 horse FlNEteNmUhc_0 person FlR1fAhH2Xo_0 dog FlYY0RaMPNY_0 person FlgN1oA45yM_0 bear Fl2yqFTps4E_0 person Fl6OhW0-1w0_0 person Fl9EhNo7Keg_0 person FmDFcSMFeno_0 person FmDFcSMFeno_1 person FmDOHRJspxI_0 person FmMYoani5Vg_0 person FmOLwdbHDxQ_0 person FmOfXWRFoXQ_2 bird FmUhkvEy_7s_0 person FmUhkvEy_7s_1 person FmVDxGIS5zk_5 train FmVDxGIS5zk_7 train FmVDxGIS5zk_8 train FmVDxGIS5zk_9 train FmVDxGIS5zk_10 train FmVDxGIS5zk_1 train FmVDxGIS5zk_2 train Fmc6udEpldU_0 cat Fme4Abd5nUA_2 bird Fme4Abd5nUA_1 bird FmoAxj0I_HE_0 person FmqOvCWa7zg_0 person FmrozJZpKR8_0 train FmsAY671mqQ_7 knife FmuPNtoqS2E_0 elephant Fm1Depfmi_k_1 person Fm5EMiek6AE_0 person Fm6Hq8f2Qxk_1 airplane Fm6Hq8f2Qxk_2 airplane FnEnQ8PP_eE_0 skateboard FnEnQ8PP_eE_1 skateboard FnGScEGhwDA_0 person FnKvuj-emb4_0 person FnKvuj-emb4_1 person FnMl1BAE_jc_0 bear FnMl1BAE_jc_4 bear FnNceIdqZ3w_0 person FnNceIdqZ3w_1 
person FnTofG0IZf0_0 person Fnb6xihA7ck_0 person FncXKaqIxJo_0 person FncXKaqIxJo_3 person FniMTwzxRZQ_0 person Fnv6GlZeZ98_2 airplane FnwZm6-uVkU_0 person Fn6j8CspFw4_5 horse Fn6j8CspFw4_2 horse Fn6j8CspFw4_3 horse Fn6j8CspFw4_4 horse Fn7CPx1Df1I_0 dog eRGlFEYZ74g_0 person eRQQ8fY6DVA_0 person eRToPN2xDdI_1 horse eRToPN2xDdI_2 horse eRVbBhT_bcs_0 person eRXcoQINrwY_0 cow eRa3aIGemkw_0 person eRiOVczmKs0_0 person eRk0k7ru0C0_0 person eRlVo64o3EE_0 horse eRn_VZZAhDc_0 bird eRpQzm5PYXw_0 person eRvRu0q-GoE_0 dog eR2L8Yeikhc_0 person eR2s4XgNo7o_2 dog eR6IwGLaa1M_0 bicycle eR7y-Ei3DLg_0 person eSGBtfzFobI_0 train eSId-3VXvKk_3 dog eSIwAUMyFgU_0 person eSKH9cYOKk8_0 horse eSPrJOSU8AM_0 train eSa1vsOaz1c_0 knife eSiLV8rS59E_0 person eSiLV8rS59E_1 person eSljhVPS-Ik_2 person eSljhVPS-Ik_0 person eSpAsKZSmiA_0 airplane eTDKrXMMrQ0_0 cow eTKPoRwNChU_0 person eTKPoRwNChU_1 person eTKSWSWvAyw_1 person eTNf-Cqbbro_1 person eTQF3UDg8qc_0 truck eTTKvmF97nI_0 elephant eTUWLCcJU2k_2 bus eTU8LeMW9qA_0 person eTc1z6mbb50_0 truck eTdIp3O6Gdc_0 bear eTkYJ5e2d6g_0 person eTkbZ2QtHvw_0 train eTkbZ2QtHvw_1 train eTpyN9lx8_4_0 horse eTsE0jLxU3w_0 truck eTsE0jLxU3w_2 truck eT3B8Dicg34_1 person eT5K9fPU-0g_0 person eUGoobFpS4s_0 person eUKe6XaWIfA_0 motorcycle eUQjLdCSTbY_0 person eUQ4P2JG1yg_0 bus eURPg0TbtFI_0 person eUU0KJ-w2bc_0 person eUVgOxQT_-8_0 cow eUbEHnOzRA8_0 person eUbEHnOzRA8_1 person eUbEHnOzRA8_2 person eUe_Rayk8X8_0 person eUyzGl0--ms_1 person eU6G8jITD_Y_0 airplane eVJOOrHqc34_1 skateboard eVL1UQ_nteE_0 car eVNGBAn5Oxc_0 cat eVPABDrI9js_0 bird eVYydWvg5Go_1 person eVcLRosJZew_0 person eVhB8QJJogM_1 knife eVn8akHyS64_0 airplane eVn8akHyS64_2 airplane eVn8akHyS64_3 airplane eVn8akHyS64_6 airplane eVuy4uctm28_0 person eVu1gME4-Qs_0 elephant eVu1gME4-Qs_1 elephant eVywFyCLwko_0 person eVzfhyg8qFU_0 person eV2KIbTSnH4_1 train eV4pA62ABv8_1 train eV6nRsgY8PQ_0 person eV64Qw4Zebk_0 person eV-VIypuuNY_1 bird eWHnCpVoKhw_0 truck eWbvhqFVvXk_0 boat eWlQOgHQT7g_0 airplane 
eWpIepmfRus_0 person eWpIepmfRus_1 person eWsle8FxRvY_0 person eWyDiulNMGo_0 motorcycle eW6l7xJBq-Q_1 boat eW6o2X8qAtQ_0 car eXDegroOl34_0 person eXECAC_iXPc_0 person eXLLe0Z-fJk_0 person eXUIt5B2NQc_0 person eXYniqUW4z8_0 bicycle eXYniqUW4z8_2 bicycle eXaCA1qL7uY_0 person eXeifN6Jv8c_0 elephant eXeifN6Jv8c_1 elephant eXeifN6Jv8c_3 elephant eXeifN6Jv8c_4 elephant eXeifN6Jv8c_7 elephant eXfkthdw2L4_0 person eXixQXmPyYw_0 elephant eXoF6xS_5u4_3 knife eXuelMqu_1M_0 knife eXveKyc2TQg_0 horse eXxAlPRFiqs_0 person eXxAlPRFiqs_1 person eXxAlPRFiqs_2 person eX3bd4kHxuc_9 airplane eYDpQFJpz7k_0 person eYJe2k1E0XQ_0 bus eYY-Mz3L_Ac_1 elephant eYeHu-IftM0_0 person eYnlQEvgHVc_0 cat eYqlHj6MSc0_7 bicycle eYyGqoW9Q3c_0 bus eYyri5GAJDE_0 person eZEN_5rnTLM_0 person eZL3Ew4O7YI_0 person eZXS_3nTpdo_1 motorcycle eZXS_3nTpdo_2 motorcycle eZZb5rnc1iA_0 bus eZf-Rsr1aNs_1 train eZgo_XfmmO0_0 person eZgo_XfmmO0_1 person eZl_FRsZx3o_0 person eZym_LkJnpY_1 knife eZ2Y_Qtg0VU_0 horse eZ2Y_Qtg0VU_1 horse eZ4N2Y737ss_0 person eZ_peGgPSDE_0 person eaHXGY8ImzY_0 person eaOqHSeEVG0_0 elephant eaR-dFaZRGc_2 giraffe eaTX3J2X23g_1 person eaTX3J2X23g_0 person eaalMrdHsQ0_0 horse earUgdES0lk_0 person eaxPmkwGK5U_0 bird ea1EeKBBjxk_0 umbrella ea1YcZPjbxU_2 truck ea4saeRZ0_M_0 person ea8mbQn2kv0_2 dog ea8mbQn2kv0_1 dog ebFgEyNciRc_0 cow ebMZJ-lUhbw_2 bicycle ebMZJ-lUhbw_3 bicycle ebMZJ-lUhbw_1 bicycle ebOubiwIUC0_0 person ebV9mcxICDs_0 dog ebY5nNOPdN0_0 person ebY52fJyTPs_1 person ebY52fJyTPs_0 person ebagV2pOV20_0 boat ebhnTUXh7Pc_0 cat ebh7xOXlO7Y_1 person eboXP28MlOE_0 airplane ebt0_AWnuyM_3 bear ebyMEAOqPhQ_0 skateboard ebz4umtEYag_1 motorcycle eb0UO8Y5r5A_0 car eb1-qD5D7Us_0 person eb5d4XIDSqs_0 car ecDEmZdWz8Q_0 person ecGOS5ZO0Tw_0 skateboard ecGOS5ZO0Tw_1 skateboard ecJIf9dcDHk_0 person ecKMZLATsNg_0 person ecKst7suEZo_1 motorcycle ecPynengjhg_0 person ecUmR_974l4_0 bear eccbjuLjCr0_0 bicycle ecex13DrS00_1 bus ecgqb4spDo0_0 cow eclnV3fwFVg_0 car ecndV9N-b9M_0 boat ecrgwn6gB7c_2 person 
ecrgwn6gB7c_0 person ecrgwn6gB7c_1 person ec0L5W9HzYQ_0 person ec0zPF4t8jM_0 person ec10-YUa1PE_0 person ec4Mjwm2hyQ_0 person ec4ya7ogbFU_0 person ec59VG2krTI_0 knife ec7hzm4ZgOM_0 bus ec8daVdUMW8_0 elephant edErePLiFl4_0 truck edFb7FxjVPc_1 person edOvHaEGfM0_0 train edO7Q7znUJA_0 cat edPmPMqUt4c_1 person edPmPMqUt4c_2 person edS79MnRXwE_1 person edYcGdD4UGI_0 person edd8R4oDMdg_0 person edlAlkitTfg_0 bird edlAlkitTfg_1 bird edq1Zw1FWGY_0 person edrtSs6UdCI_0 boat edtqJ_N0258_0 person ed0O35MjM6Q_0 cow ed5jfyH6JyI_0 person eeEjRmROBZs_0 train eeEjRmROBZs_1 train eeJDVUC0bio_0 bird eeV0a3p0uz8_1 dog eeYr-ujfh4Y_1 person eeYtwUSuQzY_1 airplane eeYtwUSuQzY_2 airplane eeYtwUSuQzY_0 airplane eeZyIsjtgj0_0 train eeahFaPbx5M_0 skateboard eea6uRdJLL4_1 bird eee-1I8uLeU_0 cow eefTfPIGkq4_1 person eef-qkyU0jY_0 boat eepn_UxMI5o_0 skateboard FoFA-VOPhV8_0 zebra FoIc9MjzbBk_0 person FoSynLz7aJ8_0 horse FoSynLz7aJ8_1 horse FoUqmWxXlNU_0 person FoUqmWxXlNU_1 person FobAHnW_q6s_0 person Fog-McdMlO0_0 person FomH9b8uRKs_2 knife Fot4m5WU4Aw_1 person Fot4m5WU4Aw_0 person FouVJvkYyPs_0 person FpCdNHknwMQ_3 car FpCdNHknwMQ_5 car FpEzn8x46OE_0 bird FpGO4RTCIuk_6 bicycle FpGO4RTCIuk_0 bicycle FpGO4RTCIuk_2 bicycle FpGyjKY-NIk_0 motorcycle FpGzMvzCvKo_0 person FpI0Do5LaU8_0 person FpTdRnuOS8M_0 person FpTdRnuOS8M_1 person Fpaob2f1sqE_1 person Fpaob2f1sqE_0 person Fpev0w7vGO4_0 person FprxIVYXUL4_0 horse FpzpuYeDf6M_0 bus Fp1vbL5guA0_0 person Fp2HgWZlr2k_0 person Fp7RJqXwz6c_0 person Fp-TG2XDrC4_4 car Fp-TG2XDrC4_0 car Fp-TG2XDrC4_1 car Fp-TG2XDrC4_2 car Fp-TG2XDrC4_3 car Fp_5yBxyvR4_0 umbrella FqFhpogmR2s_0 cat FqHStgmNnKA_0 bicycle FqHStgmNnKA_1 bicycle FqTHQ5KBbaY_0 elephant FqjhuAhttZw_2 train FqjhuAhttZw_1 train FquAMi_ikSA_0 truck FqxWiT-6dLM_0 person FqxZmvVkHIA_0 giraffe FqxZmvVkHIA_2 giraffe Fqx-wOpqzZo_0 airplane FqzYUW3X9pc_0 person FqzYUW3X9pc_1 person Fq_esHSu_sk_0 person FrC2HuRBsYA_0 person FrC-Gp1GmVw_0 cow FrC-Gp1GmVw_1 cow FrIO6gNGeao_0 person FrUCytgm6sM_2 horse 
FrViqM6fVR0_0 dog FrVxG6x7tj0_0 knife FrVxG6x7tj0_2 knife FrgvokGeeds_0 person Frk0tcM1o_w_0 person Frm5N8YRz_E_0 person FrpsbU7nO00_0 person FrxIGKawDiA_0 person FrzgyfVukw4_0 person Frz8huGrR4M_2 motorcycle Fr0K__Q_Kv4_1 bird Fr2qdnHURF4_0 boat FsHjWJUILr4_0 person FsScYp1HNk0_0 person FsXYM3nf7O4_0 horse FsZyoaRLGfw_1 person FskWl7cTGUU_4 motorcycle FslFjbzL4rY_0 train FsuA_2-7e1w_0 elephant FsuA_2-7e1w_1 elephant FsvwyL1hLDU_0 bus FswGt3qhUXE_1 horse Fs6Lk0xDsWk_0 dog Fs6Vua80iU4_1 bus Fs-DmOC6Ksw_0 person FtAgz58w2vs_0 person FtC0Y3Dca60_0 dog FtD8uBgTi3E_0 cat FtJ8y0gIpKg_0 dog FtJ8y0gIpKg_3 dog FtJ8y0gIpKg_5 dog FtJ8y0gIpKg_1 dog FtJ8y0gIpKg_4 dog FtMshKheG8Q_0 elephant Ftet3EW_gR0_0 skateboard Ftet3EW_gR0_1 skateboard Ftj_1qTEwE8_0 cow FtqLCjhRQgQ_1 person FtqLCjhRQgQ_0 person FtwMaVMlLbM_0 person FtwZasadNWo_1 person FtwZasadNWo_0 person Ft3Xr78g1jg_0 dog Ft4RUB75d64_0 horse Ft5ZV3L5LV4_0 person Ft8VPp_VNJs_0 knife FuCuNV5vL-8_0 person FuIIvsD7qyY_1 person FuMf00RPDmg_0 bear FuNvDTe7cAM_0 bird FuR3p7f2R30_0 person FuTf8iiIHWI_0 motorcycle FuVQuZfX71w_1 elephant Fuc49AUfyaA_1 train FufG8eRehvk_0 person FuoKMOMcl0I_0 bus Fu3A7S4V26Q_0 person Fu4p4U9AqY4_0 train Fu5TDXXdHyc_3 train Fu5TDXXdHyc_0 train Fu5TDXXdHyc_1 train Fu5TDXXdHyc_2 train FvB0FA24g0c_0 motorcycle FvD-5pXN6B4_0 person FvF8CGSAVBw_0 horse FvIqBpjD4A4_0 person FvKJQTsxS6o_0 bus FvNiWF5wWJA_0 truck FvN6HD0c3I8_1 bicycle FvQ8wYSFAhA_0 bird FvZ_lMA5MYE_0 person FvZ_lMA5MYE_2 person FvZ_lMA5MYE_1 person FvcxD9PJ1-g_0 bear FvcxD9PJ1-g_1 bear FviKCn2JGbY_0 person FvksDxENves_0 bird FvksDxENves_1 bird FvmW4A9wN1c_0 person FvslrkU6Ii8_1 skateboard FvslrkU6Ii8_5 skateboard FvslrkU6Ii8_4 skateboard FvuJoToFsZ0_0 skateboard Fv2LjW2C5SU_0 knife Fv2LjW2C5SU_2 knife Fv2SAN8CNlg_0 horse Fv6OQz_y5V0_0 person Fv80QjBLyXw_3 train Fv80QjBLyXw_4 train FwBCZ90I_aw_0 cat FwIN5LlmnSA_0 person FwMy9UR3xJA_0 person FwMy9UR3xJA_1 person FwNHDlUxkVE_0 person FwSQA6A_bWE_0 person FwZzzptQg0s_0 person FwZzzptQg0s_1 person 
Fwf5SGfOguQ_0 bird Fwf_1L-RQB4_0 cow FwhmGtqpt5s_1 skateboard FwrkNuHACuE_0 person Fwtyj6Ut62E_2 dog Fw8NHywJSJw_7 airplane Fw8NHywJSJw_8 airplane FxHZCFGlLk8_0 truck FxI0-u_zPQQ_1 skateboard FxI0-u_zPQQ_0 skateboard FxJg66y6Vj4_0 person FxJ0douRc4s_0 person FxMnA-aNvVI_0 knife FxXVgnAjOCs_0 person FxXVgnAjOCs_1 person FxitbyLzBbw_0 person FxmfshFrhyg_0 person FxmfshFrhyg_1 person Fxp_EDLEylo_1 bear FxxuVRsJiCQ_7 bird FxxuVRsJiCQ_9 bird FxxuVRsJiCQ_11 bird Fx74SXbZiUI_0 boat Fx-8EgSEaDg_0 person FyFea2NifCo_0 elephant FyKB3iEKNlg_0 person FyO1UliwWNQ_0 skateboard FyQulDaVp8I_0 person FyTFrxalrzY_2 bicycle Fyb5_PxuzrI_1 airplane FyjgIZnRT0A_0 person FylDI9Ssx18_0 dog FyqooE73pSs_0 train FyuLo6pvAxk_0 person FyuLo6pvAxk_1 person Fy6UODQTxBw_0 dog Fy8cULzM424_0 person FzJOOqEWb48_0 person FzP8vDH_ynM_0 bicycle FzV_56qru4c_1 person FzV_56qru4c_0 person FzaaAJ_dGjI_1 dog Fzc4L1eWvQ0_0 knife FzeiG746wec_0 person FzoJlCfL5bc_0 person FzpV3zrU7w0_0 cat FzufL9SIDZ4_2 person FzvLoCiUbCU_0 person Fz4RMW4ONrQ_0 skateboard F0Ekv-HAlnk_0 airplane F0G64yaBMBM_0 person F0G64yaBMBM_1 person F0I59IAm-vo_0 person F0Qk5fG3X-M_0 person F0Q9zBIa4vg_0 knife F0Q9zBIa4vg_1 knife F0Q_-7qxWws_3 elephant F0UBtRxGNhA_5 bird F0XjqeFLlgU_1 bird F0ZAshDVPxg_0 person F0c4qnJQtDU_0 bear F0gFV3Zl1ew_0 train F0gFV3Zl1ew_2 train F0hx5kgZ3go_0 elephant F0mBUyvb90Y_0 person F0qXU9y4p-Q_0 person F0z1cmfnPsQ_1 bicycle F1B_Y1twDK0_0 cow F1CZ2DPXJ9M_4 bicycle F1CZ2DPXJ9M_1 bicycle F1KHVI6XeVo_0 person F1eNAhwM5Pc_0 horse F1jGg9828BI_0 person F1j27LEBSpI_1 car F1qXLHQywDc_1 elephant F1sQlUVWZLM_0 person F15XLgp6ED4_0 skateboard F2Bb2pFQRyU_1 person F2EV6W4vdT8_0 bus F2GhztG-3ZM_0 cat F2HupbPd4Rc_0 person F2JDbaIJXuM_0 person F2JeBrL43Kg_0 person F2JnnpLll3c_1 horse F2Kd_wTgfHc_0 bird F2N-fmDDyCs_0 train F2an_w-D4WM_0 dog F2bbT3y10lk_0 person F2dx02YK1MY_0 cat F2kBHcrY7Ck_0 person F2nvlBMOvGc_0 boat F2nvlBMOvGc_2 boat F2nvlBMOvGc_3 boat F2nvlBMOvGc_4 boat F2yvXHbr1Us_6 bird F2yvXHbr1Us_7 bird F20W1m4x2Ys_0 
person F20_Ihwr_1Y_0 elephant F21R2kQ-je4_0 person F2244CO9Fuo_0 airplane F250PqK5Gb4_1 airplane F3AMItpIJlI_0 dog F3AMItpIJlI_4 dog F3FUBdTgY7c_0 car F3FUBdTgY7c_1 car F3Lz3rnQ-7A_0 person F3Lz3rnQ-7A_1 person F3NneLgyZiU_0 person F3RkQzIQjeU_2 bicycle F3XFJeSjPDU_0 bird F3XFJeSjPDU_3 bird F3gY7oCc-j8_0 cat F3j318NP2P0_0 person F3j318NP2P0_1 person F3oP1Se_HdQ_0 motorcycle F35JtGCIiCo_0 dog F377W3trtdg_2 dog F4DJmxH-fuw_0 skateboard F4FXVb3DdJE_0 person F4FXVb3DdJE_1 person F4HgVMHEiVQ_1 bird F4Ja9TDp5eg_0 person F4R1rt0I4Ik_1 person F4R1rt0I4Ik_0 person F4WWEXEO6Cw_0 airplane F4hUo05eI2s_0 person F4hVb1AsJ9M_0 umbrella F4hVb1AsJ9M_1 umbrella F4hp-2UBFcI_0 person F4l8U4NGPMU_1 elephant F4rQJlBkGa8_0 person F4tzOjT91r0_2 elephant F41NWCYabpM_0 person F44j0JHVdfU_2 bicycle F44z7XXoIZk_0 cow F4-R6x6hSno_0 airplane F4-R6x6hSno_3 airplane F5IEcbmSBiU_0 person F5UiBt9FiQ4_1 truck F5brWxznDYA_0 bicycle F5drV0qDFvU_0 person F5pSgana5Ds_0 person F5pwABHMaZM_1 skateboard F5y_lQCCiYk_0 person F51aHL_AuQ8_2 person F51aHL_AuQ8_0 person F51aHL_AuQ8_1 person F54NzXjey4Q_1 person F6AkwJu9acQ_0 person F6BUhbvKAY0_3 bear F6I3hGIdHBM_0 airplane F6L1DckOdFs_0 person F6L1DckOdFs_1 person F6L1DckOdFs_2 person F6UTU1zVfY0_0 person F6X-PDReV8U_0 skateboard F6uVxnnSkQg_0 cat F63FWqs6n6A_1 person F63OB46zw20_0 person F66U-dCKTVs_5 elephant F67kQb83GEo_0 person F7Aw74QT7I8_0 motorcycle F7D1ccHfWQM_0 train F7GYFMuRxr8_0 person F7MruF3gqRk_0 person F7MruF3gqRk_1 person F7M2n9Irv10_0 person F7adrDrejOI_0 bicycle F7adrDrejOI_3 bicycle F7adrDrejOI_7 bicycle F7iFGXShjIg_1 knife F7lmwAhsTVE_1 cat F7lmwAhsTVE_0 cat F7wyUoc1ELM_1 person F7wyUoc1ELM_0 person F72e40LPG8g_2 airplane F72e40LPG8g_3 airplane F72yH9hRoS0_0 person F77I6mkMOmM_0 person F77I6mkMOmM_1 person F77WzfDD-Ac_0 person F77WzfDD-Ac_1 person F8VZcw3-DMg_0 person F8XbiaxQYFA_0 car F8kTGPYH29o_0 airplane F8sVrU5FfZw_0 person F8vyo42LQM0_0 airplane F9KIXBo3lNI_0 bird F9KIXBo3lNI_1 bird F9WnfUhb8A4_0 boat F9hhOJk3fdY_0 person F9jiY40SX4g_0 
person F9kDOaogdPA_0 train F9kDOaogdPA_1 train F9nirQJj4wc_0 motorcycle F9qYvrO4nMM_1 person F9qYvrO4nMM_0 person F942FTRne2Q_0 person F95fIsG0A7U_0 horse F98XVAomn1s_0 person F-AROt5V1zQ_0 airplane F-L2byRMMEI_0 truck F-QpXlvCAdw_0 giraffe F-RVugkjZ1k_0 person F-RVugkjZ1k_1 person F-dxzMmjOT0_0 person F-dxzMmjOT0_3 person F-dxzMmjOT0_1 person F-poowwxrxU_0 person F-3G1FhnsdY_2 cow F-3G1FhnsdY_3 cow F-7EAK7rTI8_0 bird F_AoZsBu8j8_0 person F_AoZsBu8j8_1 person F_BBB0J-9tQ_0 motorcycle F_CsG_jIxC8_1 truck F_I4rwh1mtE_0 person F_JJmqKJBnY_0 person F_Kw8qyfgjU_0 person F_WtOi2ZeSE_0 umbrella F_oxJfyCUrw_0 person F_wVAS7hR9E_0 cat F_5NdFCcCrQ_0 airplane F_59LD9YnAU_2 person F_8qVC7MHM0_0 person GABXImD8qwM_3 dog GADBGhd7Hbc_0 horse GAF3BbJqKos_0 person GAF3BbJqKos_1 person GAGFuwQyn2A_1 person GAVdXzEftIU_1 person GAaPJd_iVeU_0 train GAb6ZqG64o4_0 person GAb9NG_JnoU_0 cow GAe7SnwoPQQ_1 airplane GAg-aVsz7AI_1 person GAinaDnPPO0_0 elephant GAnYrNhN90c_1 person GAoDRtFNSeQ_0 bird GAoaBt8kfHQ_0 train GApyoyRTlPk_0 person GArUrBTpgzk_4 airplane GArUrBTpgzk_1 airplane GArUrBTpgzk_3 airplane GAzsUwyCRAI_0 cow GBF7wVda328_0 dog GBLwQswYGpQ_0 dog GBUiAfFHr8o_0 person GBYAc4swbr8_0 person GBYFzcFWKtI_0 skateboard GBYeOSgHxaw_1 person GBhV-vm_cDs_0 motorcycle GBhV-vm_cDs_1 motorcycle GBhV-vm_cDs_2 motorcycle GBhV-vm_cDs_3 motorcycle GBjWoHEvi24_0 truck GBnf-AAsQts_0 person GBvWcmiB_zQ_0 person GBv60Rpf6hA_0 person GBwqR6gIUJk_0 person GBwqR6gIUJk_1 person GB0RUQ72TDU_1 motorcycle GB0RUQ72TDU_2 motorcycle GB0RUQ72TDU_4 motorcycle GB1A1gXLxF8_1 umbrella GB1A1gXLxF8_0 umbrella GB2Z9Zd9kCM_0 cow GB3M7jlJvZo_0 umbrella GB3dD_Sz5yA_0 cow GCECUCM275I_0 truck GCECUCM275I_3 truck GCECUCM275I_4 truck GCECUCM275I_1 truck GCECUCM275I_2 truck GCHyhn505e4_0 person GCL5aSCyDAQ_1 horse GCR8piyI8to_0 person GCdYlCKelqg_2 bird GCf79ImcoV4_0 truck GCiR2DBKEUo_3 umbrella GCiR2DBKEUo_0 umbrella GCyZCLCX4jI_1 bus GC5X3-Zi5fo_0 bear GC_4PRhWwy0_1 person GDBvvswiioY_0 horse GDErDO6sQxg_0 person 
GDHukw9i8AE_0 bear GDPBufHJ6pE_0 person GDVxjq335kg_0 person GDVxjq335kg_1 person GDW_ebhUmXg_0 person GDeoeNk-jj8_1 train GDgRHR5rt5g_0 dog GDhVskUd-i0_0 truck GDkTfXax1EI_1 person GDr1CfMsWCo_0 knife GDyR3j6e9uU_0 bear GD0qZhFYMtE_1 bear GD5H2vUIQUM_0 bird GD7nVz18opA_0 cow GEC16HE9LPs_0 skateboard GEK0W7Soe5I_0 person GEOILdSs_m4_0 person GEXtPkuLXV4_0 person GElPgxFGsYM_0 person GEmM96O2bm0_0 person GEoAqEILC5I_0 bicycle ee4MHg5K9xo_0 person ee4MHg5K9xo_1 person efANTTg0s7E_0 person efD7irKhsjg_1 zebra efFDVTrJnI0_0 person efQ-zUFNN-U_2 airplane efQ-zUFNN-U_3 airplane efQ-zUFNN-U_0 airplane efQ-zUFNN-U_1 airplane efUVmXxR3pI_0 person efXikRhGmrs_0 person efdHHLZ3g1Q_0 motorcycle effHbT0DhsY_1 horse effHbT0DhsY_2 horse effHbT0DhsY_3 horse efj0ZypW97U_0 person efl9qpSfN9o_0 skateboard efo_cgnnucQ_4 knife efqCl5PWA5Y_2 bear ef6fQWU1KdY_0 person ef9zPCUJ5uQ_0 boat egByT16s_54_0 person egByT16s_54_1 person egHnmalt3d8_0 horse egQiifLgKHE_0 person egVsaW3pIR8_0 bus egotrU2sxIs_1 cow egotrU2sxIs_0 cow egymuz3YUjw_0 person eg0xHA2KO2M_0 car eg0xHA2KO2M_1 car ehAg6V-5Puk_0 airplane ehB-VoBE8As_0 person ehFoBFIrRho_0 person ehFvz7g6tcc_1 person ehFvz7g6tcc_0 person ehF--LpGjPU_0 person ehI3hX4P2gg_0 bus ehSU0TuduDM_9 boat ehSU0TuduDM_0 boat ehSU0TuduDM_3 boat ehSU0TuduDM_7 boat ehSU0TuduDM_8 boat ehTOHuz8De4_0 horse ehhoOXi21uc_0 person ehhzn87_kyY_0 knife ehpsJCYWhMo_0 dog eh0-hoyeQv4_0 person eh383O3j2o8_0 train eh8ClQx55Pk_0 elephant eh8ClQx55Pk_3 elephant eh8ClQx55Pk_1 elephant eh-Hpgj7SPM_0 bird eiIxHOvvvog_0 person eiKfZPTeN-M_0 person eiMVAVfFk50_1 giraffe eiNlPbSqaQM_2 bear eiOC7H2_I7E_0 motorcycle eiYV7UFe9_4_0 person eiZm5CglnLc_0 person eiirsESzuHs_0 bicycle eim8NPBqZXg_1 person eis2vlxPtf4_1 person eivFKGFBySc_0 person eivMnaQyUKU_0 person ei0PFx0qNIQ_1 person ei0PFx0qNIQ_0 person ei4Yn0KXnAM_0 person ejDpzIUHAMk_0 person ejD4KjqrkFo_0 cat ejIMw0_a1Zo_0 person ejIMw0_a1Zo_1 person ejVKT8cDDTY_2 motorcycle ejoDQZqi4DU_0 person ejsflVtvinE_0 dog ejzqfqBU2XY_2 
horse ejzqfqBU2XY_0 horse ejzqfqBU2XY_1 horse ej5D22-gpzY_0 person ekBhYo1n09M_0 person ekGn7Al_5S0_0 person ekOQkNLi9gA_0 person ekPQmhXqsJs_0 cow ekQPPxQDQrA_0 bird ekYErFjRBcY_0 person ekaQzIhIz6U_0 person ekhId7QWajE_0 person ekw22HGT0TY_0 person ek6F1Yy6r4g_1 person ek6F1Yy6r4g_0 person ek9m3wFRD78_0 motorcycle elAJmgZ3uV8_1 person elIopJ6sLS8_0 motorcycle elS7CV83kDQ_0 cat elbH9USSXbU_1 person ele_x5If5RM_0 cat elfDIDNaxO8_0 bicycle elfDIDNaxO8_1 bicycle elk9Eg_zAzA_0 horse elwOqTHVPb4_0 car el_1tnvsCAY_0 elephant emAlGe0D2Ro_0 car emBk5WfF9MA_0 person emFvwwYH0Dk_0 person emLp02HobE4_0 person emO2DsNKmTw_0 train emVjapACNME_0 person emWHcaPL5H0_0 person emXkTzHEyT4_0 boat emhCPyXIbNk_0 person emqrQO4JZsU_1 skateboard emxIavKneZw_0 person emzfRpng4hM_0 bicycle em3XyVBpKCc_0 train enA3HVeW4MM_1 person enCpXewY40c_0 truck enCpXewY40c_1 truck enR0OQhVBwE_0 person enWAeU6n9LQ_0 person enXS9AGUoow_0 motorcycle enY96p1ZALE_0 knife enfPrTim6AU_0 cow engcDIwacLg_1 person engcDIwacLg_0 person en06DIx0cz0_1 person en06DIx0cz0_0 person en6AOaqCY1s_0 truck en9gUgAJoek_0 person eoFFf1yMhOg_0 person eoauVNDdle8_0 person eodvToXk2OQ_1 cow eodvToXk2OQ_0 cow eohpHQHPoXo_0 dog eovUEztTVZ4_0 person eoyj6UfwM1c_0 airplane epIcFi7yUZg_3 cow epK_YUgNzQI_0 cat epUTWEmTW1o_0 bus epXYWAgJeJM_0 person epZSAxAzWRs_0 person epeLK68bI3k_0 person epeLK68bI3k_1 person eph8ACa_bv4_0 person eph8ACa_bv4_2 person epis0oQPudE_1 person epu8oDLyhBw_0 cow epxbwMupoU0_0 truck epxxfkiUpVQ_0 person ep15pnX1AxU_0 truck ep4od2aZYv8_0 dog eqAMk_GzwUg_0 truck eqMRouLMQI0_0 person eqPXFnE2SxE_0 person eqTdm4-YomY_0 train eqWb0eTMl98_0 cow eqiPG6XAei8_1 person eqiVR6aa8XA_0 person eqnF1_Lwa94_0 motorcycle eqswu7XtVeE_0 boat eqswu7XtVeE_1 boat eqvu61eQ-D0_0 person eqwZeHPEjT0_0 bus eq2VUeTEEGM_0 elephant eq2VUeTEEGM_1 elephant eq2-yJIiWyA_0 skateboard eq7fzAhOZEo_0 person eq8-99wqpC4_0 motorcycle eq-XVpUOFlQ_0 cow erDb15O0GYM_0 person erIMuEor6gc_0 person erJzcEpQ-sA_0 person erKEWcCPgjU_0 person 
erKRZXMcCzQ_0 bus erLW6pBgIrE_0 person erLW6pBgIrE_1 person erWerfoGejo_1 dog erZ0-WmkPj8_0 person erfJrdfPp8M_0 truck eri-jOmjJ5U_0 person erprzr0GCa0_0 person errX-c_luf8_0 horse erwHbfRwbDc_0 train eryYeuoNAdw_0 person esEKixC0bi0_0 motorcycle esFUx8MS7FU_0 person esFUx8MS7FU_1 person esHEHZv3XAw_0 person esdMTvdz7G8_0 person esd9prHEDmY_0 cat esnr6cTpfQI_0 skateboard esnr6cTpfQI_1 skateboard esrkVh27SSg_0 giraffe esr3dKZtZ9I_1 person estRADheTso_0 person esxEV1BYf8g_0 dog es0lurDiGrM_0 truck etCrz_vcvJI_0 zebra etFtHhL2hac_6 bicycle etHjccaFHjw_0 person etZXvy6wqZM_0 cat etZjkcz1NXE_1 person etfOefeQ0NA_2 knife etgjVXNON5k_0 person ethiyhktDW0_0 train etrQY3yeg8M_1 person etrQY3yeg8M_0 person etu6chaT_o0_0 motorcycle etu6chaT_o0_1 motorcycle etu6chaT_o0_2 motorcycle euNO4mGjpL4_0 person euS2rEsG-jA_0 person euaiFpmh6SU_1 person GEuy-JvOFBM_0 horse GEwLV10zHSM_0 person GEwYE_QVNHE_0 boat GE061if8j60_0 horse GE8D0jEjasg_1 bird GFCN_4akSi4_0 person GFMwf7Ly_Sc_2 person GFMwf7Ly_Sc_0 person GFN08ryY-U0_2 knife GFTwQgse_Lk_4 knife GFXh14V5BN0_0 cow GFkCQFowcfs_0 person GFkCQFowcfs_1 person GFlTNatYs1E_2 horse GFlTNatYs1E_0 horse GFmBVLxS0W4_0 person GFsVA4Rxqv0_0 cow GFtZEmPze30_0 person GFytNaOS7eE_0 boat GF28RuK9Mio_0 person GF28RuK9Mio_1 person GF29WU5hVFU_1 umbrella GF29WU5hVFU_2 umbrella GF4b86WLzWE_0 person GF-zdmzb4zY_0 bus GGBhXIkXN-U_0 dog GGCSOyr8iNg_0 cat GGNkUcwxgU0_1 airplane GGX2r0RT9h4_0 bird GGY5BDDn5LE_0 person GGtf7t-SVb0_0 person GGytoCC23B4_0 dog GG2kiaUm9pg_0 person GG_CxOFs69U_0 bicycle GHAR-041e4w_0 person GHF_00q4fw0_0 person GHN9eBe1Bp8_0 knife GHWPuquucrM_0 cow GHZjWHKMwyw_1 truck GHqedSEAQ9k_1 person GHqmzbJnjVg_0 person GHu-Q-Jbh6E_0 umbrella GH_-l0dCs1A_0 truck GINmKyxk55E_0 person GIOByl4-GaE_0 person GIQcZHeI0rA_1 knife GIRWosek2kk_0 person GIesL1NmKrU_0 airplane GIiKoRSDN-Q_0 skateboard GIiKoRSDN-Q_1 skateboard GItE5rGj_-g_0 person GI0iwCtSgJY_0 person GI7YeWGyVRM_0 horse GJAe8ctAWb0_0 person GJHbNDEY178_1 person GJHbNDEY178_0 person 
GJIPOsnsWAg_0 person GJIPOsnsWAg_1 person GJL8p4_PeKo_0 person GJMk0Meedm0_0 person GJbtzWK_dYk_0 person GJpkQJ1A6Gw_1 cow GJy5Zhvk6lE_0 person GJ1O_aGTN94_0 motorcycle GJ4kWS7SklQ_0 person GJ7mp6eUiPg_0 car GJ9641JuJGs_1 person GJ9641JuJGs_0 person GKCr5DPt-O4_0 car GKC9zObtOMM_0 person GKEhy910De4_0 train GKWJ0lgaDCg_0 umbrella GKWJ0lgaDCg_2 umbrella GKewJtAM0mQ_1 person GKewJtAM0mQ_0 person GKhEkZ-cdNQ_0 train GKlP0uncbyg_0 person GKlP0uncbyg_1 person GKlP0uncbyg_2 person GKlP0uncbyg_4 person GKmEvD6kEV0_0 bicycle GKn-IcumftE_0 person GKpcLh6EzTI_0 truck GKs6SswOMow_0 skateboard GKyR_cV3NzE_0 bird GK1HKUicpqc_0 person GK7khWET2AA_0 person GLBHzmRhRXw_0 person GLCLinUtVWM_0 person GLJJdMPYSaY_0 person GLLgtpj5VIc_2 elephant GLLkz3ew2Cw_0 person GLN48vyNNE8_0 person GLOfyCC7cpg_1 person GLOfyCC7cpg_0 person GLTbuhg3c9c_0 cow GLTcmtEP3PQ_6 person GLTcmtEP3PQ_0 person GLTcmtEP3PQ_1 person GLTcmtEP3PQ_2 person GLTcmtEP3PQ_4 person GLT0qdbJFmE_0 person GLYc7lsUKvQ_0 cow GLemLQ7Taz4_0 dog GLiiNf5XBGw_1 person GLnBX7vZMds_0 car GLncyVpSovs_0 person GLonpYW6Yi8_0 person GLsxpYW-07A_0 person GLy3RuBdLZ4_0 giraffe GL2K160VZnM_0 airplane GL5i6mrfwJQ_0 person GL6eTReYh8E_0 giraffe GL7g579uon4_0 bus GL_EwiiBm1A_1 person GL_EwiiBm1A_0 person GMCQFxoF1UE_0 bear GMJi6djWGYg_0 elephant GMLP7F_Da2w_0 person GMVqWicQ2d4_0 motorcycle GMeN9Z1A9X4_0 car GMj9b1A2R98_0 bus GM3BiiUS2Xw_0 cat GM31sVP8NMA_0 elephant GNJ088XwXpI_2 skateboard GNLzZ4OPnHc_0 boat GNLzZ4OPnHc_1 boat GNN-BevC79g_0 knife GNRZ4AjoiSE_0 airplane GNawMpiTEFs_0 person GNnrNuC9zGU_2 person GNnrNuC9zGU_1 person GNqCvE7d9mE_0 person GNr1nF-F-40_2 boat GNvEs3KBgRw_0 person GN97F0ERx8k_1 person GN97F0ERx8k_0 person GOE3QOj97xk_0 person GOLZ7CWDXjk_1 person GON778LYTqk_0 person GOQICMUoGL8_2 person GOWRiwkZo2U_0 person GOW84-_w-LQ_0 bicycle GOZwEuPDmzc_0 person GOb0e4ojb3c_0 airplane GOkeNGfFi8Q_0 person GOkeNGfFi8Q_1 person GOpAs6aca30_1 person GOpAs6aca30_2 person GOrO-A4yd5c_0 person GO0RyAWdVQA_0 person GO1tmJmOjZU_0 
cow GO9YRVC_2SA_3 elephant GO9YRVC_2SA_4 elephant GO98cqZbP2o_0 car GO98cqZbP2o_1 car GPABD8HFpQU_0 skateboard GPCArlk4udc_0 bird GPHwY1J1u04_1 cat GPLKI0foxxc_0 person GPUUqd1IyNA_0 dog GPUdCDtaGOQ_2 boat GPViSMkz1ds_1 horse GPViSMkz1ds_0 horse GPZznxc87vA_0 cow GPlHiCxNeIU_0 person GPnO7jt_-JI_0 person GPn2JSguaBI_4 umbrella GPn2JSguaBI_0 umbrella GPn2JSguaBI_1 umbrella GPtN0Kb9qZs_0 train GPzwYc908OM_0 bicycle GP2YaQXsf0s_0 umbrella GQJu2FlmC0A_0 knife GQRDl6gw-n8_2 bear GQRDl6gw-n8_3 bear GQV1QfplpXU_0 person GQ6mrqpELDs_0 person GQ99sfZjwTo_0 person GRMv9irLuQw_0 motorcycle GRQUwn0jA8Q_0 person GRRXv9O7hNk_0 motorcycle GRRullNXQUY_3 skateboard GRTcBPmHWPU_0 motorcycle GRjf8G-WDvc_0 person GRk94EZiwO8_0 skateboard GRo9Bmi4ghA_0 cat GRwCcOF0NyI_0 train GRwCcOF0NyI_3 train GRwCcOF0NyI_1 train GRwCcOF0NyI_2 train GRwvd8Xl-l0_0 bird GR5qTAjCnB4_0 cow GSD3hdUWKNg_0 person GSD_Asi3tsA_0 elephant GSD_Asi3tsA_6 elephant GSIFRlloCGA_0 cow GSMYNBUuI74_1 motorcycle GSb8ilGRCd8_0 umbrella GSkpDZZFQd4_0 boat GSmR-G7zCN0_0 airplane GSqatXKKzUU_1 boat GS1El_XLryU_3 bird GTaW87cQCZk_0 bird GTegSO4BiDY_0 person GTgztSxvdzw_0 horse GTg35QGB0bQ_1 person GTg35QGB0bQ_0 person GTjqtTiUFFA_0 person GTkZ7eZIV5I_0 skateboard GTpF9CW8Kyo_2 cow GTpF9CW8Kyo_3 cow GTpF9CW8Kyo_0 cow GTpF9CW8Kyo_1 cow GTt9sqczKqg_0 person GTuP3gwjf70_0 person GT4askC-EmE_0 skateboard GT4askC-EmE_2 skateboard GT6Ta63CfGc_0 bus GT7pB1SoSWQ_0 horse GUA64cJx_1s_0 person GUG7toTLyt4_0 bear GURTVjQ25hM_0 airplane eufhHTT-6cc_0 person eujtr13Kbtg_0 cow eutsycO_2Zw_0 umbrella eu0WWqOzPNI_1 boat eu07YiPAVxk_0 truck eu6zY6HpY1M_0 person evA7SzcjAkU_2 knife evA7SzcjAkU_3 knife evA7SzcjAkU_0 knife evDr0RJRRV8_0 horse evMMyqn2S94_0 person evRaMSC7xlI_0 train evVOgDU7DsE_6 truck evcE8ru07G8_0 umbrella evcWn6cN50A_0 umbrella evhP2M5P0rM_1 person evksM4sehcQ_0 cat evtk4IiqjkM_1 person evw-tqTTtQ8_0 horse ev1ATOeJPxY_0 person ev1ATOeJPxY_1 person ev53NALjp3I_0 person ev7a6Z-ZOv4_0 person ev-fVsUuvfA_0 person ewB46nb-ZFI_0 
bird ewFZmQCCZm0_0 truck ewFZmQCCZm0_2 truck ewOgoCimrdA_0 elephant ewUWpmdjLHA_0 bicycle ewUWpmdjLHA_2 bicycle ewgdEY7GtsQ_1 airplane ewkBRzmoZzo_1 train ewkBRzmoZzo_2 train ewkeB8zzSVE_2 dog ewkeB8zzSVE_3 dog ewkeB8zzSVE_1 dog ewoUjWEEJS4_0 dog ew9rbdv73TA_0 umbrella exR3lT_G3Yk_0 knife exZF88kJoP8_0 person exjWaQ0ssbM_3 airplane exjWaQ0ssbM_0 airplane exjWaQ0ssbM_1 airplane exn-_MfEP6Q_0 person exoNfV0vU_Q_1 person exoNfV0vU_Q_0 person exw_qJh1qp8_0 cat ex6Il_1Ielw_0 motorcycle ex7mPB9cYwc_0 person ex7mPB9cYwc_1 person ex-yo1W_s34_0 skateboard eyAxkbxVdHA_0 person eyAxkbxVdHA_1 person eyNJXyldIhM_0 person eySeJsY8tZU_0 horse eyZeTi4-udw_0 boat eycvZhhuzOI_0 person eyd3cO1cRyw_0 person eyg_dFAAJ_c_0 umbrella eyi_kSPelbM_0 person eyo2iTfyALs_0 cat ey49lNbkqdQ_0 person ey7evH7qmFA_1 person ey9CIllx21w_2 truck ey9CIllx21w_5 truck ey9CIllx21w_8 truck ezOxb6H18Dk_0 person ezX_8NsARn4_1 person ezYCeDV1Aew_0 bicycle ezam_iANUkY_0 motorcycle ezdehi1wmW4_0 cow ezktd-PtOQo_2 horse ezktd-PtOQo_3 horse ezrNhnjWp-s_0 person ezrNhnjWp-s_1 person ezu6OcJjjLk_1 person ezvAmpvi364_1 person ezyLlrEVZRU_1 train ez4u6-2yh8U_1 person ez7mJtg4aoU_0 cow e0Al-yQwL8w_1 bear e0C174hEUpI_0 person e0HCj6FnKMo_0 person e0HrgDMAL5c_0 boat e0K-Wc2SGSk_0 person e0V--elE2Dc_3 boat e0V--elE2Dc_0 boat e0XejLvBbTw_0 motorcycle e0dXS2okSxo_0 train e0jUh6hQykw_0 person e0jUh6hQykw_2 person e0kJTvItoXc_1 person e0kJTvItoXc_0 person e0qJxStHuGA_1 skateboard e0rXPv5Q8ac_0 person e1KQ3rXcBVg_0 airplane e1KQ3rXcBVg_2 airplane e1KQ3rXcBVg_1 airplane e1S7tY6zlBs_0 bus e1ZNGYPt280_0 cow e1a0tLtZdm8_0 person e1dAdTW0-s8_0 person e1guDr5Lq88_0 person e1iYijyYnIc_0 person e1iYijyYnIc_1 person e1v5-Vy3ikU_0 motorcycle e11u2SRsMQk_0 umbrella e110Ssoc3rc_0 horse e2Biqc_Y8fI_0 boat e2Biqc_Y8fI_1 boat e2C6vpxx1BQ_1 person e2C6vpxx1BQ_0 person e2DeceLJ4QU_1 elephant e2DeceLJ4QU_0 elephant e2DmJ2nN-bM_0 person e2DmJ2nN-bM_1 person e2IXk3LUK0k_1 truck e2Jc499uBac_0 bus e2MbvKCUxBQ_0 skateboard e2oWEimFUeM_0 boat 
e2oWEimFUeM_6 boat e26M0NUTUcs_0 person e29Si0sk8Vs_0 person e3Ep8F-TVbQ_1 bicycle e3Ep8F-TVbQ_0 bicycle e3MrKt1yh3E_0 airplane e3ezeG4Gm80_1 knife e3fz03vzrmQ_0 person e3pGW6uqeQA_0 cat e3tP581aZ0Q_0 person e34jQApS9Bw_0 person e3_zIH1Jrf0_0 person e4R8Aj-X5iA_1 horse e4ZrrwoRRXc_0 bear e4c8OdRhAyA_0 knife e4c8OdRhAyA_3 knife e4iZ27N3agg_0 person e4rO9AJXQzY_1 person e4yT58KhTcs_1 airplane e4yT58KhTcs_2 airplane e4zdJYlc4z8_0 person e47QRGUx_Hs_0 truck e47QRGUx_Hs_1 truck e48A0CBQct8_0 person e5CFfGS4B1s_0 person e5DZWu7GqG4_3 bicycle e5MbNYLt7wU_0 person e5MbNYLt7wU_1 person e5RlRpaBXnE_0 dog e5UjJAZHaBc_0 person e5VUEXqXFTM_0 umbrella e5kfPy-MIGw_0 elephant e5lFDgi4EIs_0 cow e5-Pz_Q8VUA_0 person e6F88LQJoLc_0 person e6G0gHixPGE_0 boat e6IQ-jfygns_0 person e6IQ-jfygns_1 person e6T5hbKQwAs_0 person e6aWxOF189s_0 person e6hz-jEGxsg_0 person e6muu75RFmg_0 bus e6s13mZyuYY_0 skateboard e6s13mZyuYY_2 skateboard e6s13mZyuYY_3 skateboard e6xT3S6wuwE_0 person e64lVlYKNYs_0 horse e7IeNjbA7ms_0 motorcycle e7JZ2C-e9_w_1 skateboard e7Q3z9gbUw8_0 skateboard e7TKWwysO8Q_0 elephant e7W79Xp4qxI_0 person e7aF0fG2O2U_0 bear e7aF0fG2O2U_1 bear e7eZQb8WjmQ_0 person e7xAzZCvd_Y_0 truck e70XtlB-Au8_0 truck e70XtlB-Au8_1 truck e70XtlB-Au8_2 truck e70XtlB-Au8_3 truck e70XtlB-Au8_7 truck e70jqVThihE_3 knife e70jqVThihE_1 knife e72VJJ7jkoI_2 airplane e76gr0pJMLg_0 boat e8BQbcBgcjc_0 person e8VeeESy9Xc_0 horse e8XzpXJnucs_0 motorcycle e8XzpXJnucs_1 motorcycle e8XzpXJnucs_2 motorcycle e8Y4hXyFPDY_0 person e8ZFu6n4mg8_0 person e8b7eo56B5Y_1 person e8b7eo56B5Y_0 person e8mSJe1G9U4_0 horse e8mSJe1G9U4_1 horse e8mSJe1G9U4_3 horse e8mSJe1G9U4_4 horse e804z6ehgWE_0 train e836XbTclWA_0 person e86xkdgTdTA_0 person e873uWjeaPU_0 person e88X3OKvqTI_0 cow e9Ceg407V2o_1 bird e9GSzFiQj8I_0 person e9GoxfmycMQ_0 person e9MugXot7JI_0 elephant e9MugXot7JI_2 elephant e9MugXot7JI_1 elephant e9Y8BHEdYpg_1 person e9Y8BHEdYpg_0 person e9Z237Wup_E_0 boat e9aADbJBMmQ_1 boat GUY72Rg_9g4_3 airplane GUY72Rg_9g4_0 
airplane GUY72Rg_9g4_1 airplane GUY72Rg_9g4_2 airplane GUcZWh6tol4_0 cow GUq5xrqphew_0 cow GVCJZzVnGUQ_2 person GVCJZzVnGUQ_0 person GVCJZzVnGUQ_1 person GVG_dHMt7eA_0 truck GVRLfBtpGgA_0 person GVeNt6hXwK4_0 person GWCwYIRE8YU_0 person GWIAU4GsgZM_0 person GWQD6FxWwpk_0 boat GWckuI3sTHA_0 bear GWmOpSmpGmg_0 car GWmOpSmpGmg_1 car GWmOpSmpGmg_2 car GWsXKIAM9yY_1 cat GWsXKIAM9yY_0 cat GWygvbszdUs_1 train GXS6axKBr7A_0 person GXX1pJeR1HE_0 elephant GXX1pJeR1HE_1 elephant GXZ3IXi7YXk_0 person GXcbgDsx_Zc_0 person GXfsYdVEMeA_10 elephant GXfsYdVEMeA_0 elephant GXfsYdVEMeA_5 elephant GXfsYdVEMeA_6 elephant GXfsYdVEMeA_8 elephant GXgoAnrkdVg_0 person GXiDQ52vcoY_0 person GXoA1zfvnOA_0 car GXrzW-OHh_Q_0 cow GXtA9dxzvII_0 person GXyeuhOYX2k_0 truck GXyeuhOYX2k_1 truck GX1v3ymtHtc_0 person GX-3aTTy4lM_0 person GX-3aTTy4lM_1 person GX-3aTTy4lM_2 person GYA-3PblNaU_0 person GYHWtVM2x6c_0 person GYTD79P3b8w_1 person GYT5Cq1tl2Q_0 cat GYWNYnWPaeE_0 person GYY-ElZl7ZM_0 dog GYldHkVSD_A_3 airplane GYmeM7epDjY_0 person GYmeM7epDjY_1 person GYoXwAkvJns_0 person GYsx_49_O1U_0 truck GYuIsHEGV6o_0 person GYuMuXQgLPI_0 person GY0HVEiAPvo_0 person GY3D9bb9kLY_0 airplane GY65ShkktrM_1 person GY9iCFFBA20_0 person GY-carc6vxw_2 horse GY-carc6vxw_3 horse GY-carc6vxw_4 horse GY-dmOLQNH4_0 truck GZIpKCyb0bU_0 airplane GZLsv-Y_aRw_0 person GZM5nvvMeNo_1 airplane GZOUGcF_xaM_2 train GZThnpa-8Ak_0 train GZUk3BlrK7k_0 person GZWH1bUqm9U_0 person GZYSkuRZwGE_2 skateboard GZb9G8sVRz4_0 person GZb9G8sVRz4_1 person GZgL3ZQI9nM_0 cow GZhuCclpFuk_0 elephant GZq8tIKR9b4_5 bus GZsP_n7aFMo_0 person GZxvpxqvHFs_1 airplane GZ0bYvVD_us_1 bird GZ1aL_iE5a8_1 person GZ6PRvVVeZk_0 person GaAL3IYDUgM_0 skateboard GaD4QsNCcik_0 person GaF_t9Af1hg_3 umbrella GaJvFxg_lFY_0 person GaJ7Bu5UrgQ_1 bus GaJ7Bu5UrgQ_2 bus GaVmURUD-i8_0 person GaYAyNs2FDI_1 person Gad1St-JBls_0 dog GaeWhfSP3EA_2 knife GagCDetg0dg_0 bicycle Gai7qgVSFc8_1 cat GangZBQawtQ_0 person Gax9nZtMs7M_0 person Gayl2EVJTkw_0 dog Ga3YHyqOqYY_1 person 
Ga3YHyqOqYY_0 person Ga_Oju23T9s_0 person GbBl5CcJgeE_14 elephant GbBl5CcJgeE_6 elephant GbBl5CcJgeE_8 elephant GbBl5CcJgeE_9 elephant GbBl5CcJgeE_10 elephant GbC0DAAn-XU_3 bear GbC0DAAn-XU_12 bear GbC0DAAn-XU_14 bear GbE-oXaNVBA_0 elephant GbE-oXaNVBA_3 elephant GbE-oXaNVBA_5 elephant GbE-oXaNVBA_6 elephant GbE-oXaNVBA_7 elephant GbE-oXaNVBA_8 elephant GbE-oXaNVBA_9 elephant GbE-oXaNVBA_12 elephant GbGEC5pQ9f8_1 cow GbHLET097K8_0 boat GbN_zMz1D6o_0 person GbOK07Tq7mA_0 boat GbVDftpuPMo_1 person GbW-55xLUnQ_0 airplane GbY3uHcC3ys_0 truck Gbbhlv2Obsc_0 person Gbbhlv2Obsc_1 person Gbd1-rm9Oyw_0 truck GbmEMxbMtCI_0 bicycle Gbs4s3pX3H0_5 knife Gbs4s3pX3H0_0 knife Gbs4s3pX3H0_1 knife Gbs4s3pX3H0_2 knife Gbs4s3pX3H0_3 knife GbulfCx1hwo_0 person Gb_YkJHLgns_0 train Gb_YkJHLgns_1 train GcCQF52Ok14_5 person GcCQF52Ok14_1 person GcCQF52Ok14_3 person GcCQF52Ok14_4 person GcEgsdqMiBg_1 person GcEsDxUkr00_5 elephant GcEsDxUkr00_1 elephant GcRRhnk4ynk_0 person GcnVDv6bIAk_0 person GctFFbsebBs_0 person GcwS7IyeG5Y_0 motorcycle Gc0lgXRlxGE_1 person Gc0lgXRlxGE_0 person Gc3iNFz3s-o_0 cow Gc5OyOM0VxI_1 person Gc5OyOM0VxI_0 person GdI2CnryrFQ_2 car GdNJ-VDNc3k_1 person GdQuxx_RXvs_2 bear GdbphRsxpKU_5 horse GdbphRsxpKU_3 horse GdfyxcmHHOQ_0 person GdiGBeJ9m_k_0 person GdiGBeJ9m_k_1 person GdsJ0QHb83w_1 person GdsJ0QHb83w_2 person GduwjeptozQ_0 person Gd5qUjEeqZ4_0 motorcycle GeHV-tf-ZGA_0 bus GeUECF6hDkg_0 airplane Geb74PkjTYY_1 person GehgPYVYwDs_0 person Gek3IJfBaU0_0 train GeuYAXldbbg_4 airplane GeuYAXldbbg_1 airplane GeuYAXldbbg_2 airplane GeuYAXldbbg_3 airplane GewTJtB97l8_2 knife Ge2suMLyOTY_0 cow Ge4SjOnEYWs_1 person Ge4SjOnEYWs_0 person Ge8RWLzmrE0_0 person Ge8RWLzmrE0_2 horse Ge9uJatNWuw_0 person Ge9uJatNWuw_1 person Ge-VfDpriPY_1 person Ge-VfDpriPY_0 person GfCjURNr9T4_0 person GfLxzlZxHic_0 person GfbcHsH3DKI_0 person GfeXUZVyvL4_0 person GfefENTSQOI_0 person GfkX7I9bclY_0 cow GfqA0SZPeXU_2 horse GfqA0SZPeXU_3 horse GfxwasnA0Ao_0 bird GfxwasnA0Ao_3 bird GfyBiJNU7bY_0 car 
Gf50aWojLhk_1 airplane GgV4eSmNyaA_1 elephant GgV4eSmNyaA_0 elephant GgcoCmlTlbc_0 person GgfESlKFIkU_0 dog GgkncqtrgPI_0 person GgsFohIKlpw_0 dog GgyOGY2q9xE_0 skateboard Gg9uDi7KjJ0_0 person GhBPvHC15BE_0 person GhHPtGuUtRY_0 person GhI4uqxOQpc_0 horse GhLdswZDYMs_0 bicycle GhLdswZDYMs_1 bicycle GhMC34aeHnU_2 person GhMC34aeHnU_0 person GhMC34aeHnU_1 person GhQRZOseJfY_0 truck GhbtO__NASs_0 person GhbtO__NASs_1 person Ghbt5lVT3dk_0 truck GhiVm-6oFyg_0 train GhwtPgHjLvg_0 dog GhxWr3HvvXA_1 person GiRzA3Fe1-s_0 person Gijruln92tk_0 truck Gik59IGJFLo_0 bird GioAI9XlGGg_0 bird GioEMsI07Jw_0 person e9ihaIQuVMU_0 knife e9ihaIQuVMU_2 knife e9iolRKSwBw_0 person e9mOqKDBOVg_0 person e9nH--aGWDM_0 person e90GV6rl3NE_0 person e9-w67QSEBs_0 person e9-w67QSEBs_1 person e9_LqDqVkGs_0 person e9_LqDqVkGs_1 person e9_LqDqVkGs_2 person e-PcZyfAPZ4_0 person e-R-FxrDQao_0 person e-dVHSE1qXI_0 person e-gU8I2kZyY_1 bicycle e-n0pRU6uSk_0 bus e-n0pRU6uSk_1 bus e-qbVMLqnEw_0 person e-siUblegSA_0 dog e-siUblegSA_1 dog e-v2yWUGKiU_1 boat e-zbkYroVUk_0 person e-43rdp3psc_0 person e--Qr92yhBo_2 horse e--vN-5QX-E_0 person e-_nLPye6sc_0 person e_APlM8VSiw_1 person e_APlM8VSiw_0 person e_FyX6iUBZk_1 person e_GD2rN9Jcg_0 person e_SYVD0TY14_0 airplane e_UwPkRMD74_0 person e_aHtRh2PpI_0 cat e_b_4zlKmdo_0 giraffe e_qdDAeerKQ_1 bird e_-SOM0hufo_0 truck fAHFZWyNZQ4_0 bird fAHFZWyNZQ4_2 bird fAJAQb5tzFA_0 dog fAJ939SI_YI_0 person fAKXvHREf8E_0 bird fAMkbedQ0GI_1 person fAQoNDLgds4_0 bear fAUG8-TdflE_0 person fAjj5137yKM_0 bicycle fAm_6grpTOI_0 person fAyBUKM7898_0 person fAz2ecihxEU_0 person fA5ArJS7ScI_0 car fA6XfSl7pqY_0 person fA_OWAI_8kc_0 person fBH6rLEukMU_0 person fBIh-CAYfy0_0 person fBLrr2zYnRw_1 person fBLvIU3Q7Rw_0 horse fBPjBSdwz1o_0 elephant fBPjBSdwz1o_1 elephant fBP3dZYp3sM_0 person fBT1cNog4Lw_0 person fBkDTXhVYCs_0 giraffe fBmp8URVoB4_0 car fBsQegHOF8Y_0 person fBtfkn4uDKE_0 cow fBvAf66603Q_0 person fBwrgO05rqo_0 truck fByljFegqK4_0 person fCADagfWgSU_1 elephant fCK_OirKTO4_0 person 
fCMJnkyFS5c_0 person fCMJnkyFS5c_1 person fCPVsi1S2jM_0 cat fCTNp-hiUkQ_0 person fCTNp-hiUkQ_1 person fCT0UeuTcQk_0 person fCUZclkgF-c_3 car fCUZclkgF-c_4 car fCUZclkgF-c_5 car fCVoLETgca4_0 bicycle fCW56GByDs0_1 person fCW56GByDs0_0 person fCX_8Q_OAos_1 dog fCZXrHFimHM_0 person fCbvdNQUcRE_0 cat fCdlrWXZ7kY_0 person fCiWi1Dk-yE_1 person fCkgtao7rJk_0 motorcycle fCmwPCLYVXE_0 skateboard fCmwPCLYVXE_1 skateboard fCm-8YmQfoY_1 giraffe fCoXLMBzqTc_0 cat fCohGx6PWyM_0 person fCr-fmsVVWE_0 person fCsSoErwvfw_2 skateboard fCsSoErwvfw_0 skateboard fCsSoErwvfw_1 skateboard fCtyUxRaSdQ_0 skateboard fCwicNYDKmo_0 person fCzWVcZvGuk_1 motorcycle fC6O_2ljm_c_1 person fC6O_2ljm_c_2 person fC6O_2ljm_c_0 person fC8FUnipL3M_0 bird fDBgRd9yK8Q_5 airplane fDBgRd9yK8Q_1 airplane fDBgRd9yK8Q_4 airplane fDCK-s1gX18_0 skateboard fDCadv28EEo_1 person fDCadv28EEo_0 person fDFpsal4hHo_0 person fDIVkvMCQ9I_1 cow fDJjIhw4XBI_2 person fDJjIhw4XBI_1 person fDLBxom0wgI_1 cat fDVesIz_ON0_1 person fDe30IPiQ0Y_1 horse fDuiW9_sHcQ_1 person fDyXAhF761Q_0 person fD89z8ycv7U_0 person fD89z8ycv7U_1 person fD89z8ycv7U_2 person fEDj20Gce80_0 boat fEK6hdzjG5E_0 cow fESV3o1vc1A_1 bird fES_1kR2d8o_0 person fEVLKYBuE7k_0 truck fEXq69B6L0s_0 giraffe fEZ5cqJWg0A_0 bicycle fEdlpwoza6o_0 person fEdlpwoza6o_1 person fEdlpwoza6o_2 person fEgqRE0XOMM_0 person fEh5hyz4LCU_0 skateboard fEiWI60P4XI_0 bicycle fElOryAiN0s_0 person fEmh4mfGsCA_0 person fEupHSTMXLk_0 knife fE0raHY_nY8_0 cat fE_sSvVFvZU_0 dog fFBkKrJlobs_0 cow fFEDu-fiUUM_0 person fFGmvl4E9QI_0 bird fFImZECw1c0_0 skateboard fFImZECw1c0_1 skateboard fFOTZMvg0n0_0 horse fFRp0dBucFA_0 bus fFTJuANVr2I_0 person fFWU4PNTKDo_0 person fFWU4PNTKDo_1 person fFaJ5epORzQ_0 person fFd91uPKDVA_0 person fFksYDaR-NI_1 elephant fFmCHQgzMRc_1 person fFmCHQgzMRc_2 person fFmhW2ygNKw_0 person fFncU3kR5qw_0 car fFogpyIr-Ic_0 person fFq0hnzgGSw_2 bicycle fF0RlMrKBFo_0 bicycle fF1S-952IOU_0 horse fF3WOuwnvrA_3 elephant fF3WOuwnvrA_5 elephant fF3pBoS7xFg_1 person fF3pBoS7xFg_0 
person fF34g3sNiHo_0 person fF7snD5S5Q4_0 car fF_BanWRtKo_1 skateboard fF_BanWRtKo_0 skateboard fGGJnSDPzUI_0 person fGI6_U9U_zc_1 person fGPsR0YiVaE_0 train fGgJ0VACAo4_0 umbrella fGlnCmVPzIs_0 person fGrC6VCXVL4_0 person fG1NOqIRoLA_0 person fG6uSVeocMo_0 person fG-4n3Gy1fk_0 person fHO3g6Q_bNE_0 person fHUjlWalvJQ_0 person fHVJzD_AvV8_0 person fHepRAiQQ04_0 cow fHlfVMMfXNg_0 person fHm5WgSYk2Y_0 bus fHoBjwC8H50_0 dog fHoBjwC8H50_3 dog fHsaxiTw0dI_0 motorcycle fHzSK8AEv5U_0 person fHzzixV1xyg_1 cow fH5U2jXbkEg_1 knife fH8PS8Fjvbg_1 cow fH8PS8Fjvbg_2 cow fIABVBcluZ0_0 skateboard fIABVBcluZ0_1 skateboard fIFMCt78hmI_0 truck fILyoB3Pgrg_1 dog fIM7jmsq_FE_0 person fIN8z4lkdyA_0 car fIN8z4lkdyA_2 car fIN8z4lkdyA_3 car fIPXE6MOZp0_0 airplane fIT1bTlW3UQ_0 person fIVT3rTMptI_1 truck fIXFrPFEL0w_0 giraffe fIlXSJxnKD8_0 person fInEVgREyyY_0 dog fInYB8sD7tM_0 person fIrb5Y93wjw_0 train fIvUwaa2ziY_0 person fIyrHecb8SQ_0 elephant fI0VoDDN2lE_2 person fI0VoDDN2lE_0 person fI0VoDDN2lE_1 person fI5fnVs_kWg_0 motorcycle fI8DySScPWU_0 skateboard fJGPTgv8EUs_0 person fJJBGybbnH4_1 knife fJJX9D4siG4_0 cat fJTeqi3aqRc_0 car fJYGkMT9c6U_0 truck fJY5zGaYs8s_0 person fJdWgbIMXZ0_5 train fJdWgbIMXZ0_0 train fJdWgbIMXZ0_2 train fJpRqXhL3wE_0 skateboard fJp4DAu46Yg_1 person fJxbRDMY46o_0 person fJyBgU7rZvE_0 person fJ71o3Q-oVE_1 cat fKDRpRcSnrw_0 cat fKHs2FNZk6M_0 person fKLJqhEdsTY_0 cow fKLJqhEdsTY_1 cow fKLS0DAexvw_1 boat fKLS0DAexvw_2 boat fKLS0DAexvw_3 boat fKRZ4PPWgg8_1 person fKcOtlmf6r0_3 boat fKcOtlmf6r0_2 boat fKgpRiyDlvc_0 person fKhENDvpnmA_0 boat fKhe37bCgeA_1 horse fKp-Lvw2bUM_2 elephant fKp-Lvw2bUM_3 elephant fKp-Lvw2bUM_4 elephant fKrxRvMxZqM_0 person fKxBpYS29uM_0 dog fKyPRwF5y6s_0 person fKzFEc6hR-c_2 person fK89Z2AwlCg_3 bus GiuUBGsdiqI_0 person GizeLrnWRmk_1 person GizeLrnWRmk_0 person Gi--TM8Xz3I_0 person GjCs_s2EnpE_0 person GjFr4qO_LX4_0 dog GjJFQButa0w_0 bear GjJk6U2crcw_0 skateboard GjJp-yqt7xk_0 airplane GjZDPTKpIdE_0 person GjZP-buSAG8_0 person Gjdyi0kf79Y_0 
truck GjfhgZMeHAA_0 person Gjgu3OFbWKI_0 bear GjkrI0adkJk_0 person GjmNPrYyCwg_0 person Gj87GZKvhdo_0 horse GkCXvg93pAA_0 cow GkGG1F5by14_0 person GkddmkbGSAc_0 cat Gkfp-yV9e94_0 person GklwzbjOzYQ_0 person GkmRFBuktnQ_0 person Gkxkfi_wHeA_1 motorcycle Gkxkfi_wHeA_0 motorcycle Gk6IzYQADXg_1 skateboard Gk6IzYQADXg_0 skateboard Gk9v8ABOPNw_1 elephant GlLzIn-6ouU_1 bicycle GlLzIn-6ouU_2 bicycle GlPdixjfu44_0 cat GletqIQ8irw_0 motorcycle GlsMcq1cM2c_1 bird GlxEVs7z_7Y_0 person Gl7S2JNezLg_0 boat Gl7S2JNezLg_3 boat Gl9cy66E4FQ_2 knife Gl_UMssuTWU_0 person GmI47tbiNQ0_0 person GmKT2rhDILU_1 knife GmQX3sIhhqo_0 cow GmS0yrU3Hcw_0 person GmUFocQWPTo_1 boat Gmdxq1glmKY_1 dog GmeGRg8XZ5M_0 person GmvKmbIHKHM_1 person GmvKmbIHKHM_0 person Gmww9V50JtU_0 dog Gm9BnQSZlxk_1 person Gm9kb3zHsLA_0 cat GnFoElm_rrw_0 dog GnGd8Q_cSHU_0 person GnGd8Q_cSHU_1 person GnO2sxJNWjk_0 elephant GnRp7QHoAr4_0 train GnkSrEpnmRo_1 person GnmgLr5p-r8_0 bus Gno0JyFsjGk_5 knife Gn0av9LV5FU_0 elephant Gn3AqY6vUyU_0 elephant Gn7B_MiLuhA_0 skateboard GoEBr-GbeCk_0 elephant GoEcYxqxcZ8_1 bus GoEy1J3s8Xs_0 cow GoRGaOgttBU_0 horse GoUjZ5wJ2do_0 car GoWyqQorqOY_0 cat GoXlqK766lk_0 person GolDzhH16vg_0 train GorfZ7y-Jw8_0 skateboard GosFitiV7as_0 person GotzQ9ecvkM_0 person GoubTEJzKUI_0 person Go16BKYvDSs_0 horse Go5M-oyC28A_0 elephant Go8BM-B0ML4_0 skateboard GpCjTjkSw3k_0 train GpCjTjkSw3k_5 train GpCjTjkSw3k_3 train GpCjTjkSw3k_4 train GpCjTjkSw3k_2 train GpDilZGSveI_0 person GpJmJforKzo_0 person GpPbMduP_3Y_0 cow GpProJiVxa4_0 bear GpTPDl3MzZw_0 cat GpVy_gD1slw_0 dog GpY4Nw8LLy4_0 bird GpkftB3rq5g_0 dog Gpn_kF1lXuc_0 bicycle Gpn_kF1lXuc_8 bicycle Gpn_kF1lXuc_13 bicycle Gpn_kF1lXuc_14 bicycle GpzE4RQTM1Y_0 airplane Gp3g6UYBBzw_0 person Gp3g6UYBBzw_1 person Gp70TnjZRfU_1 train Gp70TnjZRfU_2 train Gp70TnjZRfU_0 train GqZeX-EEEL8_0 person Gqc_LkQvKak_2 horse GqjVd_dRiB8_0 person GqjVd_dRiB8_1 person GqjoBpwsgUc_0 person GqjoBpwsgUc_1 person Gqntj1GoicU_0 bus GqzN0dyl5p4_4 truck Gq-mMFeLCyo_0 person 
GrG-ipHg_4w_0 person GrK4qEJjeKE_0 airplane GrNDwiO4kdI_0 airplane GrQ0zJbkeXE_0 person GrXOOtPiIGw_0 zebra GrYsw9-Skqg_0 person GrZvWtxffXE_0 person GrpvM1_CRqI_0 train GruxXrzWzjk_0 airplane GruxXrzWzjk_2 airplane GruxXrzWzjk_3 airplane GruxXrzWzjk_5 airplane GrzyUDtV-Ug_0 person Gr6be_D6d9Q_2 skateboard GsFDHyoPppk_0 person GsGHB19iuE4_0 person GsKJMkVSeV4_2 airplane GsL7VYYWhu0_0 person GsOgw9XtlWc_0 airplane GsOgw9XtlWc_1 airplane GsTlT_7Zb1Y_0 train GsVvc55IHn0_0 skateboard GshXL9V-lrM_1 person Gsj4aXqBPHM_0 truck Gsn06D15nmk_0 motorcycle GsrSyK5ymQo_0 boat GsrenPacLW0_1 person Gs67R7prarI_1 motorcycle Gs7J9Yo-uF0_0 cow Gs7J9Yo-uF0_1 cow Gs79ZsyWm74_0 person GtAKWYvc9kY_0 elephant GtCbEqqQgqY_0 person GtCbEqqQgqY_1 person GtD2m1EXxjc_1 bicycle GtKaIcQJZcc_1 person GtLYNeredOY_0 boat GtVrmoeEcMM_0 knife GtZPw5ftw88_0 person GtZSRodviU8_0 person Gta1hcIAAE0_0 elephant GtiiYqVQ2Kw_0 person Gtmp8y8APfQ_1 skateboard Gtnqm4SnEXo_0 horse Gtnqm4SnEXo_1 horse Gtnqm4SnEXo_2 horse Gtnqm4SnEXo_3 horse Gtnqm4SnEXo_4 horse Gtqcx01NTTw_0 knife Gtsvc9lA7hs_0 airplane Gt33VfmFDWw_0 person Gt6q9b3QUvE_0 bicycle Gt6q9b3QUvE_2 bicycle Gt7thmVY6aQ_0 person GuQvGMFuhu4_1 car GuQvGMFuhu4_3 car GuXelRN3wMo_4 bear GuaD24NfCe0_0 person GuawwNMbfBI_0 person Gue43DvNTGc_1 train Guf15LHosg8_0 person GugU0nZdPJU_0 bus GuhfGduN9v0_0 person GulmsZq-VsU_6 boat GulmsZq-VsU_0 boat GulmsZq-VsU_3 boat GulmsZq-VsU_4 boat GulmsZq-VsU_5 boat GusEs8RA4_o_0 motorcycle GuwTG6RtcFI_0 person Gu4MWCc2Wws_0 bicycle Gu-vFv_w9Vo_0 person GvFmkdxnKyI_0 horse GvIj2sMkJwM_0 person GvNhgCGtUOQ_0 truck GvQvyfTNykM_0 truck GvRM_UnjJoE_2 horse GvdMRPX4KR4_0 train GvdMRPX4KR4_1 train GvdMRPX4KR4_5 train GvoIcT-hFek_0 person Gv9mTaerVLc_0 person GwFrSa-YwfI_0 bear GwFrSa-YwfI_1 bear GwIn1NaaEwE_0 bus GwbpMG2B14Y_0 truck GwgaNLd1f7s_0 truck GwlNXPuUvXM_0 person GwnBP9a07RE_0 person GwnBP9a07RE_3 person GwnBP9a07RE_4 person GwnBP9a07RE_1 person GwnBP9a07RE_2 person Gwx1ad4lW1Q_2 person Gwyl7djxZkg_0 cow Gwy4ODXAAU8_0 
person Gw5YyHT1Nt8_0 person Gw9Vi_Io9DM_0 person Gw_Tiv72jms_1 horse GxANCkxq7Ng_0 motorcycle fLCd0DDhfBk_0 person fLEUT0rTkv0_0 bird fLJniCJFPTg_3 elephant fLPHwVvk6K4_0 person fLPHwVvk6K4_1 person fLWW1YWO26Y_0 bird fLdMmSIfseM_2 person fLdMmSIfseM_0 person fLe279fKywo_0 dog fLsDTJxlsW8_0 person fLwrxElzLZs_0 person fLyNbq9v6kg_0 person fL1w15qwbqE_0 person fMOnb4P7tww_1 person fMOnb4P7tww_0 person fMO1J7ojQqk_0 dog fMTosfHKy2I_0 dog fMi6lVyCOHw_0 boat fMwCpOTv9RY_0 bus fM-puV4uyzs_0 person fNAZ9IDLZy0_0 person fND_OguW0MM_1 elephant fNIdPhAsjiM_0 cat fNJSPU5r3sc_0 person fNO_o1D0kvY_0 person fNdRm3HWQmo_1 motorcycle fNgr2EBEDCQ_0 car fNgr2EBEDCQ_1 car fNg3y0FHjgg_0 person fNhDT1fwzKM_0 person fNhDT1fwzKM_1 person fNh54BNEJBQ_0 cat fNw9dDcM4ms_0 bear fN-FYknWOSk_1 person fN-FYknWOSk_2 person fN-43XPvLwg_0 motorcycle fOLR2dvBtqo_0 cow fOO1pHvrPWQ_0 person fOatLQK_AyQ_3 bicycle fOcPVX4sAxg_0 horse fOjKgQf86dk_0 horse fOkrLuGKDvk_0 person fOkrLuGKDvk_1 person fOkrLuGKDvk_2 person fOsd2aWzfBo_0 cow fOtnatCU7_Q_0 person fOuV2101nEo_0 bear fOv8ocd2xhA_2 knife fO30fgQYdT4_0 bus fO8Do_0RQXU_0 person fO9GgD7GqE0_2 bus fPBIIZV6fuU_0 person fPMNtuJztSA_0 person fPVn9Wxf_HQ_0 person fPVn9Wxf_HQ_1 person fPrhiYslRjA_0 person fPzDDdztZNk_0 horse fPzQyo7caqU_0 person fPzqpL90owQ_6 bear fP5AyxuGIS8_0 person fP8x_x2_k5g_0 person fP-DMm3u5n4_0 cat fQEGEb4W3IE_0 person fQNyLEXwnn0_0 person fQOjoYB5hPQ_0 person fQOjoYB5hPQ_1 person fQOymYsdTtU_0 person fQdA_-549Dk_0 dog fQh5RtZzYzo_0 bicycle fQlChBB42M0_0 person fQoJWcmQmsU_1 person fQo0G2i1QjY_0 person fQt3g_9u1RQ_0 airplane fQyE_yIAu_0_1 skateboard fQ26oO2Y5NM_0 bicycle fQ4H6UmTepU_5 giraffe fREDiuJlBf8_0 person fREDiuJlBf8_1 person fRFF0xtrWhI_0 elephant fROdeQpu88o_1 knife fRS5rhYP7LM_0 person fRXDSh8gr0c_1 person fRZ7Wze7ATs_3 knife fRcegyxH0Is_0 car fRhNtVu6anA_0 dog fRjCbO3MyU8_0 person fRmnBvuwZlU_0 dog fRmnBvuwZlU_1 dog fRrLguORoeU_1 umbrella fRrLguORoeU_2 umbrella fRrd-Z2R-Gs_0 person fRtzYh_gGgI_1 cow fRwzMPH6Kvw_0 person 
fR1zDIeBHFg_0 person fR6FrFNXUxY_0 person fR-JNy5hccc_0 umbrella fSA7T5svJ-o_0 bus fSBe_a8ZkZU_0 cat fSey4VJgLM0_0 person fSfKYTVt7V8_2 bird fSfX4Z6SR2U_0 horse fSj-h8lAhWw_0 cat fSoqM6oq2AA_0 train fSoqM6oq2AA_2 train fS0098HnnhM_0 person fS3KL3nj7FY_0 person fS73PiHaNi8_0 person fS8_byjM-1M_3 zebra fS8_byjM-1M_0 zebra fS_6fgFOiPU_3 train fTFLfGUcgMs_0 elephant fTFLfGUcgMs_3 elephant fTFVwPKxUHE_2 elephant fTP9YgSJZg8_2 knife fTVb5uxWnsI_0 person fTVb5uxWnsI_1 person fTgirzB_QLU_0 person fThV1JtaTJg_0 person fTkIm1nb6qg_1 bird fTkIm1nb6qg_2 bird fTnnG_WcLYY_3 knife fTnnG_WcLYY_4 knife fTwiavhNzxs_0 person fUB-cH8rjW4_1 person fUB-cH8rjW4_0 person fUF__EdDFVs_0 skateboard fUISEtXSRYM_0 person fUU4R6RP4ek_0 motorcycle fUXpqgf4jUA_0 bus fUd8LjmonBM_0 person fUetaCH3tZk_0 person fUg6JULdTnU_0 person fUonzpmV18o_3 bird fUqVKgWVVNY_1 person fUqVKgWVVNY_2 person fUwzXH9i0yQ_0 person fUx60fl9UkU_0 person fUzsVWD48bA_0 person fU3o6Frqdww_0 truck fU4DzirdCVE_1 airplane fVAmI93Yb6E_0 cat fVAsOuag4vY_1 giraffe fVHZEHosow0_2 person fVH3n0aghP4_1 person fVH3n0aghP4_0 person fVH7PpDqlPE_0 boat fVIVas1R1tk_0 cow fVOy449KQlY_0 person fVX7qR-o-9I_0 cat fVZfWzDBb-c_0 person fVZ_9hWIGpA_2 truck fVdrMKHN9WY_1 cow fVq7Of0Tr-s_0 person fVr3XVUzJaA_0 train fVv5EqFYsAY_0 person fV80H_L3AN8_1 motorcycle fWLqbV7Z7Go_1 person fWLqbV7Z7Go_0 person fWb_-8hhubg_0 person fWmJ9tUUCwg_0 person fWpdcmgr5r4_0 horse fWxgjNDC4OQ_0 car fWxgjNDC4OQ_1 car fWxsOgW3P6U_0 person fW1Z_Mx1RaA_0 person fW4fh_WBiMY_0 train fW7yPljMFRc_0 person fW7yPljMFRc_1 person fW_HPaNBsDE_0 cat fXCFktk2xdc_0 person fXLB02IH0G4_0 person fXLB02IH0G4_1 person fXOdZ0uKuBc_1 dog fXWqvRfBWto_0 person fXX7K6CQfBw_0 airplane fXYn01Cgmqs_0 dog fXY7h0cc6tw_0 cow fXbnEKMaIoM_1 boat fXbnEKMaIoM_0 boat fXka5y708fI_1 person fXowuJDXhhU_0 person fXyBm7_EDVc_0 skateboard fXzIQASqygY_0 bird fX-kSrf_K8w_0 horse fYDgPdRtmjU_0 train fYLtnvuW_VI_0 motorcycle fYMA0fLN8sI_0 horse fYN5ZIicl_k_0 car fYmfHE2mONE_1 person fYnsIFGQfT8_0 person 
fYql4FiApLQ_0 horse fYtm_pGBWkU_0 person fYu5ChRgapY_0 motorcycle fYw5KVCsg_4_0 person fYyI8x0tNAA_1 bear fY4-6vsjmD8_0 person fY82KLfOpbk_0 person fY82KLfOpbk_1 person fZCdkf9VQzU_2 cow fZEFEAYBlGE_0 cat fZFYdgZbSBg_0 person fZFYdgZbSBg_1 person fZJOS8BlA-w_0 person fZOtury_J_w_0 person fZTIKbSjOhk_0 airplane fZTJH_9Pqvg_0 person fZTJH_9Pqvg_1 person fZWP75nltcM_0 bird fZXzEYFmZ_8_0 person fZXzEYFmZ_8_1 person fZiiYH3WfD8_0 skateboard fZnbOFaSEQc_0 person fZnbOFaSEQc_1 person fZp_UgW_xZU_1 motorcycle fZp_UgW_xZU_0 person fZu7wEVEuX8_0 person GxHmm60dKvc_0 skateboard GxLI4BFLrps_0 person GxPYf4SAQvE_0 person GxPYf4SAQvE_1 person GxWuAfBV300_0 person Gxg0Pt_9bIE_0 person GxwwTXW-DdQ_2 train Gx1zPI3b2oc_0 person Gx3xtKPwlz0_1 horse Gx4ryd6AGl4_1 train Gx4ryd6AGl4_2 train Gx4ryd6AGl4_3 train Gx4ryd6AGl4_0 train GyGdlCtDdJc_0 person GyIKdb5KDHk_1 train GyPRnKI78iA_0 person GyU8x9urAxE_0 motorcycle GyVDsnuS5jU_0 person GyXlgRxQ1jo_0 train GyXlgRxQ1jo_1 train GyZHiIEOBos_0 cat Gya_TrOGXpo_0 person GyhjyC5aJ8U_0 bus Gyjb_P1W7TA_2 bus Gyn_wSuRB3w_1 truck Gyzaf_gaIYY_0 motorcycle Gy9JueTT4XU_0 person Gy_XuBCvbUc_1 dog Gy_XuBCvbUc_2 dog GzB9OTV44PA_0 person GzHy2xjKB_8_0 person GzLmftr6tl8_0 person GzRkvFxVlx0_0 person GzTDLPCsgSM_0 person GzVj8bI0bSk_0 skateboard GzVj8bI0bSk_1 skateboard GzcgYGEqOlY_1 horse GzesZ0laH2w_0 motorcycle GzizYdL25ZY_0 person GzjkTrnmEnU_0 airplane GzjkTrnmEnU_1 airplane GznFDBDT2c0_0 truck GznFDBDT2c0_2 truck Gzrgq_nWH_Q_0 horse GzujCDTak_4_0 horse GzujCDTak_4_2 horse Gzy_PnFtEpM_0 person Gz3Np50b9q4_0 truck G0DQ6VdMp-U_7 car G0DQ6VdMp-U_0 car G0DQ6VdMp-U_1 car G0DQ6VdMp-U_2 car G0DQ6VdMp-U_4 car G0DQ6VdMp-U_5 car G0DQ6VdMp-U_6 car G0FSe53KN-w_0 person G0WsFATo9RQ_0 person G0dXxEbeJnM_1 person G0d44YoKXX4_0 person G0kDhLojiI4_0 giraffe G0leBoTgEx4_0 person G0rwWyFSsYE_0 train G0r2tR6EcF8_1 person G0urH-9ytbc_0 horse G01Xi8VMxgQ_0 person G03JTuHY_RM_0 knife G1AIHF-KITc_0 person G1AtN7CvCXw_0 person G1EnmuHlxig_0 person G1P_XnEL4dc_1 person 
G1P_XnEL4dc_0 person G1TS-PvdREA_0 person G1TS-PvdREA_1 person G1ThERK4a8E_4 airplane G1ThERK4a8E_0 airplane G1UoN56m5DM_0 person G1YNrrT9-z8_0 bird G1YNrrT9-z8_1 bird G1cY71JK5_E_0 motorcycle G1c0-CTyZ3I_0 person G1dKhZZARDk_0 airplane G1z6RMtKkbM_0 bird G1z6RMtKkbM_1 bird G11cHAnx17E_0 horse G13ARgckI9w_0 person G17Kpx1bgXM_0 horse G1_R_EJpLZU_0 cow G2FXcVDezv4_0 truck G2HOmWxj5gg_0 person G2LNQIwbLHE_0 person G2S4rwP6qJY_0 bicycle G2V6wliL2AA_0 knife G2g4Z-Syzi8_1 dog G2lFYYEolz4_0 train G2lFYYEolz4_2 train G2x5gACWSwA_0 cow G2z7yjdCUuI_0 airplane G23Q_C35Uqs_0 bear G24yJOgl9t0_1 person G25iisvOYhA_0 cat G2-v9IBlnTs_0 person G3AuCS7s68w_0 bird G3IID08lWos_0 person G3P-Vvra2GU_0 horse G3SowFCFa0g_0 person G3VeVH6pbdE_1 person G3a0EYtnqHA_0 person G3cazaory7w_0 person G3f8bIoGGZ0_0 dog G3kNB0zhHQc_0 person G3pT4MJrpDI_5 umbrella G3pT4MJrpDI_6 umbrella G3pT4MJrpDI_4 umbrella G3vP7_U6yXU_1 cow G37Dm4oy794_0 bicycle G38EbyEOITE_0 horse G38SrxcVYWs_1 person G39ryVtNnhQ_3 elephant G39ryVtNnhQ_8 elephant G39ryVtNnhQ_9 elephant G39ryVtNnhQ_11 elephant G4PD_RAK48Y_0 person G4VPBDOgq54_1 skateboard G4VpcUuXgRs_0 person G4VpcUuXgRs_1 person G4ckSGXUGts_0 person G4fbkcKiZVg_0 person G4nRZ4PHvC4_0 dog G4rJejZ9FIM_0 car G4r0UJvtDXs_0 cow G4xFWKKoN0M_0 motorcycle G47wnMA6RVE_0 bus G4_xR7lZIPo_3 bear G5D1cAo2D6s_1 person G5JwolS0D1M_5 elephant G5QgL60_yfc_0 knife G5SlrQeATlc_0 bus G5SlrQeATlc_2 bus G5hG8j0KxBI_0 person G5ixkqq66VA_0 person G5rBbx_kODY_0 person G5ztukDN_Qg_0 zebra G51fdi_hG_0_0 train G52uuPWcC3M_0 umbrella G553b8ZAd3Q_0 person G58FuwBYL-0_0 skateboard G5_UJ1wEKh4_0 person G6OttGznP9E_0 person G6OttGznP9E_1 person G6QMME1QbK8_2 car G6Qmm4T-cd0_0 bus G6WiR4W4WWk_0 person G6b9lySVCCY_0 person G6eAvUHoDkc_0 person G6fvYSH13nI_2 train G6iVTjyPM04_1 horse G6sFOs8MgGU_0 bird G6sFOs8MgGU_3 bird G6sFOs8MgGU_6 bird G66e5ltBFoI_0 person G7DhRPK7pwc_1 bicycle G7F-ufxEXPY_0 knife G7H7fQ_Q1Ec_0 person G7H7fQ_Q1Ec_1 person G7ID9RdMSkE_0 person G7MvPG8Qv84_0 giraffe 
G7TezoE9Cmo_0 person G7WblvVQPF0_0 person G7Z01jmMzlI_0 bird G7krBQa_KLc_0 person G7p90FBQk_0_0 truck G7slUshqPvY_0 elephant G74HXSqYO-A_0 motorcycle G75uQAEuUkE_0 person G766vinfuBw_5 bicycle G766vinfuBw_9 bicycle G77KKnCpwWY_3 skateboard G8EC6svgwKU_0 person G8NIqmq7YdE_2 bear G8V2UsTc1Ik_0 cat G8V33bTVNII_14 bicycle G8V33bTVNII_1 bicycle G8V33bTVNII_2 bicycle G8V33bTVNII_6 bicycle G8V33bTVNII_9 bicycle G8XX8bkx6Ek_0 person G8hStuDYwH0_2 airplane G8kDZAPbUe8_0 person G8kDZAPbUe8_1 person G8k84FwnW2k_0 motorcycle G8lDrK3u3r0_2 elephant G8lfwRN3Iew_12 boat G8lfwRN3Iew_0 boat G8lfwRN3Iew_8 boat G8lfwRN3Iew_9 boat G8lfwRN3Iew_11 boat G8sDCWad2Bg_0 cat G8s2n3jAKW8_0 cow G8tbj2R0iso_0 person G80DOuBBH_Y_3 airplane G8--2JpJa6g_0 person G9DdsOO1mZo_0 horse G9FQJdIxjsk_0 bird G9YPEOrV5UU_0 person G9YPEOrV5UU_1 person G9YPEOrV5UU_2 person G9ZKH_DS9DU_0 person G9gsnqhd_Sw_0 cat G9hPaEx7Ci0_1 knife G9i66tUOspc_0 dog G9juxPad3zY_0 person G9nlPUwJQB0_0 person G9nvXjuig6s_0 person G9qCl1NZelo_0 cow G9rxIfeUWVo_0 airplane G9vDsElCKAY_0 dog G9zd0G8dIt0_0 person G93PAKTtVpM_0 horse G97UC0qtVDw_0 person G97YtHMd2hw_0 person G99rEXOdlC8_0 horse G9_TgGWQQi8_0 person G-Sr-qmWZNo_0 cow G-YYtvCU7qY_0 dog G-d6o3nTBFA_0 zebra G-nFiFb0Xos_1 knife G-nbiqZuFdc_2 horse G-qCe2DK3Tk_0 motorcycle G-u_ThqhoJE_0 train G-yCRlVSs6w_0 person G-3kOsn1fPY_1 person G_ADLUKVq8Y_0 boat G_LtPKO6be4_0 horse fZ1GVGZmTRA_0 person faJuqm4umTQ_0 person faSv8ijeKeE_0 person faVBgge6xkE_0 person faW2tWwuCMg_1 person faW2tWwuCMg_0 person fahs60oGhLU_0 train fatTPMeG5Pc_1 bear fa-rHhFEloA_1 truck fa--elcQpd4_0 elephant fbDYKST2P-I_0 motorcycle fbFVM0UM5V0_0 person fbM5MhIve5s_0 dog fbM5MhIve5s_1 dog fbiXTCkCkqY_0 skateboard fbmZZXaRkak_5 horse fbmZZXaRkak_6 horse fbmnWcE_64U_0 skateboard fbsyvHQPZZk_1 dog fb3Iq9yQ1VY_0 person fb3WxEfe8l8_0 motorcycle fcCb2W4HMLk_0 person fcD6n99azfw_0 person fcGNPf6n7Ws_0 bear fcWegrm8wCE_0 person fcbcnvGoWLs_0 car fchtQi7-OD4_0 horse fclxNO1L-rY_0 cow fcpGNeDgpDI_0 person 
fc1qNL5u2wg_0 person fdCTLMd6wEY_0 cat fdQaoSZKA_s_0 person fdRULl8YSnU_0 cow fdYvCuft5zQ_4 elephant fdYvCuft5zQ_5 elephant fdYvCuft5zQ_1 elephant fdYvCuft5zQ_2 elephant fdZBeWyKON0_0 person fdbvWvUoFW8_1 bird fdbvWvUoFW8_2 bird fdbvWvUoFW8_3 bird fdkrZ9uL854_0 person fdlDkbbDniw_1 elephant fdmV18YEDKM_0 cat fdnBDcIwPBA_0 person fd3ea86gmJI_0 motorcycle fd3ea86gmJI_1 motorcycle fd8Ba2cZgxI_2 bear feAexE1IYq8_0 person fePU3BlF4Zc_0 person fePU3BlF4Zc_1 person feQX_1dqh9g_9 bicycle feQX_1dqh9g_1 bicycle feQX_1dqh9g_3 bicycle feZfxIunWHo_0 person feZoXB7I6wE_0 person fedmeW-WImw_0 train fegJtwcNo5c_0 bicycle feh4XVzjQdI_0 cat felt48AIbIs_1 person fenYF-k-y4c_0 skateboard feqLG8n4nDE_1 person fe05wKXl2cI_0 person fe05wKXl2cI_1 skateboard fe5_49oxMwc_0 person ffIQZZ_P3ck_0 cat ffOeGlw8_C8_1 cow ffZoY75S_-k_1 bird ffZoY75S_-k_0 bird ffbSaNikNF4_1 elephant ffeYBfcgF3s_0 person fftSD6UfvEA_1 person ffttXyArNGc_1 knife ffvXiSjPp6c_0 horse ffwk_8ycQiA_0 person ff1PHzfARZk_0 person ff5MH6QQuJk_6 knife ff5MH6QQuJk_2 knife ff5SaJnQg5M_0 person fgEpQHGYIjc_0 person fgFy8l-b1iI_0 motorcycle fgJJxPEHVZQ_0 person fgPShysxuQM_0 cat fgQE-9shdmQ_0 elephant fgUjCKe_e_Y_0 person fgWtwTKCtMQ_0 person fgfizI4AnVs_0 person fggT4HM2Uy4_0 person fgsaC375d38_1 bird fgvUj1mCqio_0 train fg1ISXcyb10_1 dog fg5mCaScLE4_10 umbrella fg5mCaScLE4_0 umbrella fg5mCaScLE4_3 umbrella fg5mCaScLE4_4 umbrella fg5mCaScLE4_6 umbrella fg5mCaScLE4_7 umbrella fhHLCLuQAdE_0 bird fhHLCLuQAdE_3 bird fhHLCLuQAdE_4 bird fhHLCLuQAdE_1 bird fhHLCLuQAdE_2 bird fhQN_vhNmgo_0 cow fhan95LbdqQ_1 knife fhmsHcZfBC4_0 person fhutr5rLQN0_0 person fh5lB6U-7Wk_0 person fiGa0nIEYbw_0 person fiKecNhAgFU_0 motorcycle fiS0pY80kkU_0 dog fiWtkuDUFvM_0 elephant fiZAhg2twZs_0 person figjWJDEn1c_0 person fijO0rB1rfY_0 airplane finRU64JVRU_1 bus fi2s2k_aamk_0 person fi46OpYa89I_3 bicycle fi46OpYa89I_10 bicycle fi46OpYa89I_2 bicycle fi6gdEVUAUc_0 cat fi8YGUm_6x0_0 person fi9GleMDHIc_0 person fjF31Mh-tNQ_0 person fjKXALm76kI_0 bus 
fjXufPzimEQ_0 person fjZ4J-BZX2U_0 person fjaHYcaE7-w_0 person fjaHYcaE7-w_1 person fjnR81fSTeI_0 umbrella fjnxqBnMZzs_0 person fjtn0lRVX_4_0 truck fjwgdNBSCFc_0 person fjwgdNBSCFc_1 person fj29rB34ea8_0 person fkERi_ma2UE_0 person fkERi_ma2UE_1 person fkHiDyuUaWA_0 person fkIfLHGu_CQ_0 person fkQEEtG6Tbg_0 person fkSf5a3q6oY_0 boat fkSf5a3q6oY_3 boat fkUDB0V3UXc_0 horse fkUDB0V3UXc_1 horse fkVSILZPyXg_0 bear fkaKyYrWPpQ_0 person fkfnbZ2MSXk_4 bicycle fkfnbZ2MSXk_0 bicycle fkfnbZ2MSXk_6 bicycle fkx0e2gvPYA_0 truck fkyM4LNUCck_0 person fk0v7vZDpgU_0 person fk10mtIF_Hs_0 horse fk8yMMO1gRA_0 person fk8yMMO1gRA_1 person flADy--Uwx8_0 truck flERyzHjhzQ_0 skateboard flMijcdhRAU_0 person flgTyT4DB7E_0 bear flgaLcoSjb4_0 bear fluEronPyZk_0 cow fl6-NRwVy10_0 person fl7Q9yxFoOs_2 person fl95IAyDN-s_0 skateboard fmERtylbqN4_0 person fmGJj0qYc6g_1 person fmGJj0qYc6g_2 person fmLKgz4DQhQ_0 airplane fmL66yeOiI8_0 person fmRfUvIIvT8_0 person fmYELQL9Cs0_0 bus fmbEAdugI3Q_0 person fmbb6SQ6qiI_0 person fmbb6SQ6qiI_1 person fmbu89zGN4Y_0 person fmdem4Z9BHI_0 bird fmfg5yyhjkA_1 person fmiq_EhaURY_1 person fmiq_EhaURY_0 person fmtIa6nxUd4_0 train fmuzrZHZYis_0 skateboard fmwC1khd3BU_2 person fm3zFVlJw4k_1 person fm-ScTLdSL8_1 bus fm_bcsJYhu4_0 dog fnAGderLxPg_0 elephant fnAGderLxPg_3 elephant fnDP4B5jpSY_0 person fnFMQ2VFlEc_0 person fnOL3ZL61u0_0 person fnOkwsmzdaI_0 horse fnRq5X91IV0_0 person fnZR6FD_eZ8_0 boat fnZR6FD_eZ8_1 boat fnbSgwO8v0c_1 boat fnbsAmTQJOs_0 bicycle fnbsAmTQJOs_1 bicycle fniJ36z0_Pc_0 cow fnj1YtAaztU_0 person fnkHdQf9H3w_0 knife fnmuFbydHek_0 person fnpjkwiPkSY_0 skateboard fntRlkYDiD0_1 person fntZVzkwhz4_1 person fnvst-Sk4MU_0 umbrella fnvst-Sk4MU_1 umbrella fnz6gTPuInQ_0 dog fnz6gTPuInQ_1 dog foAoOCF4rE4_0 car foI1jEbg9uA_0 train foJs0wXX1O8_0 truck foaFgrzsPOY_0 person fobJTCY7ifQ_0 bus fodsoLtLzqI_1 cat fojRgMUsu3c_0 person G_RgJ0t0Cbo_0 person G_aU-_2ZiSw_0 dog G_lOQAV6xWs_0 cat G_poofS7HD0_1 person G_poofS7HD0_0 person G__VTazZtp0_0 elephant HARRnedV05U_0 
car HAVUursfTOI_1 zebra HAtu6frOH1k_0 person HA1TDbNot8E_0 person HA-iE7bcfT0_0 car HA-iE7bcfT0_1 car HBI13CpuAmI_0 knife HBLJbCs1mSg_0 truck HBMah_r3E1g_0 person HBOqQBe7rhE_0 person HBO6G57uhXA_0 person HBY4_6b_sRY_0 cat HBiSuZWtb4E_0 boat HBmaJJ0nTAo_0 person HBwjWdXrpPA_0 dog HBzYVphfmRQ_0 person HCA4jkg9HTY_1 person HCA4jkg9HTY_0 person HCEjNJewxbw_0 person HCJ1EYfF8qg_0 elephant HCKZ7kihdaM_2 airplane HCMBgpQ2z18_0 cow HCSbzHGXxmA_0 cat HCczjWUmlW0_1 truck HCczjWUmlW0_0 truck HCg0k7LnfkY_1 cow HCg0k7LnfkY_0 cow HCiRQdh20qg_0 dog HCm-B3JjzhY_0 cow HCpxRBja8lE_0 person HCp6gYC9NFE_0 cow HC72_Yrigik_0 person HDN4DqO_KLg_0 dog HDQEWwETuU4_0 person HDRKiYaoEnA_0 person HDSw0KM8cSs_0 person HDkI156rPRA_0 person HDmK6y86kYM_0 person HDmK6y86kYM_1 person HDnYEdh7xG8_0 person HDqUvaFm_R0_0 skateboard HDr5if6Mb_4_0 person HDziFGwpXmg_1 car HDziFGwpXmg_2 car HDziFGwpXmg_3 car HDziFGwpXmg_7 car HD1tKnKT1Dc_0 motorcycle HD7QKzuFNas_1 person HD7QKzuFNas_0 person HD_alEnCVhM_0 truck HD_alEnCVhM_1 truck HD_wYO2_O8k_0 person HD_4ZJr68p8_1 horse HEIjtOJze90_0 person HEfIJ3wMKRI_1 person HEmv-biWoEA_0 airplane HErkHysJd-M_0 person HEr_leMW1zE_0 bear HEr_leMW1zE_3 bear HEr_leMW1zE_1 bear HEyY4zEX-no_0 person HE-4YEdBwuw_0 dog HE-4YEdBwuw_1 dog HFDK_y7kibQ_0 knife HFE9ujNILoA_0 cat HFQFlm1jWiE_0 person HFQFlm1jWiE_1 person HFRCZSouOn4_0 bird HFWQl2JJfic_2 person HFa18pRSsXU_0 train HFlanXHBGHg_0 person HFuw8C2bQ6g_0 person HF07qDRPgrw_0 horse HF1xhyTtWLk_0 motorcycle HF3Nn3KqXOk_0 person HF3Nn3KqXOk_1 person HF4PefI86r0_0 person HGFcsJmjWHs_0 elephant HGFcsJmjWHs_9 elephant HGFcsJmjWHs_4 elephant HGFcsJmjWHs_5 elephant HGFcsJmjWHs_7 elephant HGLC_YFRxPY_0 skateboard HGLLnmQiCU0_0 person HGLLnmQiCU0_2 person HGLLnmQiCU0_1 person HGLdrgf2e2c_0 person HGVNoha70iA_0 truck HGZDROOjAY4_1 person HGZDROOjAY4_0 person HGeCBN48g9o_0 person HGm4OftDlT8_2 horse HGnIxotAPOU_0 person HGnegc2CRTM_0 person HGvXva6SUvE_0 person HGw4URr4QUs_0 person HG1zQzSX2rU_0 person HG8oY2Ac4-M_0 person 
HG_JAnXBzJQ_0 skateboard HHGq5gd6w1g_0 skateboard HHPW65GVeoA_0 person HHRUnCEVnAo_0 cat HHc5mD1TxGQ_1 knife HHe9m9BOi3A_0 person HHgC0pkNiIA_0 person HHgC0pkNiIA_1 person HHi26rWtC38_0 person HHx5E8VfnkY_0 person HH0OILx6PKY_0 person HH1JApHMx2I_0 dog HH148v63a5o_0 person HH9wMNMJ2sE_0 elephant HIBd79qG-XQ_0 person HICJGOFvwoc_2 bird HIHX1rpDx_I_0 cat HIIQ917jPqg_0 train HIJGcmgyEcg_0 knife HIJGcmgyEcg_1 knife HIKyhRtWQ4c_2 horse HIK-Z8wXFug_0 person HISWMgqg80E_0 skateboard HITf8extnnk_0 person HIXuU8Z0N9o_1 motorcycle HIgiF2bkOys_0 person HIgiF2bkOys_1 person HIiu2EVu5H8_0 person HIqhXDkhHsc_0 person HIqr0-BB8Xo_1 knife HIrcAjP1fDs_2 bird HIz27dqnl20_0 bus HI3L38NCy0A_1 boat HI3L38NCy0A_0 boat HI_h7HfFDVw_0 boat HJGPBeom3y4_1 umbrella HJSiTzkFpHk_0 person HJVpMFJT2LU_0 person HJVpMFJT2LU_1 person HJg7wtoy2vk_0 person HJhZhn0zf1s_0 person HJi1L5HxuLo_0 skateboard HJi1L5HxuLo_1 skateboard HJi1L5HxuLo_2 skateboard HJq4kVvdeRg_1 skateboard HJrd3kpvjh0_0 person HJr5BOgO9XY_0 person HJ6BZjeSHTY_0 boat HKFJzdCsRfA_0 person HKGK0FLN9vA_2 zebra HKGK0FLN9vA_3 zebra HKIwynmyQp4_0 person HKWELXwIVvI_0 person HKqHmDjxF6Y_1 person HKsVn1IWaas_0 person HK28Vb__IfY_0 person HLAEqFEcR90_4 horse HLAEqFEcR90_0 horse HLAEqFEcR90_2 horse HLAEqFEcR90_3 horse HLBgSJD-3lg_0 bicycle HLL_j-CQKqQ_0 umbrella HLaiRkL4gFA_0 motorcycle HLhbGKVR4mE_3 dog HLy3UUDhaJY_4 giraffe HL06bx_HNg0_0 cat HL6dNcrAEoM_0 person HL8fh6O6iUA_1 train HL9F68y-0kY_0 horse HL9F68y-0kY_1 person HL9o2Vs9d8s_1 person HMF0KrAf0iI_0 person HMIGIwIcNq8_0 person HMJerOjZn4I_0 person HMQQrRvzwiM_0 boat HMUBbUP6Ko8_2 boat HMV7H81wz84_0 train HMb-pPTMZ5I_0 umbrella HMxMledcSVE_0 person HMyUpcpZGdM_1 bird HM4hJE0Db2Q_0 person HM4zY3uzwOQ_0 person HM7sD8YClkI_0 person HM_3ck6yooo_0 person HNGh3Rvn6Sw_2 knife HNGh3Rvn6Sw_3 knife HNRwM8zXMTM_0 person HNXQ_dkhX-Y_0 truck HNdRITK9TGE_0 person HNeVOXPyunw_2 person fo9SmkQa35Y_0 motorcycle fo9SmkQa35Y_1 motorcycle fpM1eiK3iok_0 truck fpNLFTgOciY_1 umbrella fpRq9BsaPzs_1 horse 
fpRq9BsaPzs_2 horse fpVZYKlsFsU_0 boat fpdUwZ8Gnd8_1 cow fpeYfCUzvDY_0 cat fpkxYBJDTtI_0 person fpkxYBJDTtI_1 person fpmtNez1u0o_0 bus fpnTZF4bvk8_0 person fpomSxrdTyE_0 person fpo2kf1idyo_0 person fpp_41AxRNI_5 giraffe fpp_41AxRNI_1 giraffe fpp_41AxRNI_4 giraffe fqQL3QPq-lo_0 train fqXvzEGxSak_0 bus fqcie5yyOxA_0 cat fqfHWT5hjkY_0 cat fqkVB4qZbgw_0 person fqlWb2OJg3Y_0 bus fqnioIm10xY_1 train fqpMhE5qOKk_1 person fqxGN6r9oIY_0 zebra fq5Zh2Lo9GQ_0 elephant fq959dAMasM_0 truck frFSlwby-0k_0 train frFrggXiJZY_1 person frItg4I9oEQ_0 person frItg4I9oEQ_1 person frJtciauQQw_0 person frRHj0FPzVQ_1 person frW5BpQ3-Fw_0 person frXxZevI11c_0 person frXxZevI11c_1 person frY6tIPR-Co_0 bicycle freW9Vk3GhU_1 person frfLZ70XIXI_1 dog frgCmAtYao4_1 boat frh4LMyWaQw_0 person frn-rfqmGVs_0 person frx5Uv7-1zw_0 person fr3S3gEtDS0_1 person fr616yExbeg_0 knife fsD7pYdfrpg_0 person fsE0DlVODpY_1 person fsFtKjirvM4_1 person fsFtKjirvM4_0 person fsOoFz6I_js_1 person fsOoFz6I_js_0 person fsVlTdh13Lk_0 person fsXVGaRpUNg_0 person fsd-DhcH5gE_0 person fsd-DhcH5gE_1 person fsh-wcyuPM0_0 person fs3oXXx75XA_0 person fs6L5bmf4pQ_1 person fs6Rgfl4CtI_0 boat fs6p-qaLswQ_0 cow fs7RdtNY3Ck_0 elephant fs9uDpde9ig_1 elephant ftG2YflDq_E_0 knife ftH3_awR5ZA_0 person ftIp5PyaGNc_1 knife ftNSK_rSs98_1 airplane ftSUBEOhdck_0 cat ftX9ErOmiAE_0 car ftX9ErOmiAE_1 car ftcnCvd4yeU_0 person ftlmGO0CnHk_0 truck fuHAM8D3ros_3 bicycle fuO2QMXiDMU_0 motorcycle fuPtCtdvowQ_0 person fuSxdcdxe70_1 person fuSxdcdxe70_0 person fuh4-mC5fvg_0 car fuklviv_MRE_0 truck funKReksXEQ_4 horse fur41mRCURs_0 cow futBuKCP9zw_0 umbrella fu5d7x7pORY_0 horse fu_f4n_bYPU_0 person fvAislzoQVU_0 person fvDUF-aukF4_0 person fvH1bolPY2U_0 person fvKg6ReEigA_14 bicycle fvKg6ReEigA_2 bicycle fvKg6ReEigA_3 bicycle fvKg6ReEigA_4 bicycle fvKg6ReEigA_5 bicycle fvKg6ReEigA_8 bicycle fvKg6ReEigA_11 bicycle fvKg6ReEigA_15 bicycle fvKg6ReEigA_16 bicycle fvKg6ReEigA_17 bicycle fvKg6ReEigA_19 bicycle fvLauezWx5g_1 skateboard fvLkNgA4N0k_1 person 
fvZYmQ6SJrQ_0 person fvcIpyJFuQA_0 person fvdoipKMj4g_0 person fvfb_kQCs-I_0 horse fvhVuqonUHg_0 person fvhVuqonUHg_1 person fvlGWjjirUQ_0 person fvqWMyJJqog_0 person fvqWMyJJqog_1 person fvtTggVCkFk_0 person fvzbC9c98ik_0 dog fv42-nzlEsY_0 train fv8F7gjL7Js_0 airplane fwCUjUa0cHQ_0 person fwG8C9CEISw_0 person fwLL8mlHf0I_0 bicycle fwL9zu2j3rk_0 person fwQMFtFdERs_0 horse fwQMFtFdERs_1 horse fwTB5tDP4cU_0 person fwT-VIjQCa8_0 person fwop4msktdA_0 cow fwv2gGVEi6g_0 person fwwOICMutXc_0 dog fxFzCD192K4_1 bird fxHZn2FXRGk_0 horse fxHZn2FXRGk_1 horse fxQYhMoNR9I_0 person fxQY5tnybxQ_0 skateboard fxWwYiT8yXk_0 person fxWyDyUmxuY_0 horse fxbNI1vTtq0_0 train fxbjh88g3Vw_0 person fxcDLsblNhs_1 bird fxdVSYuYJOE_0 person fxhuSOpUuGs_0 person fxr4HpTRNS0_0 dog fxxjK3mjCF0_1 person fxyg5GQk8H8_0 airplane fxyg5GQk8H8_2 airplane fxyg5GQk8H8_3 airplane fxyg5GQk8H8_4 airplane fx07mGL1WQY_1 train fx2_nahpAfE_0 person fx4HT1nuEg4_1 person fx4HT1nuEg4_0 person fx9TwmuIYCY_0 skateboard fx9fckiExps_0 person fx_zN3FWeJ0_1 bus fx_zN3FWeJ0_3 bus fx_zN3FWeJ0_0 bus fyE4_usnxHc_0 person fyE4_usnxHc_2 person fyOZZ_u9Jm0_0 person fyOxr6iISdI_0 elephant fyRO8_b4wJU_0 person fyTzI2wuC0M_0 person fybHaZZmAzE_1 train fydZoAN9JpI_0 person fydZoAN9JpI_1 person fydZoAN9JpI_3 person fyhSoeveW3I_0 train fyyLjISjzvM_0 person fyztN8okJkU_0 person fyztN8okJkU_1 person fy5GdRFHsLs_0 cat fzFR54WdDEU_0 person fzV_Z79golE_1 truck fzaNjkWQtW0_1 skateboard fze3woUbt0w_0 dog fzh-lO5lQhQ_1 bird fzoZsW3AMTU_0 bird fzp3cT3c5Wg_0 person fzp3cT3c5Wg_1 person fzp3cT3c5Wg_2 person fzqX7N7ICQw_1 person fzqX7N7ICQw_0 person fzrGdIi_J9k_0 person fzr9mWLJM6E_1 person fzr9mWLJM6E_0 person fzvrWQX908c_0 person fz1PTzziIcg_0 person fz1kPSLo_p8_1 train fz8emqnbleQ_1 boat f0BJ56Dn3D0_0 cat f0E5mPnVSSU_1 person f0JOvKbLwTQ_0 person f0LbneUbWUk_0 cow f0TYLMAZLpA_0 person f0XZTHcpmZY_4 elephant f0XZTHcpmZY_2 elephant f0XpDJO5Tw0_0 person f0XpDJO5Tw0_1 person f0Z8cmobjWs_0 truck f0Z8cmobjWs_4 truck f0Z8cmobjWs_7 truck f0Z8cmobjWs_8 
truck f0mYYISWwxo_1 person f0mYYISWwxo_0 person f0o0SmB2JAE_1 cow f0o0SmB2JAE_0 cow f03_N__tWuI_2 elephant f1ASjw4-yL8_0 person f1Da4qa1SIw_1 person f1EKnOQEf5g_0 boat f1GkfW2mOlE_0 person f1G2DlbJqyI_0 person f1HKyLr8nL0_0 person f1JCS5F-LuU_0 person f1KEvGLqqwI_1 umbrella f1O6FYMq5zk_0 person f1XB0uA4Dvo_0 bus f1Z1HedJzos_0 skateboard f1fEuZwBkDQ_0 person f1nxCdtYwdQ_0 horse f1sTzp9ahWM_1 person f1sTzp9ahWM_0 person f1uaPSveXCI_0 person f2ADBeQ0Vys_0 person f2ADBeQ0Vys_1 person f2EbBSZ8osI_0 zebra f2EbBSZ8osI_1 zebra f2HKs4L6fwE_0 person f2HKs4L6fwE_2 person f2HKs4L6fwE_1 person f2MDAAk-Euo_1 person f2ULSb7lIAo_0 cow f2ULSb7lIAo_1 cow f2ULSb7lIAo_3 cow f2hfKAL0ZoA_0 umbrella f2hfKAL0ZoA_4 umbrella f2hhMTSObNY_0 skateboard f2p2YcmHn8c_1 bicycle f2s4nNZ_qew_0 boat f2ypHkP1WUg_0 person f3EOdxK13SU_0 giraffe f3HU85Jx7m0_0 cow f3JkzQkcdVM_0 horse f3Kxw7yBcW0_2 person f3Kxw7yBcW0_1 person f3Np8rGlxOE_1 person f3VJKfFdBW0_1 truck f3aufQBTMME_0 boat f3bk60UZpqE_0 truck f3bk60UZpqE_5 truck f3bk60UZpqE_9 truck f3kQ_6EG8cM_0 person f3spBT1AGyw_0 person f31ePv3WlNc_0 person f33OpHIFMWA_1 elephant f33OpHIFMWA_3 elephant f33OpHIFMWA_0 elephant f33OpHIFMWA_2 elephant f35syqOsqSo_0 boat f38P7AlhP5g_0 person f39rc-7_QQc_0 person HNr7Ed0_pQY_1 bus HNtUUtLCSDY_0 giraffe HNtojLNWnKQ_0 person HN6XGq0aRx4_0 person HN84N_vu_hw_1 person HOAbQ4r1tzM_1 knife HOA47mRJ9B8_0 person HOOwNsMTi9g_0 person HOSMm-4fUVM_0 dog HOZcbA0OPF0_0 person HOkS1ljUX4s_0 bear HOmzECHFah4_0 dog HOxzSXuj0O0_0 elephant HOxzSXuj0O0_3 elephant HO6yeFgs7Hs_1 bicycle HO7Uf5Enr1U_1 person HPAa3KI1Z30_1 dog HPDws9wJu40_0 train HPIdRNu7STU_0 dog HPIxVE3OLG4_0 person HPPTr0Mpe0A_0 bicycle HPRp9F-4ts4_0 dog HPSJZXcOiEc_0 person HPjcp8hS6vs_0 person HPjcp8hS6vs_1 person HP0RUfuvfx4_0 person HP4O8FbEpEg_0 bus HP6ROW7ahtU_0 person HP6YRIGqiI4_0 horse HP62suxiDNw_0 bicycle HP62suxiDNw_2 bicycle HP62suxiDNw_3 bicycle HP62suxiDNw_1 bicycle HP9u4FmRvbw_1 bear HQBhagraDwo_0 cat HQIxUlu7xSY_0 person HQKVBNWD_ls_0 person 
HQM9aDN7Tf0_0 person HQZVUknJ0lw_1 person HQZVUknJ0lw_0 person HQePQ1mfzKw_0 person HQePQ1mfzKw_1 person HQhnj0h9OyA_0 person HQhnj0h9OyA_1 person HQjXFK_0sFo_0 person HQxihmm6sSs_0 person HQz_At1F0Yk_2 bicycle HQ4ZWia0f1E_2 cow HQ9gmrJ6Bm4_3 airplane HQ9gmrJ6Bm4_4 airplane HQ9gmrJ6Bm4_5 airplane HQ9gmrJ6Bm4_1 airplane HRCOvhALHv0_0 train HRUX75Ve2aQ_0 person HRVMd5SmF8Y_0 umbrella HRl1VhUfhok_0 person HR1wffFOaEw_0 elephant HR4ExP8Ompc_0 horse HSKpu2UmvBo_0 person HSKpu2UmvBo_1 person HSN6tO3rh-c_0 person HSVWpwFagLg_1 person HSdyrMzM64w_0 cow HS3WVWEFHm8_1 person HS3WVWEFHm8_0 person HTAnAeW5Bhs_0 bird HTS20hgMcFQ_0 bicycle HTTz78R4i0c_0 person HTehrgCQAPo_0 person HTgldgqci04_0 person HUFGafskCjw_0 person HULASsoz03U_0 person HULASsoz03U_1 person HUPxNiCgjn0_0 knife HUfwe7j7IBE_0 person HUgX2V1AkVw_0 person HUiMyxUEC_A_0 person HUv2tT_n5Bo_0 person HUy4cHFX-04_0 person HUz7znJTRNg_1 umbrella HU_HuNQ4TDw_0 cow HU_HuNQ4TDw_1 cow HU_HuNQ4TDw_2 cow HVEmUm86PBo_0 motorcycle HVI1w93kCfo_0 person HVOWKezX_bo_0 horse HVOWKezX_bo_2 horse HVYf36PFglw_0 dog HVY9hWgMujc_1 truck HVeqzrLyVtk_0 person HVkFV2q27S0_1 person HVkQkPaQbrw_0 person HWAW-J3ZpIs_0 cow HWA45moBwMo_0 horse HWEI24n2tHY_0 person HWItJuo6DSM_0 bus HWXgDvYdlHE_1 person HWZSmtWVH54_0 person HWZenKFJqkY_0 person HWZenKFJqkY_1 person HWZenKFJqkY_2 person HWfpkRSnZp8_0 train HWfpkRSnZp8_2 train HWjaeLf99dU_0 bear HWr9Kqi0B2A_0 person HWsTMfZok5E_0 person HWtKIjJacjk_0 person HWtyII4CMWg_0 car HWtyII4CMWg_3 car HW7FTNqTKhs_0 train HW7yQK_j65g_0 horse HXARJhNURSs_0 person HXH_F5SX6FU_0 truck HXH_F5SX6FU_3 truck HXH_F5SX6FU_1 truck HXKnqbEGfVw_0 bird HXKnqbEGfVw_6 bird HXKnqbEGfVw_1 bird HXKnqbEGfVw_2 bird HXKnqbEGfVw_3 bird HXLA3nbxgh4_0 person HXWoqdza4oA_0 dog HXaAJtjX1mE_0 bicycle HXaAJtjX1mE_2 bicycle HXaAJtjX1mE_1 bicycle HXa-0NlFTP4_0 person HXcSrTLsF9c_0 train HXhYYfE4uN8_0 person HXvgiezvrYI_0 truck HXx4tRTfGRM_1 dog HX0kjr3XYHI_1 bear HX7P1ipPByA_0 dog HX-gTvdUaOE_2 motorcycle HYLAdzbqvC0_0 person 
HYWEWmMMrsU_0 cat HYW3dAv02gE_0 cow HYW6VucwAEg_0 person HYXFGMzivds_10 truck HYbuNzqXmyY_0 person HYiN6skKjfY_0 knife HYoonHvZXCc_0 motorcycle HY1aAYxxlQo_0 person HZC5bba_V4Y_0 knife HZJ-JQkt590_1 bicycle HZKExvpKLQ8_1 person HZLdGfto2mI_0 car HZSPPN3TMx8_0 bird HZZadt4SIl0_0 dog HZceU_BV2GM_0 person HZceU_BV2GM_1 person HZceU_BV2GM_2 person HZd4rCCsNMs_0 skateboard HZd4rCCsNMs_1 skateboard HZd4rCCsNMs_2 skateboard HZkmrVeoUV4_0 person HZscUISrdww_0 person HZ-tGW__JOI_0 cat HaE1N8Q1b7s_1 train HaMpIMApSi8_0 person HaO3z-4gcBs_2 train HaRliuOtm7s_1 person HaiLotzzEXk_1 elephant HaiLotzzEXk_2 elephant HaiLotzzEXk_0 elephant HarW34izH-M_1 person HauA239AM7I_0 dog HavxbX8tng0_0 person HayoEz1x5Ks_0 person HayoEz1x5Ks_1 person Hay4Nx9S5-k_4 bicycle Hay4Nx9S5-k_1 bicycle Ha8XGRvxQxs_0 person Ha_OuYxLXIs_0 person Ha_w-xJsHAY_0 zebra HbBCtCXKIEE_0 person HbH7DpR0WUw_0 person HbJufGCjdSE_1 person HbKh31cncOI_0 bird HbLoxqqdYsQ_0 cow HbQu1mfGg4c_2 elephant HbQu1mfGg4c_3 elephant HbQu1mfGg4c_0 elephant HbQu1mfGg4c_1 elephant HbcyjRGbMBY_0 dog HbcyjRGbMBY_1 dog HbhmBauZqxE_0 horse Hbq35QImz2w_0 person Hbq35QImz2w_1 person HbuCy2fsJk8_2 knife HbyKQdGpxhA_0 boat Hb3INTcuOVk_0 person Hb5zCzD4J_E_1 train Hb5zCzD4J_E_2 train Hb5zCzD4J_E_3 train Hb5zCzD4J_E_6 train Hb5zCzD4J_E_7 train Hb5zCzD4J_E_8 train Hb5zCzD4J_E_11 train HcBQXS22BDs_0 person HcJTaK6Q9P8_0 person HcXN4Pwnaeg_0 horse Hcfxwdbwk8c_0 person Hchet3FQwII_0 person HcxL3_INS_0_0 person Hc5ZM6UWTbY_0 person f4OI46BYh08_0 skateboard f4Oj9uMeFdI_0 elephant f4PgAt4YpfE_0 cat f4P-R7h_gTU_0 person f4QyVWC6yrw_0 person f4XkIcezAd8_0 person f4XkIcezAd8_1 person f4Y2tjwOV2k_0 cow f4Y2tjwOV2k_1 cow f4bys9o_Z2M_2 bird f4s0cImpNBM_1 cow f4xLPprxm30_4 knife f49BXPlU-iI_0 knife f4_Mfc9Ccg8_0 truck f5BIXG_nLok_0 bus f5HsrI3Codk_0 bird f5J7yrE24eY_0 person f5LuupUslCU_0 person f5Q2iD7VUx8_0 skateboard f5W37dv91tU_0 person f5apNjAecEc_0 person f5bVoAXze0Q_0 motorcycle f5d1IXK1Tz0_0 bird f5rzpIRd4wA_0 train f5wHsLucnf8_0 person 
f5zEWaDr1jg_0 cat f50eMXA_-bM_1 person f50eMXA_-bM_0 person f53Jmsa7Jkc_0 person f6AcbdJ77A4_1 train f6E2ODGGF28_1 person f6Px5vjTeRI_1 elephant f6Px5vjTeRI_0 elephant f6Px5vjTeRI_3 elephant f6Px5vjTeRI_5 elephant f6UBVcEIt3I_1 person f6cXiuO-MvQ_1 truck f6dVANLzPTY_0 person f6dVANLzPTY_1 person f6o6ukW_Qog_2 bear f65c6sEDtkE_0 person f7A6AOC8fOg_0 person f7A6AOC8fOg_1 person f7ExsvPto-E_1 motorcycle f7Fs7-jGglk_1 bear f7GJgMh9xt4_0 person f7WvltLziTI_0 boat f7cI-B4pJso_0 cow f7kLnCuNTQo_0 cow f7lmZQGcfBA_3 elephant f7lmZQGcfBA_4 elephant f7lmZQGcfBA_0 elephant f7lmZQGcfBA_1 elephant f7oBEoL94vw_0 person f7pnt1rB9kI_0 person f7x074oihas_0 person f73BEqi2_DM_0 person f7-S_iQAyKU_0 car f7-htlH5qd4_0 bird f7-htlH5qd4_2 bird f8A1o9Nbs64_0 skateboard f8BXIJnggCI_1 boat f8BXIJnggCI_3 boat f8BXIJnggCI_4 boat f8Dp8Yvyr_0_0 person f8PVrlhAIV4_0 person f8T4DHNu6MY_1 truck f8ZxXHSqC_8_0 boat f8cW6kw6240_0 person f8mzzGhPBaw_1 car f8q3fKwf5PY_0 knife f8yFyIwDCQ4_4 giraffe f8zLCa1oGOE_0 horse f8z83D9vGPo_2 knife f80hjE6vabs_0 person f80hjE6vabs_1 person f84ypk41ULc_0 elephant f9H0LrBLc9Y_0 person f9H0LrBLc9Y_1 person f9H0LrBLc9Y_3 person f9H1bUagACA_0 horse f9H6UaPUITk_0 cat f9LOlCLfsJs_0 person f9N4Jxt-kUs_1 knife f9N4Jxt-kUs_2 knife f9TCFTluRIc_1 bus f9e12AC1jXM_0 bear f9oWC3kSP1M_0 motorcycle f9ovukmKaq4_1 person f9sPt8HIN0w_1 skateboard f9sj-0ZFV6E_0 person f9sj-0ZFV6E_1 person f9v2ONFCiwQ_0 person f91XzUXz11U_0 person f96d9EwxAB4_0 person f9-IyW9tVLY_0 person f-FxqFk0TdM_0 person f-JXaNm7TBw_0 person f-J7SQBHRN4_0 truck f-Yei4idfG8_0 airplane f-dhfS-geuI_1 elephant f-dhfS-geuI_2 elephant f-h9L-PN1ZM_1 bird f-iLJUDdrD8_0 person f-niuVrgiIc_1 person f-rp_CghH-E_0 skateboard f-s-4lM4qPA_0 truck f-w51BH60RQ_0 person f-1WVe76te0_0 cow f-4EyKUawVo_0 bear f-7ZEGsCz9U_0 person f_GKi-DGmzM_0 person f_Gf2hpt7y4_0 giraffe f_GudF8uST0_0 person f_NsA6enCZE_0 person f_OOyDOAAOU_7 elephant f_QhMhkyUSY_3 truck f_QhMhkyUSY_1 truck f_QhMhkyUSY_4 truck f_Us8TvJMUQ_0 person f_VwDCt9HTc_0 dog 
f_WQIaZ5PjY_0 boat f_bXOtZjzfo_0 person f_b0IaRqtbs_0 person f_jLGz53IpQ_0 person f_jLGz53IpQ_1 person f_mo54sXCc8_1 person f_mo54sXCc8_0 person f_rC1JIAMBU_0 person f_wk-NOqceY_0 horse f_yMF9tkk70_1 car f_yvJuTzFHc_0 motorcycle f_yvJuTzFHc_2 motorcycle f_2I0S-EYu8_0 dog f_3x9qJXCjA_0 person f_49EFLQ02I_0 person f_8S2hHC2rc_0 bicycle f__fXHkVh5E_1 cow gAKFUl9e_kg_0 person gAQ92hISW6g_0 person gARNWQDyaYM_0 boat gAYbqApcfGs_0 person gAdIZN7_0SM_1 airplane gAeHmfC6t5s_0 cat gAetQXcftXM_2 dog gAnOylz1kDY_0 person gAnmF0EFcB4_2 elephant gAorjWC_59o_0 cat gAo9Rsd6xwg_0 cow gA2FDYNulg8_1 person gA22uEcTAuY_1 dog gA84cp5Keqk_0 horse gA_a2Ajm7B8_1 horse gBFsvbfVaLg_0 person gBJgWZcXu9o_0 person gBK7NwUcSoY_1 person gBOpan7nm6M_0 horse gBOpan7nm6M_1 horse gBPipHCII3M_0 bus gBRc8zqsL78_0 dog gBUOzZPs_o4_2 person gBUOzZPs_o4_0 person gBYqrtFnN_Y_0 person gBYqrtFnN_Y_2 person gBeaBC0u9cQ_0 person gBhKhiEJUCM_0 horse gBiq_BH15FM_0 dog gBoebgAjbVw_0 person gBoebgAjbVw_1 person gBs3hPLJTGs_1 horse gBwCej92lKg_1 person gB0wConR2VI_1 skateboard gB2QHXkiiHs_2 elephant gCDBnQV_G3c_0 cat gCDBnQV_G3c_1 cat gCGtBmntCiI_1 motorcycle gCHegjuq0os_0 person gCHegjuq0os_1 person gCI1E3Hezdo_2 cow gCI1E3Hezdo_1 cow gCTp3CdMHCo_0 person gCT0VAdPm98_0 cat gCuOoA6aZ5U_0 cat gC7K3OeQFHo_3 bird gC7XtkA9y_Y_0 dog gC-xUbdM-tU_0 person gC-xUbdM-tU_1 person gDAPPFBC9Gw_0 train gDEpD9ek-O8_0 skateboard gDGLrPPl_PU_0 cat gDMsKJ61KPo_1 skateboard gDOGAHsBM_o_0 person gDTs0BOj8Fw_0 cat gDU0hHsqtbU_3 knife gDU0hHsqtbU_5 knife gDU0hHsqtbU_0 knife gDVGs8wTXCQ_0 cat gDkDXOm8z5Q_1 cow gDkDXOm8z5Q_0 cow gDk-zDBsv7g_0 dog gDnSIxaiPzk_0 person gDn3-DCSgNg_0 train gDsBFuJE6D8_2 dog gDvOoWXI3yg_0 person gD2GATPADlA_0 person gD5_x_Bz1z4_0 person gD5_x_Bz1z4_1 person gED4_ImWufA_0 truck gEE_GCrAqF0_0 person gEJi9Jawk2A_0 person gEOxDCDD97k_1 horse gESEn7ZZELM_0 person gESEn7ZZELM_1 person gEai3uMvvFg_0 airplane gEai3uMvvFg_3 airplane gEai3uMvvFg_4 airplane gEhLmQnM720_0 car gEu4mV0DWRQ_0 person gE0ZQD1rCy8_0 person 
gE0mBxOEwRI_1 skateboard gE0mBxOEwRI_3 skateboard gE8ErAnVuzY_0 bird gE8ErAnVuzY_2 bird gE-GVN9ErhI_0 person gFEnoylVci0_0 person gFac0jUOjCE_0 horse gFcIMdm4qtI_0 train gFdHQTLSmnc_0 airplane gFfVZSPVYmY_0 person gFiSl9m-w0k_0 person HdBc9ySq76E_1 bird HdCyMGZFJhM_0 person HdFYXjdN5_8_0 person HdO2lmXvENQ_0 horse HdR6VoZEwAU_0 cat HdSXU0fhHbM_0 person HdT_9pXdxuc_1 person HdbZzqJGLo8_1 cow HdcXcqUlgI4_0 skateboard HdhKF0UWx4g_0 person Hdh3nOzwVW8_0 person HdjbDB8UvCY_0 person Hdo3_NQiVKw_0 knife Hd85XlwoOMc_0 person Hd-wT5OTZDE_0 person Hd-wT5OTZDE_1 person HeIrGQnIMOE_0 dog HeLNz5XJe08_0 person HeTGT7JfvB0_0 person HeUD1Hrzswg_0 bird HeYNsU-PKJs_0 cow HeYNsU-PKJs_1 cow HedUVNznPK0_0 car HedUVNznPK0_1 car HeoyKd78htI_0 person HeoyKd78htI_1 person HewdFRJAXH4_0 person He08dewEgbY_3 motorcycle He08dewEgbY_0 motorcycle He1OQxCPk_w_0 person He5cucK-e48_0 person He6bAMDkCss_0 elephant HfDHvE46LYU_1 bird HfDzCPRQ2nw_1 elephant HfEXlJ0dOhU_0 person HfEZYvYqq_Y_0 cow HfHNi93ZHoo_3 cow HfHNi93ZHoo_1 cow HfOcLeLWchM_0 person HfZ871F0xSo_0 cat Hfnnbr4CeTg_3 bus HfqI5BIpp0s_0 person Hfq3_YJ7BpY_0 motorcycle Hfq9JFmquE4_0 person HfvJc2dxUR4_0 boat Hf1Iyyz2DMY_0 person Hf1Iyyz2DMY_1 person Hf8JWsbSYYk_0 person Hf8-8h45g-g_1 elephant Hf8-8h45g-g_0 elephant Hf8-8h45g-g_2 elephant HgDimNCaxF0_1 bear HgFCKM4ndEc_0 car HgMYuCtsOwc_0 person HgMYuCtsOwc_1 person HgO57Npp9Yg_0 train HgexaoNeZJk_0 person HgiYmNrxUzg_1 person HgkeptGXNt4_0 motorcycle HglF9x-ORXU_0 person Hgr5__oevds_0 person Hg2vqnLAc8I_0 dog Hg4DJ-x85Dw_1 elephant Hg-R_RMIEN8_0 airplane HhASNiFpJlw_0 truck HhF6cAtp7Xs_0 knife HhGGJNmwWHk_0 person HhVSLU0A-wk_0 car HhcMy4KZ9mY_0 skateboard HhfSUB2LOTU_0 person HhiUVwHWmwM_1 person HhiUVwHWmwM_2 person HhiUVwHWmwM_0 person HhjGAeK-XWg_0 person HhoRf1Ovlf8_0 person Hhvq-cwBJgo_0 person Hhwzl9x_m34_3 cow HhxV27YhiqI_0 skateboard Hh1xD0M0N8Q_0 person Hh6x850teNQ_5 airplane Hh6x850teNQ_7 airplane Hh6x850teNQ_8 airplane Hh6x850teNQ_9 airplane Hh6x850teNQ_10 airplane 
HiBUWbOyqcQ_0 person HiGZ2EdJh2o_0 person HiMItbtVHcY_0 cat HiMItbtVHcY_1 cat HiNt0G1AIO4_0 motorcycle HiTE5nqzjBw_0 zebra HiUz61ffgHA_0 person HiZDjdREbmc_0 umbrella Him7gJ7sArU_0 person Him7gJ7sArU_1 person HinGUsliCKc_0 truck HirBTVnhNls_0 cow Hi4ITByGP0Q_0 person Hi4mzrYdRBQ_0 horse Hi4mzrYdRBQ_2 horse Hi4mzrYdRBQ_3 horse Hi8Ey0o5mCQ_1 person Hi-7ZtG_JWI_1 person Hi_YHp3Jz48_0 cow HjAtN_MbguE_0 person HjLLTWwaCB8_0 horse HjNfykX021M_0 person HjNfykX021M_1 person HjgdNiVfO9M_0 skateboard HjlX9nu9Vf4_0 person Hjo13y8dFy4_0 motorcycle Hjt_y0CW-dY_0 person Hjt_y0CW-dY_1 person Hjxd2cno65M_0 skateboard Hj0J8FVxBjg_2 person Hj0J8FVxBjg_0 person Hj0J8FVxBjg_1 person HkApyQz8MTY_1 horse HkQ4tzUFCUU_0 truck HkW_wLkAKpg_0 person Hke6h3Sv5bA_1 bicycle HkzYNIDq0q4_0 train Hk45sdCRh9g_1 bear HlEkgK08UfY_1 person HlTQbPXnzu8_0 dog HlWsih27OmA_0 bird HlaPVZM-53c_0 person HlfpirtC6oQ_0 person HlmuHGoCGAI_0 cow HltyUzvtugM_1 bicycle HlurUBv4bh0_1 giraffe HlurUBv4bh0_3 giraffe HlurUBv4bh0_4 giraffe HlwSaYwFLRE_0 horse Hl3qik9GRX4_0 person Hl5MXwWiXWM_0 person HmDDLtJcD5g_0 person HmORePbYJkk_0 skateboard HmPvsdwo_fY_0 dog HmRm2phIiGo_1 bird HmY8zwmIiac_0 cow HmaGylwEFxw_0 person HmbTCfB3Vkg_0 person Hmk4dZnPtRY_0 bus Hmn3xf-zqWI_0 person HmqV_7hAxdw_0 person Hmr0jbygomI_0 giraffe HmwxDK0zo6U_0 person Hmyj1zKgToA_0 person Hm0kxS31F_U_0 person Hm0kxS31F_U_1 person HnNJeASG0-M_3 person HnNJeASG0-M_4 person HnNJeASG0-M_2 person HnNzkYDhWks_1 person HnNzkYDhWks_2 person HnNzkYDhWks_0 person HnP7iXcgg8g_0 truck HnSHJ_iCdi4_3 truck HnSHJ_iCdi4_1 truck HnUrGKpAsOk_0 cat HnbNOJpzYPE_0 person HnjhdtM8qSI_0 skateboard HnptRKjBUF0_2 boat HnwYRWj3fk4_2 knife HnxaJbaAiUI_0 person HoH5exlgIxk_1 skateboard HoLifxKZUpI_0 person HoLifxKZUpI_2 person HoLifxKZUpI_1 person HoNs_4V1pNs_1 bear HoNs_4V1pNs_4 bear HoP_nMgAxAk_0 boat HoeeRkyNozc_0 cow Hon64st5_6g_0 train Ho2ixBE8dzE_0 giraffe Ho5TcUOlb3Q_0 motorcycle Ho5o7aBqNAc_0 person Ho6N0OgD-1M_0 person HpBBda_pbf8_0 motorcycle HpGr16tW9dk_1 person 
HpQ90KkREGo_0 person HpUPD5_WMYI_0 train HpZ3IzUfsGg_3 bus HpbQsLdUHN4_0 boat HpjyvLHus3Y_1 skateboard HpkTeQdQ03Q_0 skateboard Hprw9lNWGGs_0 person HptcjVcfzgY_0 cow Hpwk73qvroU_1 elephant HpzTTAS6Qt8_0 person Hp0SQy5w9Q4_0 person Hp-eaTbVfLY_1 bear Hp-2Gb7Fwns_0 cow HqxhhM71S2g_0 horse Hq1KLztJBrE_0 person Hq6tGHLzg4Q_0 person Hq814Tfrblw_1 airplane HrHPBJOnFgg_1 train HrHPBJOnFgg_0 train HrHPBJOnFgg_4 train HrHPBJOnFgg_6 train HrdVu5J3rZQ_0 person Hr-keYNRBhA_0 train HsLZwGFHYUg_1 horse HsNcZZ6iwHQ_0 person HsOiHc1moVk_0 person HsOkCwZLv_w_0 bus HsOkCwZLv_w_3 bus HsOkCwZLv_w_1 bus HsOkCwZLv_w_2 bus HsR2xk4I1as_0 person HsVKw_8AQtM_0 person HsZgeesgCZQ_0 person HsjVUPs3XB4_0 boat HslbDMoiABY_0 car Hslld67XdsY_0 person HswufOfUGyk_0 truck HsyscFWIPZs_0 bus Hs0HRqYcYqA_0 car Hs6bVSOu98U_0 dog Hs_vQr20HdQ_0 skateboard Hs_vQr20HdQ_3 skateboard HtErHV_tZqs_0 elephant HtIbfC8DDos_0 truck HtNaGNO6nnc_0 person HtRiNzzfakk_0 person HtUPhgHKN9c_1 boat Hth8t7jhKPs_4 horse Hth8t7jhKPs_7 horse Hth-I5KYVsI_0 cat Ht054jKgWfE_0 person Ht9C8ABsxrg_0 person Ht_bczKGV-0_0 person HuNIgJEUelo_0 person HuNIgJEUelo_1 person HuOzcY9ybpo_2 dog HuVl7peYYF8_0 person HuVoecmBgpM_2 bird HuVoecmBgpM_1 bird HuZPTuSe7Zw_0 person Hue6Q5JKEKw_0 cow Hun4T6fv3cs_0 person HuqC6CX9uRA_1 person Huyd-7WlWWU_0 person Hu3xpcZqwRg_0 person Hu9DGxLcg2c_0 person Hu-VYy60p64_0 person HvHJi-EkL8c_0 skateboard HvIubGltpPY_0 dog HvLq5xDKM6E_2 bicycle HvP4rcOll6k_0 person HvQGnFuiwtg_0 cat HvTvaPx2hXw_1 train HvhkLhJ4YFQ_0 person HvuLPfhVT3s_0 person HvyIg5RMLbU_1 person HvyIg5RMLbU_0 person HvyzpBvy40o_0 person Hv5sH0eTE_M_0 dog HwSP55CmiCk_0 person HwS3weg4aQc_0 dog HwS3weg4aQc_2 dog HwY6kiQlICc_0 person HwdEYJ2bZkg_0 airplane HwdyzravQpY_0 cat HwfLycybCD0_0 motorcycle HwgmR0Qlm_I_0 person HwipRH29Hr0_0 bus Hwnqezsko-Q_0 person Hwnqezsko-Q_1 person HwxnH--ot8o_0 car Hw0JhQaRYcA_0 cow Hw2Bhz2SkUI_0 person Hw2Bhz2SkUI_1 person HxMniz8r1x4_0 person HxP056QWsGY_0 person HxP056QWsGY_1 person HxaFZyog34E_0 person 
HxaFZyog34E_1 person HxgU1Dh8wMs_1 person HxiBpvG82Ys_0 motorcycle Hxq1wNRv5Yg_0 person Hxv6y6I4mvE_0 horse Hx19D3w4xGI_0 giraffe Hx_Z9TOIV8U_0 motorcycle HyJgfYNotwk_0 truck HyUY7bqdm9Q_7 dog HyUY7bqdm9Q_0 dog HyVLne6RE-A_0 person HyXjUWAQ970_0 skateboard Hygs9OBUgg4_1 person HyuQCu-z558_0 motorcycle HywSTw3dtgs_0 person HywSTw3dtgs_1 person Hy4E2NZEc34_1 train HzAOQnmw_bo_1 elephant HzAOQnmw_bo_2 elephant HzCClfShiwM_0 person HzCClfShiwM_1 person HzDzb9xxc6o_0 person HzESeh3ZV4g_0 person HzHWWeZEU6E_1 skateboard HzJgpBBIk1o_0 cat HzLm3QfIx9w_0 person HzLm3QfIx9w_1 person HzXBY-SJECY_0 horse HzYY4-iAvrk_0 cow HzdSxrJ2oBw_0 skateboard HzkmlCJwvqo_0 horse Hzlcc_lAGVo_2 skateboard HzqIVSJNXAU_1 person HztbwJhPXyk_0 person Hz6I6jLi4NA_0 dog Hz8qayZDGpU_0 person H0Adt_c6kJo_2 elephant H0EEB1bPOjE_0 person H0VjOJvg49Q_0 bicycle H0Ym6NE2ny8_0 cat H0gWl9KRbHo_0 person H0k2WZec6aA_1 train H0k2WZec6aA_3 train H0k2WZec6aA_4 train H0k2WZec6aA_0 train H0u061QsnHw_0 cat H0yhw97jkkY_0 person H0z8VqDW-vg_1 airplane H01F2fhFpr0_0 elephant H097WsXpask_0 person H097WsXpask_1 person H1C2ZZeeVs0_0 cow H1Hd5Japfbc_3 train H1Hd5Japfbc_0 train H1Hd5Japfbc_1 train H1Hd5Japfbc_2 train H1JIvu1dbbk_0 person H1JIvu1dbbk_1 person H1MTfTrQrE0_1 person H1d68B_jDjI_0 person H1hg-0_AS9A_0 cow H1xBJoYM7rE_4 truck H1xBJoYM7rE_5 truck H117IshzypA_0 knife H144B0rpQh0_0 person H144B0rpQh0_1 person H1-_3CvKDzc_0 bird H2Q-46IlKEc_5 truck H2Q-46IlKEc_6 truck H2RoEMwxEAk_1 person H2TqEPsubdM_0 bear H2iTxNLOK1Q_2 motorcycle H2iTxNLOK1Q_0 motorcycle H2iTxNLOK1Q_3 motorcycle H2vkpfO2yqU_0 person H22P5Z4GfkE_0 person H29Xe5gG_-s_0 person H3A2DSw_xNU_1 elephant H3GcVWKTVd4_2 truck H3NrFrjQlfc_0 person H3exbzmmPQY_0 person H3jC0oToDjU_2 person H3jC0oToDjU_3 person H3jC0oToDjU_0 person H3o1VsopVFM_1 bicycle H3o1VsopVFM_2 bicycle H3pifBCagTI_0 person H30IPtBzf_s_5 skateboard H30ifg3HO_I_3 dog H33IRr1Z3-w_1 train H36UOsilz4M_0 person H4Hp-UJYZ_g_0 bicycle H4JiUp8EH3s_0 zebra H4VZD26aqe8_0 skateboard H4VZD26aqe8_1 
skateboard H4bN1hcXw9Q_1 person H4dTHFeYa30_0 motorcycle H4eE_LAeWXQ_0 person H4eE_LAeWXQ_1 person H4gxLA7vTo4_0 person H4lBmXOi3Uc_0 dog H40G2dsVha4_1 train H41XJMKpfFM_0 bus H42hQSjU97o_0 knife H5NqMNaMEiM_0 bird H5YO56LD_dY_0 elephant H5YO56LD_dY_1 elephant H5iHzuWmtDw_1 dog H5sijKl_Xi4_0 cow H50EXfjT2O0_2 airplane H50EXfjT2O0_0 airplane H50EXfjT2O0_1 airplane H50-_mqAU14_1 cow H55Ru4hgats_2 elephant H55Ru4hgats_3 elephant H6OhYxXS1So_0 cat H6UwkC3sYic_0 cat H6ZHYEOcjCI_0 bicycle H6ZHYEOcjCI_1 bicycle H6Z8sZ34ZGw_0 motorcycle H6dXJIZnH-k_2 train H63oHdGMBAs_0 bird gFunUi36tVM_0 horse gFunUi36tVM_1 horse gFvhLM1k-IY_2 truck gFwCuQBtZiU_1 umbrella gF7IM-CiOdU_7 bicycle gF7IM-CiOdU_0 bicycle gGBEKYXUhbE_0 truck gGMxVO2zmP4_9 bird gGMxVO2zmP4_1 bird gGMxVO2zmP4_2 bird gGMxVO2zmP4_5 bird gGMxVO2zmP4_8 bird gGSCGkm00jM_1 bicycle gGYN2hnw1SQ_1 elephant gGdKtY4p1E0_0 airplane gGt9CVOzJOI_3 knife gGzaN_8PxZw_0 skateboard gG8tfb-eSuo_0 train gHC3HqRbW6g_0 elephant gHF9PM2MVuw_1 train gHvzU7dfBU8_0 giraffe gHyK46CyQtA_0 cow gH0LLPcn-H8_0 elephant gIBZr7Mh05k_0 bird gIMq_fnjtSM_0 cat gISy0wedyW4_0 boat gInHAdlbB60_1 skateboard gIsXFCo7Nt4_1 dog gIxuS1GwPPo_0 train gJV63DGM7Ew_1 car gJa0yNDBFio_3 person gJa0yNDBFio_0 person gJa0yNDBFio_2 cow gJfD9eHnos4_1 elephant gJn5fXk7dCs_0 airplane gJuZGVWuQQ8_2 bicycle gJ-k_oHkqYc_0 cat gKHR68FmKE8_3 airplane gKHR68FmKE8_0 airplane gKHR68FmKE8_4 airplane gKmF78OWCUc_0 motorcycle gKqUwiPYSh8_0 motorcycle gK7dud30V7k_0 giraffe gK_K33gm3SA_1 motorcycle gLQWgnWqQ1Y_0 bicycle gLRU7lXCgNw_1 dog gLRexWYaW_Q_0 skateboard gLbADp0AlZU_0 bird gLtnBhTBpkA_1 boat gL3uBv5NWJU_1 bus gL7JySv9H4I_0 bicycle gMAW4Am5_pc_0 cow gMBTewi9VZg_0 cow gMCCgBzug_U_0 knife gMFgEtqbTXs_0 boat gMJuszEOURk_0 cat gMMJH4UYboM_3 bus gMXt8X-xC_g_0 dog gMlNev_l4Yg_0 bus gMlhd1gczF4_0 airplane gMsGe7w79Hg_1 car gM9tFNvc1xw_0 cow gNDSQ2l9FYg_1 elephant gNMkDmfkZ1E_0 motorcycle gNcGXjn7g9o_0 skateboard gNwKVPIi010_1 skateboard gN2aKPpTpzQ_1 dog gN7-cLfUlt8_4 giraffe 
gN7-cLfUlt8_6 giraffe gOOB0RZmnUA_0 cow gORdlzUa3nQ_1 bird gO48FZrUm88_0 skateboard gO-8RNI2Puc_1 dog gPhcXlQLLRU_0 horse gPrWvEE7yjw_0 cat gPteWZyyJeo_0 cow gP3SQErTTOg_1 motorcycle gQBW4py4GhY_0 skateboard gQEGmIhhEQ4_0 train gQEGmIhhEQ4_1 train gQEGmIhhEQ4_2 train gQFqppfDRRk_0 umbrella gQLZ5H-n0Uk_4 knife gQVlREJXkik_0 knife gQWTTEHj5Hs_0 cat gQeqE3dgZoM_3 airplane gQe5gykuyi4_1 train gQpWY94Fx5E_0 motorcycle gQpuEhphXHk_0 car gQpxfwrF7Sc_0 bus gQ6AUvEXuaQ_0 bicycle gQ9HhxeKI4A_0 motorcycle gQ_SF2MtsUc_0 elephant gRFcteFGpLM_0 skateboard gRJGd_HzC-8_0 knife gRJpf6JwJeU_1 giraffe gRNKgw2D_mE_0 knife gRVrvJioWZ8_1 train gRoGrhv1ebI_0 elephant gRsOR1tKh8U_0 truck gR3ihf3rch0_0 car gSXDTJjj1jk_0 train gSi2fNTUsy8_0 horse gSlT3ALqvTM_0 skateboard gS0DTbVQ2x8_1 knife gS25yLrNO98_0 bear gS2-SAccVh0_0 skateboard gS7U-6Z8M2g_1 knife gS_9D3OWXAk_0 airplane gTqgARR0BBQ_1 boat gT27MQBhatA_0 skateboard gUDoTzwZlso_0 dog gUL0-NbHvuA_0 motorcycle gUMLascwbtU_0 train gUNCDmbzxq8_0 train gUbc_OUTnOs_0 airplane H7ONEeAkBFo_3 motorcycle H7ONEeAkBFo_2 motorcycle H7YUH_GBWdQ_0 train H8B-3STVp6E_0 cat H8LitQV6pNM_0 cat H8SccYIiPs8_0 zebra H8coORJpR80_1 skateboard H8k1E1i7AvQ_0 knife H9AQUC0N1zI_0 horse H9JfwPhdCjg_0 boat H9KjlXZYxJU_0 train H9KjlXZYxJU_8 train H9TUml4LflE_0 cow H9UTvMwaoRg_0 cow H9bbSssKl2o_14 umbrella H9eutGBn3zw_0 motorcycle H-C6EBylvh4_1 cat H-IoiGsEU5Y_0 train H-QKbNwtoH8_1 car H-gh485Om10_0 bus H-gh485Om10_1 bus H-kkRVEs3Bg_0 motorcycle H-uiufHSb3s_0 knife H-uvqjsUCLc_0 dog H-uvqjsUCLc_1 dog H-5Ynjv0dQI_1 train H-62b99sK_s_0 train H-62b99sK_s_1 train H_Ei1gRODpw_0 dog H_KMZLSAxMw_0 train H_iI201Iqws_1 truck H_iYHl4pFuQ_0 horse H_mRfG30Gzo_0 skateboard H_1O-OBZ3BA_0 horse H_6vxd3ckIY_0 cat IADSsAb2KSo_1 umbrella IADSsAb2KSo_2 umbrella IAFApeJ5FvM_1 motorcycle IAOiNYVeqzE_0 bird IAaINtcnO7A_0 bicycle IAcbsZcN_pM_1 motorcycle IAkSntQ2Aso_0 horse IAlz_evs7fU_3 car IApV0rfD9oQ_0 dog IAsXYmK1baI_0 motorcycle IAwKojHnvtU_0 train IBD9tJNb9_o_0 train IBFp5y96q78_0 
motorcycle IBFp5y96q78_2 motorcycle IBKLgBXZFzw_0 motorcycle IBYJQU6-nGg_2 cow IBYg-hMbb04_0 knife IBm1C4qJtTg_5 umbrella IBm1C4qJtTg_8 umbrella ICQbVnaJL_0_0 bus ICZ4tinBQZg_1 knife ICZ4tinBQZg_2 knife ICZ4tinBQZg_3 knife ICg3W1-Prhk_0 elephant ICnAWjPDzRw_0 cow ICtLhp-qveM_0 boat IDCBO7W7xpo_0 cow IDNvFEra8mc_5 horse IDNvFEra8mc_1 horse IDNvFEra8mc_2 horse IDNvFEra8mc_3 horse IDNvFEra8mc_4 horse IDO6jw3u3_w_1 airplane IDcxChwEqDs_2 horse IDeGA2EV3WY_0 airplane IDeimFOIbVc_0 train IDmwsXLZKUs_0 cow ID1faW2L3rM_0 cat IEOg-ZulFR0_1 bird IEPYJyHfP2E_1 elephant IEYC-aYAQ40_0 boat IE5qZDd7tWw_0 elephant IFGohfPURX4_0 person IFfS7hatV0s_0 truck IFkUMGE7bbc_1 elephant IFkUMGE7bbc_0 elephant IFrHlldbUdQ_0 cow IFvO1O-6vqk_0 truck IHQvg9gYLjw_0 dog IHSCfRs-J38_2 skateboard IHY0eeHfBcY_4 truck IHjI35oW0T4_0 car IHxX0fKU9iM_1 skateboard IH3E7RS6Hn8_0 cat IH9BmEg26Cw_0 person IIBN7FGNNEs_1 train IIBN7FGNNEs_2 train IIBN7FGNNEs_3 train IIBN7FGNNEs_4 train IINTapIzzes_2 skateboard IIw0KKAeBeQ_0 skateboard II0JbbQq-Sg_1 bird II61z65eDCY_2 cow II61z65eDCY_0 cow II94vSsb4Uc_0 car II_okDlDaO0_0 cat gUt0vA8_1Ow_0 airplane gUvZ3RC9tEU_0 knife gU3SNUS1_ng_0 bicycle gU4mBoB-b7k_1 train gVAp7rt84ic_2 bicycle gVCrRXledlU_1 boat gVCrRXledlU_0 boat gVV-5JdLuXk_3 car gVXzT_h1SFI_3 horse gVXzT_h1SFI_4 horse gVXzT_h1SFI_2 horse gVaB7hwBhTA_0 cat gVjL5txcFMI_0 knife gVrTFXdPWJ8_0 elephant gVxqk8tLXL8_0 truck gV27xS9pqNQ_0 train gV3Xmwy3RKo_6 train gV3Xmwy3RKo_13 train gV9A5NfFexQ_0 car gWcacGgcxYU_4 bear gWlmYVY4kW4_1 bicycle gWnhQi-zfEE_0 skateboard gWpNWuo7vio_2 elephant gWpNWuo7vio_3 elephant gWsOR7UiwDs_0 airplane gWz5ZMzC58s_0 car gXBIzdmmHbA_1 bird gXEHUZgPCGg_4 bear gXFmghAzaVg_1 motorcycle gXGvO4k4xQY_0 truck gXHsyuynhso_2 knife gXW33K91X7c_0 bicycle gXn0Y5X5MJE_1 zebra gXn0Y5X5MJE_0 zebra gXt0u16Y6ZY_0 boat gY_Ey8Ps_ZE_0 cow gZhsGXSn5bU_0 motorcycle gZqGyIMgMbs_0 bicycle gZxcxQBlx0s_0 cat gZzmloffFW4_0 bus gZ8kZt451Ww_3 horse gZ92ZDty9wI_0 skateboard gaCEAVQd1-M_1 bird gaS7x3F3gpk_0 
bicycle gaS7x3F3gpk_1 bicycle gaS7x3F3gpk_2 bicycle gaS7x3F3gpk_3 bicycle galykATgRC0_0 cow gaqS-4IaQ5c_2 bus gbA3ItatxL8_0 skateboard gbE0vzWpHj0_1 knife gbE0vzWpHj0_4 knife gbGl_-TnPjk_0 bird gbI95ZXEUz0_0 knife gbTTJah5oMw_0 elephant gbTTJah5oMw_2 elephant gbgbqiiEKVs_0 giraffe gcBaPcA_1_0_0 train gcExbr9FO94_0 giraffe gcJ7XqXHPwM_0 elephant gcT_dy3neEk_8 bicycle gcXhYL06Acs_5 bicycle gcYBNx0fUg8_0 truck gchz9HDvVDk_0 train gc80cGOHyKM_0 knife gdCpPYwBVlY_0 knife gdEBkAYaDPw_1 elephant gdELg0NrkdA_0 dog gdvUXfsBMIk_0 train gdzzJI7xjBg_0 train gdzzJI7xjBg_1 train gd2O-Z5dOIk_0 airplane gd4r5aA8jeg_0 bird gd4r5aA8jeg_1 bird geBwGOC-lX4_0 train geBwGOC-lX4_1 train geBwGOC-lX4_2 train geBwGOC-lX4_3 train geQCe6Cq5MU_1 elephant geQCe6Cq5MU_2 elephant geWChvEotKU_0 train gefGPLN-abw_0 person gfGsOzQ7gto_0 bear gfS7FJH6Vkk_0 bear gfUC20NWtjU_0 motorcycle gfVlQhN0BBU_0 bicycle gfuVNdXffSs_0 airplane gf1mvdt9kbI_0 horse ggIyqAThI1g_0 bird ggPHtWoCcKs_3 umbrella ggTFLaNIJck_0 train ggVLptkmsys_0 truck ggpz03j1REI_0 bus gg3sG7O2P-g_0 bus ghEfyxUaVGs_1 cat ghIGC_DOfuk_0 horse ghqqgJWnVEU_0 knife ghyp-SKVuC8_0 motorcycle giVGzMF1Yo4_0 skateboard giVGzMF1Yo4_1 skateboard gipHWMPB-W4_3 bear gipHWMPB-W4_1 bear gitOEvGnoYk_0 airplane gi9bnW7uLkE_0 cat gjGlUXCT9A4_1 knife gjK5A6cIEnw_0 dog gjRhqzTAkWw_0 cow IJFaomtLVDE_0 cat IJNUwvacbKY_0 cow IJVUMGoBSQs_4 cow IJXVtb2GeJ4_0 train IJdYiBYP31A_0 motorcycle IJlBmhH72m4_1 cow IJ6g4ZRBksE_0 cat IKLj0LJIMKs_4 airplane IKLj0LJIMKs_5 airplane IKLj0LJIMKs_2 airplane IKftyV_zwkE_0 skateboard IKqmWAu3GF0_0 dog IK7Mnvty4VY_0 person IK8IJWsxg3M_5 airplane IK8IJWsxg3M_6 airplane ILAGhYr9yts_1 motorcycle ILLYlwlFTzA_0 elephant ILmTjHZqkCo_1 truck ILqxie6aqXg_0 bicycle ILqxie6aqXg_1 bicycle ILqxie6aqXg_2 bicycle IL1HokSKOyY_0 cat IL9r35lU8So_0 skateboard IMD3U_DzO3E_0 motorcycle IMD3U_DzO3E_1 motorcycle IMde-053G78_0 horse IMulJdQXZvM_0 train IM7vwh5qua4_0 cow IM8dlwNTjXU_0 cow IM8v82x7ovA_2 train IM8v82x7ovA_1 train INFs2lfikXE_1 knife 
INULdzdrdys_0 horse INXkuJ9WvIU_0 train INZhGblywrk_0 bus INkhg9y4asY_0 bear INtj4nfjRA0_1 bear IN2TGHJrQEg_2 skateboard IN2TGHJrQEg_0 skateboard IN2TGHJrQEg_1 skateboard IOPYEZzmeqg_0 car IOPYEZzmeqg_1 car IOQuWawPM3k_0 bird IOfUvlEkN7g_0 bus IOiqrNof90k_1 knife IO3Z-ebx_f8_5 bus IPI2_GXx1tI_0 bird IPWixEFBDOY_0 horse IPfYf-nFKic_0 airplane IPfYf-nFKic_1 airplane IP1CH8MMir0_0 knife IQOfCy4FW8w_0 skateboard IQXAYnslAnc_0 car IQoVuUTZILY_0 airplane IQsV_hTCyMA_1 bicycle IQwk7Ge6Apk_0 truck IRK6-ixyaVI_0 elephant IRSbjN-mnJI_0 skateboard IRZBnQJoKiU_0 skateboard IRztQZ4bigY_0 car IR9A3u83crI_4 elephant IR-PGdIPgcE_0 skateboard ISAnMprDgCk_0 skateboard ISJW4GuahWg_2 dog ISSTEs8xDWk_0 umbrella ISYwpUKxHJU_1 elephant ISYwpUKxHJU_2 elephant ISYwpUKxHJU_0 elephant ISud5E9hZxU_0 train ISud5E9hZxU_1 train IS9s3kJzTcA_0 airplane ITCcMWC_RW8_0 umbrella ITbwhPVxFv0_0 umbrella ITrisbHlaJw_1 truck ITzBy7T7_fI_1 umbrella IT6TArZww6A_0 cat IT8VqGbdH_A_0 horse IT_zQ44PPOo_0 dog IUH4PYmObvU_0 dog IUO1sDZgGHs_0 bird IUdyfRMOyX8_0 elephant IUdyfRMOyX8_8 elephant IUdyfRMOyX8_1 elephant IUdyfRMOyX8_2 elephant IUdyfRMOyX8_3 elephant IUdyfRMOyX8_4 elephant IUdyfRMOyX8_5 elephant IUdyfRMOyX8_6 elephant IUdyfRMOyX8_7 elephant IUf7a2WuoBw_0 train IUgkMOA3siY_1 bus IUlDlS2KD-k_0 bicycle IUlDlS2KD-k_1 bicycle IUzpvnXep7M_0 bear IU7x7I53cng_0 elephant IVFq204Rr9c_0 airplane IVHx3I13xdQ_0 boat IVSJSu0PlsI_0 train IVVFeaTw6IE_0 bicycle IVjCZS2Fo7k_0 bird IVpmCnL5cE8_1 giraffe IVrBPzhFMi8_1 motorcycle IVrBPzhFMi8_2 motorcycle IVzxeeJEtiY_1 bear IV6EMw4XYco_0 skateboard IV6EMw4XYco_1 skateboard IWCZ1PDW99k_0 motorcycle IWVIIKxipc8_0 motorcycle IWVIIKxipc8_1 motorcycle IWn16DCfLbc_1 knife IWumeAEXWVo_1 boat IWu47p4l06Y_5 umbrella IWu47p4l06Y_6 umbrella IWu47p4l06Y_3 umbrella IWu47p4l06Y_4 umbrella IW1cFMDjPUk_0 bear IW2mFJ8iw6Y_0 bird IW4ZnmQeNtA_1 elephant IW4g0kfA3GE_0 truck IW5Vgh3SE-I_4 elephant IW7TwQ-hY7I_0 motorcycle IW7TwQ-hY7I_1 motorcycle IXTgztKfRQU_0 skateboard IXVCCLG3_cw_0 bird 
IXyV2vpIEA8_0 dog IXyV2vpIEA8_2 dog gja4H3sGrqQ_0 car gjdlZhmnGbk_0 airplane gjfdI7hO92E_0 bird gjquLAxFRWw_2 umbrella gjx4xu1TyWU_1 cow gj7W2zjQApw_3 knife gkEoTLpAw7g_0 airplane gkLRnt1OCH4_7 horse gkRqNmGQbPI_0 skateboard gkXKCuc0Moc_0 skateboard gkXKCuc0Moc_1 skateboard gkb4Ya5QW9M_0 bird gkb4Ya5QW9M_1 bird gkf0Bcsuhlc_1 car gkf0Bcsuhlc_3 car gkf0Bcsuhlc_4 car gkiUpdrObXo_1 elephant gkz49y5qcvc_0 horse gkz-LCZcGtc_5 bird gk1x_qYyDl4_0 cat glNWqIolkq8_0 skateboard glOskJOtnTU_0 knife glOskJOtnTU_2 knife glSdaND81E8_0 person gltHxIp_ma8_0 bird gmCT9tUPTB4_1 giraffe gmdxOMQMgnw_0 airplane gmnvPoB2cNY_0 motorcycle gm53_sbr85Q_1 bird gm53_sbr85Q_2 bird gm9M-m4mCZ4_0 car gm9M-m4mCZ4_2 car gnA9QVNkmTU_1 knife gnD6mU9A2oo_0 elephant gnEttGTQqQ4_1 train gnEttGTQqQ4_0 train gnF9YJM1jaE_1 cow gnGvXHS4UDs_0 airplane gnM9SRiFh7M_0 truck gnM9SRiFh7M_1 truck gnPrHGB85WY_0 bus gnTj3krZROI_4 boat gnVo44q-XDI_0 knife gnb1N_MLdcY_2 elephant gnwCzU63_YY_0 person gn2XuCFK-hE_0 truck gn2bME2rmGw_0 truck goIfg0C9kmM_0 dog goOIZE0j6DM_0 bicycle goSyNORcJ00_0 airplane gok9kHQ77dY_0 skateboard gollBTymf8I_1 bus gomnpeJd5zw_0 boat gonzAOezSOQ_1 train gonzAOezSOQ_2 train gosq350N9dI_2 skateboard goyIWrU1Lbo_0 cat gpBoXY6MM5E_0 dog gpEiPRMcPwo_4 bear gpY-o8xPA3w_0 bicycle gpa4WfWCLa0_1 elephant gpa4WfWCLa0_0 elephant gpa9p4XNeKc_3 bear gpbdiDEPd-s_0 skateboard gpjqG97-SyQ_0 horse gpmdLMUX53k_0 bear gp2SDJHMADo_3 horse gp2SDJHMADo_0 horse gp2SDJHMADo_2 horse gp9q0jvTKo0_0 bird gqNgT7LxZSQ_1 bus gqOfm9XTr6M_3 airplane gqOfm9XTr6M_0 airplane gqOfm9XTr6M_1 airplane gqOfm9XTr6M_2 airplane gqbDkeOx0mA_0 motorcycle gqgQpw4DWZA_0 giraffe gqhweewmNn8_0 skateboard gqkLzCkKKtE_0 skateboard gqucExXpPys_0 car gqxvRzuWcrI_0 bird grBVFo1wSjs_1 bird grFPTYaKb7Q_0 bus grI0uf6IwBw_0 bear grNkPqf-ySE_0 dog grWw42izM6M_1 train grWw42izM6M_2 train grWw42izM6M_0 train grbP7mKMX_A_5 airplane grbP7mKMX_A_1 airplane grbP7mKMX_A_4 airplane grdEE264TwM_0 motorcycle grdIYaNewv0_0 motorcycle grhIgcHgpOw_0 bus 
gsCvhqZCWX0_0 dog gsUrGSN-k00_0 horse gsbJ13WiSvE_1 horse gsfIYIQ1siA_0 skateboard gsvn88OsH_8_3 knife gsv7RJk7dtY_0 dog gs_C12A8Wq4_1 bicycle gtFIMtVrAGk_0 bicycle gtNJSexRjxE_0 car gtNdVTTd0tg_0 bicycle gtNdVTTd0tg_2 bicycle gtOa6rSatLA_0 cow gtQ_uFTKEck_1 horse gtii5vwjSTY_1 dog gtuj1cOmYSs_1 train gtuj1cOmYSs_3 train gtz5ClHTSVo_0 cat gtz5ClHTSVo_1 cat gt_WHCkauOA_1 knife guVl_gp0sJE_0 bus gugP5f2JRJ0_1 bear gugP5f2JRJ0_0 bear guh1OUkdIGE_0 horse guktzkv1els_0 boat guv5reh2NH4_0 boat guxRXiegac0_4 bird gvNxDnFriAI_0 skateboard gvcioONBIcE_0 train gviQTbs7dIk_1 bird gvjcggbLXRo_0 elephant gvjcggbLXRo_1 elephant gvjcggbLXRo_2 elephant gvk0hzlYu9E_0 umbrella gvraCN0RYko_0 dog gvtY3fwbgdc_0 cow gvuBfR3HXac_0 elephant gv4sQFTuJ-k_0 elephant gv7qY66lOhs_0 giraffe gv8pF9t1zYM_0 elephant gwKq56_M6Kc_0 horse gwN_p_IRuoo_0 horse gwP-6gOPn2c_0 motorcycle gwTc-69C_P4_0 knife gwTyjJwBgRk_0 horse gwy7eePYryM_1 boat gw9MjutMhLs_1 airplane gw9MjutMhLs_3 airplane gw9MjutMhLs_0 airplane gw9MjutMhLs_2 airplane gxKnyBP8_cs_0 elephant gxejG9D0guY_1 person gxgZg6BU3ds_0 dog gxgZg6BU3ds_1 dog IX4HjI_9vLY_2 dog IX4IwgbTdCk_0 dog IYBF45M9nTc_0 skateboard IYBzvotFEYo_0 motorcycle IYZZ-K_Ygpo_0 bicycle IYdXz1cOCWc_0 giraffe IYukRQKxhFI_0 person IYukRQKxhFI_1 motorcycle IZESZPVT0zk_3 bear IZGady38Nh8_0 bird IZIPpBl_h0Q_5 truck IZIPpBl_h0Q_0 truck IZIPpBl_h0Q_6 truck IZJ1PO3Fkuw_0 umbrella IZLMXYU4A-0_0 airplane IZLMXYU4A-0_2 airplane IZTfd31H0AI_0 bicycle IZUO1x0QT1I_1 elephant IZ2nFUgP-Pw_1 elephant IZ2nFUgP-Pw_5 elephant IZ2nFUgP-Pw_6 elephant IZ2nFUgP-Pw_3 elephant IZ2nFUgP-Pw_4 elephant IaAPZOFgclo_1 elephant IaG7siKVlak_0 giraffe IaxZJVx5ptw_0 truck IaxZJVx5ptw_1 truck IaxZJVx5ptw_2 truck IaxZJVx5ptw_3 truck Ia0DjYXcBWc_8 elephant Ia0DjYXcBWc_4 elephant Ia0DjYXcBWc_5 elephant Ia0DjYXcBWc_6 elephant Ia0DjYXcBWc_9 elephant Ia0DjYXcBWc_10 elephant Ia0DjYXcBWc_11 elephant Ia0DjYXcBWc_12 elephant Ia0DjYXcBWc_13 elephant Ia0DjYXcBWc_14 elephant Ia0DjYXcBWc_16 elephant Ia0DjYXcBWc_18 
elephant Ia0DjYXcBWc_19 elephant IbEpwiOUFEI_0 dog Ib15GlTvqTQ_2 skateboard Ib2u6u-j2vk_0 skateboard IcEs4vbIcDM_0 umbrella IcSumCpVOy0_0 skateboard IcZ2D-MawSg_0 truck IciJuq7ZY6o_0 elephant IckUkdfRndY_1 knife Ic1cufihs-0_0 elephant Ic1cufihs-0_1 elephant IdSlvHXTrmE_1 skateboard IdXPNOQD97w_0 motorcycle IdabN3kTjSk_0 skateboard IdrTVVio1U4_0 dog IdvQme2elLk_1 truck IdvQme2elLk_2 truck IdvQme2elLk_3 truck Id6HsaEvZ0k_0 person IeB4Nf3h7T4_0 bus IeENvG3Qtk0_5 elephant IeFUkGY1b4Y_4 elephant IeXb8CHr4ms_0 train IefPtlA5ebA_0 motorcycle IehTemq8EYc_27 bicycle IehTemq8EYc_28 bicycle IehTemq8EYc_0 bicycle IehTemq8EYc_6 bicycle IehTemq8EYc_11 bicycle IehTemq8EYc_15 bicycle IehTemq8EYc_17 bicycle IehTemq8EYc_19 bicycle Iejh8w6egIA_0 umbrella Iek9nAfsymA_0 bus IewJcdqOzCY_0 train IewJcdqOzCY_1 train Ie4Ct_HRDNw_1 dog Ie5lfGQndBs_0 airplane Ie8dc7EO7VI_0 bicycle Ie8dc7EO7VI_1 bicycle Ie8dc7EO7VI_2 bicycle IfBft2ltqqE_0 skateboard IfBft2ltqqE_1 skateboard IfFnkz6EUno_1 horse IfGZXa16ZnQ_0 knife IfTrYE-Ox50_0 cat IfZDLHBP_qk_0 bus Ifpbe7xlKp4_0 truck If4WPZY4LIY_0 elephant If8EotoXQVQ_1 truck IgO9_kN8D5I_0 cat IgRs6nmhv2w_0 cat Ige9Idj8fDw_3 cow Ige9Idj8fDw_2 cow Ig0Luv6UlkE_1 bicycle Ig1JdzucmLI_0 boat Ig9jZPM0n2A_0 car IhR6ePM1wRw_0 bird IhXAXy3VAqA_0 cat IhdGvFfk3Ks_0 bird IhlRPxknT9E_1 motorcycle Ihp3YZGcRjM_0 horse Ihp3YZGcRjM_1 horse Ihsr3gT-u00_0 cow IiBzrow5m9w_0 cat IiH0f7VOXTY_4 airplane IiH0f7VOXTY_2 airplane IiH0f7VOXTY_3 airplane Iie6uM_sdLE_2 truck Iie6uM_sdLE_5 truck IiscR53FEz0_1 airplane Iiy_W2tIOWI_0 boat Ii9URMIXJjc_0 dog IjHqTBt-tzY_0 horse IjMLYR0bH6g_0 cow Ijf2ZMTxDUs_0 cow Ij57BoIbMws_0 train IkOG3ZnCvY4_0 cow IkVifrtYlcI_0 skateboard Iklc7ijgOtA_0 horse Ikl-nlqwUJA_2 train IkqFTjEXf4g_0 motorcycle IkrKcORoFLI_0 cat Ik4UxtlIrw0_5 airplane Ik4UxtlIrw0_13 airplane IlJT6oek8KQ_0 dog IlNV-gFlp3Q_0 umbrella IlshSY2CGU0_0 cow Il1TKTSRPO4_0 train Il7GtfxmBlQ_0 skateboard ImCV4d0kYxY_0 skateboard ImEKl15Aipo_2 bear ImOLHl6gwLE_0 giraffe ImO7oG_YuSU_0 train 
gyBWGyhFuWg_0 elephant gyBWGyhFuWg_1 elephant gyBWGyhFuWg_2 elephant gyBWGyhFuWg_4 elephant gyBWGyhFuWg_7 elephant gybSfaDRdVA_2 airplane gy3zF39Y7B8_0 airplane gzQrsNwx8MQ_1 truck gzTHA0tMocM_0 bird gzUj7KfvRPY_0 train gzVdw-5l3sY_5 bear gzWwT4ufwFY_0 bicycle gzwzd6nOPoI_3 bear gz13nfjIblU_0 skateboard g0Jq0uIY3i0_2 knife g0LufqNJtss_1 elephant g0LufqNJtss_2 elephant g0SdZmm5Mm0_0 horse g0W6U-p-T2c_0 horse g0om0nrfC4w_5 airplane g0om0nrfC4w_1 airplane g0tXovGqqSE_0 cow g0zcJWO1MbU_1 airplane g02OQmAgfo4_0 train g02OQmAgfo4_1 train g04xUjb4z0w_0 bicycle g05TJKB5TL0_0 elephant g05TJKB5TL0_1 elephant g05TJKB5TL0_2 elephant g1HtoWJ3NjA_0 airplane g1UUBEfyzJ4_0 horse g1UUBEfyzJ4_1 horse g1ZtaoEqtjI_0 bus g1j_9A4-PL4_0 cow g1n74kWqKFM_0 truck g1vq3JO3eH0_0 skateboard g12DCVqfKjM_0 airplane g13JzTyNCPY_0 truck g17hrSF1YN8_1 bear g2Hh_97o7jY_0 person g2KeNy_WECo_0 bus g2MUK80Ht8k_0 horse g2MUK80Ht8k_1 horse g2MUK80Ht8k_2 horse g2MUK80Ht8k_3 horse g2MUK80Ht8k_4 horse g2cCr0rRIeo_0 motorcycle g2vRpfpQuNE_1 motorcycle g2-SNBvYdNc_3 car g2-SNBvYdNc_2 car g3DAFznLlXw_0 elephant g3e6vDSvpN4_0 skateboard g3g7M2Xv3JY_0 zebra g3ytRwjgoMI_2 horse g3ytRwjgoMI_3 horse g30xOR9j3_A_0 skateboard g30xOR9j3_A_1 skateboard g38MDXW9ndc_0 elephant g4BX8_C-NeQ_1 dog g4KzjuhixSo_0 motorcycle g4KzjuhixSo_1 motorcycle g4R5jZXlnl4_0 truck g5OKbEXlegI_0 cow g5SIvfoi7tE_2 bird g5S-76eh6vs_0 car g5ztjA03q5k_0 horse g55_MKVNAE8_0 motorcycle g57hZ17etp8_0 skateboard g5-55T7AzUE_1 skateboard g7Qk-cV3IFs_1 car g7YvJasRFj0_0 skateboard g7fZhFRdYJs_3 zebra g7oMLF6ZfT8_0 horse g7oMLF6ZfT8_1 horse g8aScpqmhVU_0 umbrella g8iDSRkz_go_1 boat g80ZYUNhRME_1 dog g9Am-b3OqbI_0 truck g9RM9VSJPIY_0 knife g9WrMIn5AkI_0 skateboard g9sD-4RBa3Y_0 motorcycle g9uOEJm7wdw_0 elephant g9yESRreg5k_0 boat g9zLmd4IZ78_0 bus g91mK1sMiSI_1 elephant g9-6tclIBcc_0 motorcycle g-Dfzs3HQ8w_0 boat g-EVS_QxLxA_0 horse g-F4Eig_Rxc_0 motorcycle g-F4Eig_Rxc_2 motorcycle g-F4Eig_Rxc_1 motorcycle g-JTM0dCFFA_0 cow g-SJXmYYHqI_0 truck 
g-SlOveVnAs_0 cow g-Z7CA3qr1A_0 skateboard g_B2r70EsjY_2 horse g_XW0YLzND0_0 motorcycle g_XW0YLzND0_1 motorcycle g_XW0YLzND0_2 motorcycle g_dN59QhubM_0 person g_jq8Uy4P2s_0 truck hAHsYyTOJoI_0 cow hAIBcR5MAVE_0 boat hAUD4Cy2GiM_0 cow hAUD4Cy2GiM_6 cow hAUD4Cy2GiM_1 cow hAUD4Cy2GiM_3 cow hAUD4Cy2GiM_4 cow hAUD4Cy2GiM_5 cow hAVbFSsRfOY_0 airplane hAcx9u12Rd0_1 cat ImZcOQCdJng_0 skateboard ImiKNikVSsM_0 horse ImqKWexMOEA_0 bird Imuhe4E1pxo_0 car Imy4SpqoC4k_0 cat Im3ooIguQHk_4 train Im3ooIguQHk_5 train Im3ooIguQHk_6 train Im3ooIguQHk_0 train InEZSi4Zz08_4 train InEZSi4Zz08_1 train InEZSi4Zz08_2 train InTq6s23Ygc_0 cat Inn1lo0hbX0_0 train Invv-JPzV-0_0 elephant In_dBFPRoso_1 airplane IobdPoAtEB0_0 bear IoqRCAQzibw_1 skateboard IoqRrAswOwY_5 bear IoqRrAswOwY_4 bear Io5wOOkpkdE_0 skateboard IpOFHasyloc_0 cat IpQQ9QabgiU_0 bear IpVCKTRou10_1 truck IpVCKTRou10_3 truck IpVCKTRou10_4 truck IpYZmcVrqdQ_0 cow IpnTUQCHioc_0 giraffe Ip0c_3xCHRA_2 horse Ip-N_PYIqhA_0 motorcycle Iqv9963BN8w_0 cat IrAxUS0aBTQ_0 bird IrDY9nE1V2I_1 motorcycle Ir4CkmTmSXQ_0 cow IsSCjgdAQiE_0 dog IslqPZDUBHI_0 motorcycle IssSzh7Z-vo_0 umbrella IsxRqs7KcbQ_0 cat Is2E8gFNBWo_4 bear Is2E8gFNBWo_1 bear ItL-C-szpU8_0 truck ItiAXqRQm3A_1 knife ItkxwET4PNc_0 dog ItzhXkBVmEY_0 car ItzlBA8cl3c_2 airplane It_dJluX63g_0 cat IuN_risviek_0 giraffe IuZ6JD-k2nM_0 dog Iuk4W5KJbQ8_0 bus IuwJz5d-8J4_0 umbrella Iuw0f-Y8t6I_0 airplane Iuw0f-Y8t6I_1 airplane Iu26NyEUoGY_2 boat Iu5GqI9oVnk_0 motorcycle IvJLhgaveaw_0 skateboard IvMiQ2e-5hQ_0 bus IvMiQ2e-5hQ_1 bus IvX1MeQN-e0_0 cat IvZSk33MtAc_0 motorcycle IvZqPTK9DEQ_0 motorcycle IvZqPTK9DEQ_4 motorcycle IvZqPTK9DEQ_3 motorcycle IvfWyYn_ifg_0 elephant IvjNeTpV6hs_0 horse IvyftS2bPuo_10 airplane IvyftS2bPuo_0 airplane IvyftS2bPuo_2 airplane IvyftS2bPuo_6 airplane IvyftS2bPuo_7 airplane IvyftS2bPuo_9 airplane IwcC1J_ImAs_0 car Iwd7i4kvS5c_0 car IwfpNUPSvpw_0 motorcycle IwgX5DfmIQo_0 bicycle Iwhf27USDD4_0 motorcycle IwmwVP_e5Ag_0 bus IwxmbUX4fcg_0 cow Iw6-0LYvEmQ_0 bird 
Iw6-0LYvEmQ_1 bird Iw7zBsW9W5Y_0 train IxLbLqfxrhg_1 boat IxNA0hdkWGg_0 cow IxObyCZ6OfY_4 giraffe Ix8eS24W75g_4 airplane Ix8eS24W75g_5 airplane Ix8eS24W75g_0 airplane Ix8eS24W75g_1 airplane Ix8eS24W75g_2 airplane Ix8eS24W75g_3 airplane IyNmzxdv8-Q_0 bird IyQlh0wdd9I_8 boat IyQlh0wdd9I_7 boat IyU3NizvZuM_0 horse IyU3NizvZuM_5 horse Iyj9D6cwI5o_0 bicycle Iyk9-k1RP-M_0 car Iyk9-k1RP-M_1 car Iyk9-k1RP-M_2 car Iys_rL0bPcc_4 boat Iy4SrujSLuQ_1 elephant Iy4SrujSLuQ_5 elephant Iy4SrujSLuQ_6 elephant Iy4SrujSLuQ_3 elephant IzC8vjFriRE_0 horse IzPS29ghTxo_0 knife IzQjjBqimYw_5 elephant IzQjjBqimYw_10 elephant IzQjjBqimYw_11 elephant IzQjjBqimYw_0 elephant IzQjjBqimYw_1 elephant IzQjjBqimYw_2 elephant IzQjjBqimYw_3 elephant IzQjjBqimYw_4 elephant IzQjjBqimYw_6 elephant IzQjjBqimYw_7 elephant IzQjjBqimYw_9 elephant Iz4_9EtiVXc_0 motorcycle Iz8cco4VLow_0 cow Iz8gKIZcfqo_0 bear Iz8uzZuBiXs_0 bird I0eY-kKi2FM_0 umbrella I0iEaW1Qg_o_1 bear I0oVkr613Rw_0 skateboard I0voLEPKkG8_0 horse I0voLEPKkG8_1 horse I0voLEPKkG8_2 horse I0voLEPKkG8_3 horse I0voLEPKkG8_4 horse hAplCSSZqAs_0 airplane hAplCSSZqAs_1 airplane hAteY2rkmVg_8 bus hAteY2rkmVg_1 bus hAuFEp75jVo_0 train hAzefhyFMN4_0 truck hAzsdnh5Iq8_0 elephant hA_YzyjSVZM_0 bicycle hBDc0K6CvHg_0 bus hBDvdp2RCCw_1 airplane hBDvdp2RCCw_2 airplane hBDvdp2RCCw_3 airplane hBDvdp2RCCw_5 airplane hBDvdp2RCCw_7 airplane hBKuHV_S8lM_0 skateboard hBOhA_sljfE_0 umbrella hBcYx5Uc-vw_0 car hBcZZeXsCaw_0 bicycle hBgxILtRUIc_0 cat hB23PCerELA_0 skateboard hB-M9w3C_Tw_0 boat hCB731pKdcg_1 train hChqLLLAmF4_1 bird hChqLLLAmF4_0 bird hCkn4pJxSkk_0 bear hCrrYhe3x9Q_0 train hCsCAXkiQ4Y_2 train hCynNRrrTKI_0 cow hC5Wac-AzgM_1 elephant hC5Wac-AzgM_2 elephant hC5Wac-AzgM_3 elephant hC5augWtBcQ_1 bicycle hDAv3aPvZjc_1 truck hDLAKS4hCfc_0 car hDM4sCvlRoA_1 airplane hDVx_yYysaA_0 cow hDXpSU7bq44_0 bicycle hDYV-Vz3xwA_1 dog hEAQZIsaIew_1 train hERCXzHI2nA_1 elephant hERyFpl4aDk_0 dog hEWJZ4dCcIY_2 cow hEZt4InN7Eo_0 elephant hEZt4InN7Eo_1 elephant hEZt4InN7Eo_2 
elephant hEdpC8HEa-A_0 motorcycle hE04tUrJzXo_0 truck hE7N0N5vik0_0 bird hE7N0N5vik0_1 bird hE-VIrAVcBA_2 bus hFFrC0_rJYA_0 airplane hFTTcrUxPeg_0 cow hFTTcrUxPeg_3 cow hFdi9yxVkys_0 motorcycle hFdi9yxVkys_1 motorcycle hFixbos35O4_0 truck hFnKIVp-Dcc_0 cow hFnKIVp-Dcc_1 cow hFzR4bgxihU_0 bicycle hGH72iljdzU_0 bear hGRdOlSIQRU_1 train hGRdOlSIQRU_2 train hGRdOlSIQRU_0 train hGiCVP3Z8l0_0 umbrella hG6vW_xUZgA_0 train hG6vW_xUZgA_1 train hG959XPTh_8_0 bear hG-quo0MZM8_0 elephant hG-quo0MZM8_1 elephant hHEIEEdrXYE_0 cow hHIyy4Vda6M_0 cat hHjzciM78AA_0 cow hHtOM5_wiWM_0 truck hHtqPiAg32Q_0 umbrella hH_akvS98jo_0 skateboard hIH6LuoXbpE_0 cat hIXTbG6ho4E_0 person hIXTbG6ho4E_1 person hIXTbG6ho4E_2 person hIz3ONvP-Bo_0 zebra hI3P4BxIr-o_0 bear hI3eGFKYRuc_1 horse hJP8qg-kSZA_0 cow hJTl4NJ0qIs_0 person hJhBQsD0_hw_0 bus hJkgoq_T4Pk_0 train hJmxsYAKHdc_0 umbrella hJtloiw4D-M_0 car hJ_uvoDrzkI_0 giraffe hKJQH8VbGk4_0 airplane hKJQH8VbGk4_1 airplane hKYJZqP-44M_0 airplane hKYJZqP-44M_1 airplane hKgtNPTirdc_2 elephant hKgtNPTirdc_3 elephant hKlKPyuUYps_0 bus hKtHZYDaoXA_1 train hKtHZYDaoXA_2 train hKtHZYDaoXA_3 train hKtHZYDaoXA_0 train hK6w0B1cu-I_0 cow hK7VoN3cI74_0 cat hLGnjjoilbo_0 skateboard hLHaPstpghQ_0 motorcycle hLKzDOp8XLc_1 zebra hLNcuJAwfDo_0 cow hLVZsqfElxI_0 dog hLX1LeVKgi8_0 cat hLjDO37EQ60_2 dog hLjDO37EQ60_1 dog hLscdjfkeho_0 cow hLte0Y4VWR0_0 knife hL_QAgWBkJ4_0 cow hL_noZA6D8E_0 truck hMGVdq71lME_1 horse hMLkMrqUtA0_0 horse hMRIDt-1dY4_0 train hMgp2oyTB80_0 cow hMjke9g_Ysw_0 horse hMuO0MHPIOQ_0 elephant hMuO0MHPIOQ_1 elephant hMusKbJqZDY_0 skateboard hNHGh8N1XGg_0 knife hN_-56Oxma0_0 dog hOJJ65CVNuM_0 bird hOOwQSSrFVc_1 cow hOid-qo2Ozw_0 cow hOky3qIMxRY_0 skateboard hOpJoO7UciM_1 bicycle hOrAXl-jATo_0 airplane hOxMkI1d3oc_1 airplane hOxMkI1d3oc_3 airplane hOxMkI1d3oc_4 airplane hOxMkI1d3oc_6 airplane hOxMkI1d3oc_7 airplane hOxMkI1d3oc_9 airplane hPEsz5u87CI_0 bus hPIDFIwLI8c_0 car hPWhKQfDoXg_0 airplane hPWhKQfDoXg_1 airplane hPW2NpCU668_2 elephant 
hPW2NpCU668_0 elephant hPW2NpCU668_3 elephant hPW2NpCU668_5 elephant hPW2NpCU668_6 elephant hPW2NpCU668_7 elephant hPW2NpCU668_8 elephant hPa5hUze91s_0 elephant hPa5hUze91s_1 elephant hPb_Rq2yKRA_0 cow hPo5Wd-otbY_0 dog I0yz1LGLl08_0 elephant I1Ejpa2UWSk_1 bird I1Pdo-p11tI_0 motorcycle I1Quuhyu2UI_1 motorcycle I1YfOiyQW_8_0 truck I1wfW86V8So_0 dog I1wfW86V8So_2 dog I14JWDgkllE_0 truck I14JWDgkllE_1 truck I19kQsgjFRA_0 bicycle I2IfiPw2aKE_0 elephant I2OQlELjXvU_1 truck I2OQlELjXvU_2 truck I2OQlELjXvU_3 truck I2hmFe1pYes_0 horse I2o_4OyrJlI_0 horse I3DSZk-7nG8_0 train I3JCCqGY3c8_0 truck I3KJj6GQ5QE_0 cat I3OWw4AK0MI_0 dog I330kG5lk5A_0 knife I3-xBh-IrIo_0 airplane I3_lU2I_AaU_0 bicycle I4BMptNse7c_1 train I4BMptNse7c_0 train I4CMNv-VRDo_0 bird I4Gi7kq5XAs_0 horse I4HuQ8DDxoM_0 skateboard I4WNAfBvm5E_1 skateboard I4z-3IGHMW4_0 dog I5KNdt1NT8g_0 skateboard I5QNP3-QHLw_0 cow I5SA8N1JKwM_0 cat I5WNgPfoaZQ_2 motorcycle I5pU9zWz4Fg_0 motorcycle I6fJWB7DpAM_0 bus I6oT6dLeq7A_0 motorcycle I6wEvIOC-Pk_0 train I7GbkWE2A0M_0 bus I7aUrrDieE4_0 cow I7bKlZxD6Fs_0 bicycle I7xOURJQUps_0 train I7xOURJQUps_1 train I7x_od8h4iw_0 cow I7-iLB-NVGg_0 dog I8FoWQrnHGY_0 bird I8Ms0rXjfXU_0 skateboard I8Ms0rXjfXU_1 skateboard I8Ms0rXjfXU_2 skateboard I8Qx-qd0eLg_0 boat I8Qx-qd0eLg_1 boat I8UlumMtAG8_0 horse I8Vr0DzHV9U_0 cow I8rww3UUjYI_0 person I9AGRokco_M_0 train I9FPkgdc-5E_1 cow I9XcFcBW-HM_0 motorcycle I9oAq_x5pqg_0 bus I9yrFs_JpWc_1 skateboard I94qZUJmKP8_1 bicycle I94qZUJmKP8_2 bicycle I-SRTsDkhLM_0 cow I-TshjRdh74_1 knife I-blRAakQjM_0 boat I-h3cTJlsRc_0 dog I-nb60BTO_g_0 train I-raj-aLy8s_8 horse I-ywD5MDZZ4_3 cow I-ywD5MDZZ4_4 cow I_LhSNsRHMs_0 elephant I_kI39ZHymk_0 horse JAEzOCIew2Q_0 airplane JAEzOCIew2Q_1 airplane JAb3p7VYLzI_0 bear JAb3p7VYLzI_1 bear JAcHxxzG1vA_0 motorcycle JAf3nC1hYS4_0 dog JAp2_UJfFao_0 person JAqAH7n-3lA_0 bus JAzD-VzDxfc_2 bicycle JAzD-VzDxfc_4 bicycle JAzD-VzDxfc_5 bicycle JAzD-VzDxfc_8 bicycle JAzD-VzDxfc_11 bicycle JAzD-VzDxfc_13 bicycle JAzD-VzDxfc_17 
bicycle JAzD-VzDxfc_18 bicycle JAzD-VzDxfc_19 bicycle JA2PLZmRABc_1 umbrella JBGewEMeWIs_1 dog JBGewEMeWIs_5 dog JBKG_tl08RU_0 cow JBMhOrDLcho_0 cat JBYr3VbJLoM_0 person JBkymGnh5mA_1 bicycle JBkymGnh5mA_2 bicycle JBkymGnh5mA_3 bicycle JBkymGnh5mA_4 bicycle JBlCFCV4sdw_0 horse JBlCFCV4sdw_1 horse JBxFgwl0To8_0 cow JB0SELYSRXA_1 bear JB-hzl-gILo_2 truck JCIJbwBevro_2 bird JCSRBZQpYCw_1 bear JCSRBZQpYCw_5 bear JCTYAwT6ppk_0 motorcycle JCTYAwT6ppk_1 motorcycle JCTYAwT6ppk_2 motorcycle JCciDn0O6X0_0 airplane JChsfz-p2KI_0 cat JCuE5X37xIE_3 boat JCuE5X37xIE_4 boat JDJWapHD_kM_0 boat hP8Jfo1RaSk_0 elephant hP8Jfo1RaSk_1 elephant hP8Jfo1RaSk_2 elephant hQWcyTkfPeU_1 dog hQZDg__nxQA_4 bear hQZ5lNlAXBI_0 truck hQe3_1EvqIY_0 cow hQfYabI9_ec_0 bird hQkbXGwGwyg_0 skateboard hQve0ugvy6s_0 motorcycle hRAbtgVJiWI_0 bear hRJ0Qk_qdAY_0 airplane hRS45wmOq9c_0 elephant hSR-ZVA-vMU_0 dog hSWyYOzvh0g_0 dog hSf3uEm8r9M_0 bus hShwtMLieCc_0 boat hSiozs1nz7o_1 motorcycle hSzgOCvRfq4_0 bear hS-h8AUEibc_0 cow hS-h8AUEibc_1 cow hS-h8AUEibc_2 cow hTHBMsKC5ZI_0 cat hTZr7OF0VuY_5 dog hUJMSp4rMrc_0 train hU0EbblT2vQ_2 airplane hU388mZGPGg_0 cat hU9B31AVZNg_0 bus hVJjOdU5-yQ_0 car hVNKN_qFEUA_0 bicycle hVOImOLBY1g_0 skateboard hVdb-Q3aJ9E_0 dog hVhNOzZA40E_0 cat hVnD8rlLRgM_0 bird hVq6NOrBwlM_1 motorcycle hVsAAQqAHyI_1 skateboard hWHUct-PLfY_1 motorcycle hWHUct-PLfY_0 motorcycle hWNyVxx4a94_0 cat hWn0ddeHF0I_2 zebra hXAQH1xVKB8_0 cow hXWQ710-JZQ_0 motorcycle hXagj4A6N-s_1 elephant hXbMo03RQWk_0 train hXflTk4WVAA_1 bear hXf7dimd2bo_2 cat hXhtGcCMf5Q_0 airplane hXsCNMb3eTc_0 bicycle hYD7HKMKa3k_0 elephant hYFW5XhMxyg_1 knife hYIPy3eyC9k_0 cat hYQBaiC8d6Y_0 horse hYTIV5X87S4_0 horse hYgzs0gDiiU_0 elephant hYkPL7spYMo_1 elephant hYlmhAuVVh8_0 bird hYtFyx0799o_0 boat hY0vkwEtjLM_1 bear hZAOhuPJTho_0 horse hZAXlQqCmCI_2 train hZCGOP3PHOM_2 knife hZHjTTvcQ88_2 bicycle hZOhuOcxTP8_0 skateboard hZPYHGzIYh0_0 cow hZeekc0i_b8_0 motorcycle hZiXqP-WaQk_3 bird hZiXqP-WaQk_0 bird hZiXqP-WaQk_1 bird hZiXqP-WaQk_2 
bird hZygBhv-nDg_0 motorcycle haC0TZbvBEU_0 cat haMtzn-TnOQ_0 boat haTl-PeSssc_0 dog hakWXvIYvzo_1 dog hanKUxPHFbA_1 car hanKUxPHFbA_0 car haxabA27SnU_0 horse ha3C2hPzaiw_0 dog ha8hX-68TqI_0 bear ha8hX-68TqI_2 bear hbKjt5OBryI_0 truck hbKjt5OBryI_2 truck hbKjt5OBryI_1 truck hbfiyMHycSs_3 knife hbvJ3t9lpUo_0 truck hcXtsyICD30_1 skateboard hcpMT5qGQ0U_0 bus hdZkNo0t6wg_0 boat hdbKePdCemQ_0 cow hdqiOcfXejc_1 zebra hdwZF4C-vYs_0 cow hd_yXL53Z9E_0 elephant heQRV9di86s_0 train heTgOW6o1ho_0 zebra hedgcDGNngs_0 bicycle heucaATRtbI_0 cat he_j-GZdCNs_0 person hfCbKe627p0_0 airplane hfEl_mnX9X4_0 skateboard hfGEkaEADUw_0 motorcycle hfGEkaEADUw_1 motorcycle hfGEkaEADUw_2 motorcycle hfcKFLBuJ_g_1 dog JDcAM9ieTp8_0 bicycle JDe9ulv2Nmo_0 elephant JD_njBej6V0_0 truck JD_njBej6V0_2 truck JEU2rZzAxRU_0 skateboard JEbIHUJTFsM_0 airplane JEdl8GROiQM_0 truck JExlAUEYZwc_0 cat JE8SV6FOlC0_0 truck JFH3n9kI6aA_0 boat JFO_Qz1y8-s_4 elephant JFQ_GztsLs0_0 cow JFQ_GztsLs0_3 cow JFZG_ebR2mk_0 elephant JFZpmduYfv4_0 motorcycle JFfYNQ2FmHU_0 cow JFk4Qyn58CY_0 train JFvQ7wc6c0o_0 airplane JGDf9kSc-v4_13 dog JGDf9kSc-v4_15 dog JGDf9kSc-v4_17 dog JGDf9kSc-v4_19 dog JGDf9kSc-v4_1 dog JGDf9kSc-v4_2 dog JGDf9kSc-v4_6 dog JGGj1z6Kujc_0 dog JGGj1z6Kujc_1 dog JGMfEFj5PVM_1 truck JGWBjvjqVhw_4 skateboard JGanm9yGTJk_0 truck JGanm9yGTJk_1 truck JGanm9yGTJk_2 truck JGmHpQtJzic_0 horse JGn6Ifa5bWI_0 bird JG0B4rV4KEI_0 dog JG6H3R9rErg_8 airplane JG6H3R9rErg_0 airplane JG6H3R9rErg_1 airplane JG6H3R9rErg_2 airplane JG6H3R9rErg_3 airplane JG6H3R9rErg_4 airplane JG6H3R9rErg_5 airplane JG6H3R9rErg_7 airplane JG6sceNvlnI_3 boat JG6sceNvlnI_2 boat JG872iaucFc_0 umbrella JHBhDpq4HNs_0 cat JHBtawKoltc_0 car JHTt9PSzrhU_0 elephant JHb8IVsjgMs_1 bus JHdc9jvf4qA_0 motorcycle JHmG34eTWow_0 train JHr57YE7IRs_1 airplane JHy85i0So5U_1 dog JH0Jzb0wOXw_3 elephant JH0Jzb0wOXw_4 elephant JH0Jzb0wOXw_2 elephant JISA50Bfj4U_0 boat JIamGji7w9U_3 bird JIiA0pG-MKk_0 skateboard JI6MyG7aTvM_0 bird JJSp2fu3lk8_4 dog JJSp2fu3lk8_3 dog 
JJq7YAYUado_0 umbrella JJx7GdAuDQY_0 skateboard JJyJR7TlQ7o_0 motorcycle JJ0Ja1ju2ec_1 horse JJ0NBly53IU_0 cat JJ8Vv2hiCCA_0 cow JJ8Vv2hiCCA_1 cow JKBJuICyV50_1 train JKBJuICyV50_0 train JKCFS8k_Qis_3 bus JKGV5hbm5g8_0 skateboard JKJQPHspLBs_0 bird JKJQPHspLBs_1 bird JKNRKGSvtsQ_0 elephant JKYPluJPL7c_0 dog JKa7rPKrAwY_0 train JKgPYc0K_hI_4 car JKgPYc0K_hI_1 car JKuhG9WLM2k_0 airplane JK42K36SYLs_0 bird JLE_jNuNoA0_0 cow JLHP-3UxtMU_0 boat JLb2dnuNhqs_0 bus JLoS7DZH_ik_2 airplane JLoS7DZH_ik_1 airplane JLsEcZUU7FM_2 truck JL64rU6Jvmw_1 giraffe JL71b_9Cy9I_1 umbrella JMDFSes_w0E_0 cow JMMmrEdfRbk_0 boat JMPKtdq9b0Y_0 train JMR4IvE2sDo_0 bus JMaahZTxRLk_6 boat JMgbgNPBIJI_0 bird JMnp6FLLbtw_0 horse JMnp6FLLbtw_4 horse JM1jSU4FEPw_2 airplane JM4yr2pj-zg_0 airplane JNDZBgXZBU8_0 knife JNDZBgXZBU8_3 knife JNDdt_ZPl1s_3 elephant JNNbk6jVfB4_0 cat JNZDx8Ro_mM_0 truck JNe7ZednqQc_2 horse JNkz_3Qtdfc_0 horse JNnnm9ixKrM_3 car JNnnm9ixKrM_4 car JNnnm9ixKrM_5 car JNpuJeqVFxk_0 motorcycle JONF8-3gEoY_0 giraffe JONF8-3gEoY_1 giraffe JONF8-3gEoY_2 giraffe JObYghNlZas_6 train JObYghNlZas_7 train JOmeD6G33Dc_1 horse JOoNVY1C6qI_0 train JOoNVY1C6qI_2 train JOqHfu-WVu8_2 horse JOuB1UkVvKI_0 airplane JOue8LphKc4_0 truck JOztmtwKz-k_0 cow JPAjGBsi-rE_0 bird JPMFXg-BXDE_2 car JPTFJk9f2nM_0 dog JPevMGnX92M_0 airplane JPiSmPAIpOI_0 knife JPlZOEew4wg_0 elephant JPuDmwlAXzI_0 skateboard JPwUpTvlZDA_0 person JPwUpTvlZDA_3 horse JPw4R6t-0j4_1 bird JQDX7gVR0qM_2 knife JQDX7gVR0qM_0 knife JQKDDMvCtt8_1 horse JQNsIqNLn40_0 truck JQRxu6RVGMg_0 car hfwhbInEJAk_3 train hfwhbInEJAk_2 train hgFfz_RTcx4_0 truck hgFfz_RTcx4_1 truck hgxvhMjH_68_0 motorcycle hg6Z6JIwRMU_0 elephant hg6Z6JIwRMU_1 elephant hhM2TSF2GhA_1 horse hhNlkY3SS6w_1 bus hhYOJb0v5Yw_0 cat hhlt4dfZmFE_0 horse hhyzKC353Jo_1 car hiJ-OdPj_8c_0 bird hiPDdAi1Qs8_0 motorcycle hiUH1zOfsfo_0 cat hiZLv2E5zI8_0 elephant hjBLAHakI9c_0 boat hjRlztwK-vg_2 bicycle hjhbMbrRUWI_0 truck hj2P25O-nIk_0 skateboard hkR10EU8YPI_0 train hk0cDE4A_b0_0 boat 
hk7M3PGcOhw_0 train hk-IVoljyKE_0 elephant hk-IVoljyKE_1 elephant hlFPCpe8Akk_0 airplane hlLrYrrOcY4_0 dog hlNOQO4BIHg_0 train hlnNVsSGjxA_3 car hlnNVsSGjxA_1 car hl4yLAJiWjQ_0 elephant hl7z1gnPPW0_0 knife hl_YHwW5mrM_1 bird hl_YHwW5mrM_0 bird hmThCl2HK8E_0 skateboard hmThCl2HK8E_1 skateboard hmdH0Olcbx4_0 bicycle hm98pilx9dE_5 horse hm98pilx9dE_1 horse hm98pilx9dE_2 horse hm98pilx9dE_3 horse hm98pilx9dE_4 horse hnJ2wDmXD6w_1 bicycle hnbZY12P-7g_1 elephant hne72NMSPuc_0 bird hnffUBbBFoQ_1 horse hnrSBT9miTE_1 bird hnvbE27mWwI_2 train hnvbE27mWwI_0 train hn19XaR_wIs_0 knife hn7ollCkAy4_5 bicycle hn-1W1O8kZs_0 boat hoLnPrkJ6sE_0 horse hoLnPrkJ6sE_3 horse hoNPAcq_5Ac_1 bird hoNPAcq_5Ac_0 bird hoYDTU50MTk_0 cow hoe88GhFhq0_0 truck homQXuwbe04_0 cow homx5sSuNr4_2 bear hoozxxjd57c_1 bus hotrXXenVAk_0 cat ho5YZstr1XE_1 cow ho7yo7nJk3o_1 elephant hpG2eG_hduA_0 motorcycle hpRxBuFhZ4M_0 train hpRxBuFhZ4M_1 train hpRxBuFhZ4M_2 train hpRxBuFhZ4M_4 train hpkXlhfYZfw_2 motorcycle hpkXlhfYZfw_1 motorcycle hpmC3OjLnZM_2 boat hpmC3OjLnZM_0 boat hpo-lwBTbFw_1 dog hp3aTxzS9ms_0 skateboard hqGhmP1u07Y_0 elephant hqoQm68UbGo_3 airplane hqoQm68UbGo_2 airplane hqsoIR9v8IY_0 motorcycle hq7f1_o4eFg_0 airplane hrLkVz3_xGw_2 bus hrW-pkK9osE_2 bicycle hrW-pkK9osE_3 bicycle hrgh69NXZqw_0 cow hrj6I8n8nAc_0 bicycle hrj6I8n8nAc_1 bicycle hrrpTPwLZHA_0 bird hrtiCeqnqLg_0 cow hrziTee4b2c_0 airplane hr5Q08OMeAU_0 train hr7wUBMikww_0 zebra hr7wUBMikww_1 zebra hsMptx7tOLo_0 elephant hsMptx7tOLo_1 elephant hsMptx7tOLo_2 elephant hsM1eKbrqLs_0 cat hsPK4wlNtI8_0 cow hsYL355Fzio_0 truck hsfS5oT1y5M_2 boat hskEM8GUmDE_2 train hsmxUKxzapo_2 skateboard hsmxUKxzapo_0 skateboard hsyCfsJx7DI_2 skateboard hsyCfsJx7DI_1 skateboard hs2foQ_Xo8A_0 skateboard hs-OEgnsLZs_0 train htDilkoPA-M_0 airplane htSBZwTBX98_0 horse hteze9Fz1dc_0 knife htkybhLm0uk_0 umbrella htwBHgatd9c_2 horse htwBHgatd9c_3 horse htwBHgatd9c_0 horse huCxpuVT4GI_0 dog huDCqh-KRy4_8 bicycle huDCqh-KRy4_2 bicycle huDCqh-KRy4_3 bicycle huDCqh-KRy4_4 
bicycle JQnf7j7HpKY_0 cow JQpJv-SOMS0_0 dog JQ9LtiJVsd8_0 cat JQ_dyIlBnGM_0 cow JQ_6xcOuEfU_4 cow JQ_6xcOuEfU_1 cow JRA3LCwRGu0_0 knife JRBLFsevgg0_0 train JRJjI6mFa6s_1 skateboard JRJnSf2qOXA_0 airplane JRT0FH2KEsc_0 cow JRcTFvzRC10_0 bird JRcTFvzRC10_1 bird JRsNcoTJJjE_0 cat JRsn1likB7c_0 boat JRyc_lxMJzs_0 skateboard JR6JAx7xdGg_0 cat JSA0JWvQbJg_2 train JSdEdTcUHHI_0 knife JSfXE4ExZ1U_0 bird JSfXE4ExZ1U_2 bird JSs6Sa8zR6c_0 horse JS2cbpFwahY_0 skateboard JTE0ABGzb30_1 skateboard JTE0ABGzb30_2 skateboard JTJgZcBM93k_1 knife JTa9HkbXfSw_0 cow JThBohLxRSc_0 cow JTi4Oy6v9mM_1 horse JTi4Oy6v9mM_2 horse JTtjfwrK4Ls_0 dog JT5zUQio3B0_0 bus JUHMTmjUswE_0 knife JUVHXeFTe3Q_0 horse JUVHXeFTe3Q_3 horse JUbPqBVbGQQ_1 truck JUpxTW6_BAI_0 cow JUtd4FLjXio_0 horse JU1N1nqXjII_0 train JVKkxo7adX8_1 knife JVQ6Gx2hGxs_0 airplane JVTIzApj2UA_0 giraffe JVVtcOIACz0_0 giraffe JVg62b0T408_2 train JVg62b0T408_0 train JV2A3zWMRj8_0 umbrella JV3Tbp30yp4_2 motorcycle JV3Tbp30yp4_1 motorcycle JV3Tbp30yp4_3 motorcycle JV-OfjEsQDs_0 umbrella JWKZlCk_cts_0 train JWXSXvHgoo4_0 car JXEyPb4Nzro_0 skateboard JXP_CNg8grg_0 cat JXi5KrVPz0M_1 bird JXj_lj5QUp8_0 person JXmBBTT0YXQ_0 cat JXobiO1_7Ts_0 train JXwfPpl53Fs_0 dog JYYAwimr2XQ_0 truck JYi7bWDL5os_0 person JYsWtLH_mjM_0 bus JYsWtLH_mjM_1 bus JYsWtLH_mjM_2 bus JYvBo5FwjSg_0 elephant JYvBo5FwjSg_2 elephant JY2d1dohCDs_0 elephant JY3rSX-blgA_0 cow JZBJ35lKlXw_0 truck JZOZuTiifHM_2 boat JZXr-dGLkpU_0 boat JZcy1T--d4M_0 skateboard JZ_ri3awsso_0 cat JaI9UR2n7ZE_0 horse JaLswoS3xO8_0 knife Jaumrq8clZY_0 truck Ja9rAQpB2_M_0 cat Ja_ofQ1ynAc_1 airplane Ja_ofQ1ynAc_2 airplane Ja_ofQ1ynAc_4 airplane JbA11YWHpW0_1 skateboard JbBxvvoOvpg_0 bear JbK17NE3dvk_1 train JbK17NE3dvk_0 train JbK17NE3dvk_2 train JbK17NE3dvk_3 train JbPP4AwiNEc_0 cat JbSkoHG6Vq4_0 airplane Jbfzd9wIyi4_0 cat Jbw0KUJqWpE_0 train Jb03yqEB5WI_1 bus Jb03yqEB5WI_4 bus Jb5lFDvKqXA_0 bus Jb6FIuynIuw_0 bicycle Jb-q7z_Mygg_0 truck JcJKjdDKuc4_0 train JcRvhoBwgNg_0 cow JcU-cdQmKV8_3 bus 
JcU-cdQmKV8_1 bus JcixSQRUnY4_1 elephant JcmTLrQZ7sE_1 cow JcmTLrQZ7sE_0 cow Jcwl0kCsUTw_0 umbrella Jc5PS0Ejejw_1 elephant Jc8eE1ayaX8_0 cow Jc9PdqC1rpg_0 train JdUehtxAfys_1 bicycle JdUehtxAfys_7 bicycle JdwSAFvKg74_0 car JeAykU3MiKg_2 airplane JeET8zb_gPQ_4 knife JeNu9WVQOHY_4 bicycle JeNu9WVQOHY_1 bicycle JeNu9WVQOHY_7 bicycle JeYCd0VP5EY_0 horse Jeb4SSyyZD8_0 dog Je_fuH6-34I_0 skateboard hujF3CEgAXI_0 skateboard hulFEZUNu10_0 train hutTW7ORN8g_0 bicycle hutTW7ORN8g_1 bicycle huy9NXPynro_0 cat hu6nRmzUcAw_0 train hvWHb1kiV5g_0 dog hvWs1FhyQlw_0 umbrella hvhWoRQZMUU_0 cat hvjNVTle8bQ_6 airplane hvjNVTle8bQ_0 airplane hvjNVTle8bQ_1 airplane hvjNVTle8bQ_2 airplane hvjNVTle8bQ_3 airplane hvjNVTle8bQ_4 airplane hvjNVTle8bQ_5 airplane hvkIo-dZUUY_1 bird hvlXyPikLUY_0 bus hv49V2RzgHw_0 horse hv7b1I-cRvI_0 truck hwOL2G-Lo54_0 umbrella hwPkgOB1mEU_0 cow hwTVAkfjjCY_0 cat hwikEC2Jc0c_1 horse hxC7dFDqfXo_0 car hxUn2A7Ko2g_0 cow hyMlfx_ZEeI_0 train hyMlfx_ZEeI_1 train hyX6rKHZcLs_0 person hyb_qBoKG9Y_0 train hyjjdUcyanE_1 dog hyj8BJ_PMgQ_2 elephant hyrBL1wMHts_1 truck hy9Ml-3zAtM_2 knife hy9jrpamopE_0 umbrella hzBqPVIC7IQ_0 train hzUTA7mGyKE_0 bicycle hzeHyMcUmO4_0 motorcycle hzeHyMcUmO4_1 motorcycle hzz9JBRYjFs_0 bicycle hzz9JBRYjFs_1 bicycle hz5anqtArdI_0 train hz5anqtArdI_1 train hz7PXI6R6DI_0 train h0IiMbTwz1Q_0 truck h0IiMbTwz1Q_1 truck h0hIpf9O0Vg_0 bus h1MxYGy1SBc_0 dog h1XtVmXF7CQ_1 elephant h19z0Ap_5Pc_0 bus h2R46pcCEVg_0 cow h2SNrfK0yQQ_2 bus h2X0to3hDA4_0 bicycle h2b9t_pnnNA_0 cow h22FyeO_lyE_0 umbrella h23R8X1WKjU_1 horse h24uuiI34yI_0 skateboard h27DK_oMwYY_0 dog h3FnAKBB9Xc_1 elephant h3Lz61ficjc_2 motorcycle h3aEao1bRIY_0 cat h3aZGHTjBwc_0 elephant h3o5ZykGOxI_4 elephant h3o5ZykGOxI_2 elephant h3o5ZykGOxI_3 elephant h3qOwaRYAi8_1 bear h3uPELFKoCc_3 knife h3uR99WtOh4_4 bear h3_cWsxi4Qw_1 skateboard h4CySJb83XI_2 elephant h4KXG16xA_Y_0 dog h4LE2YVwHL0_0 motorcycle h4jU8ZrDZd8_0 skateboard h4kmvN6NmyA_3 train h4kmvN6NmyA_2 train h4wsDcj7kcE_0 cow h45-zE2gKFA_2 
person h45-zE2gKFA_3 elephant h47dExP6oXQ_0 elephant h5C2RKknWfg_3 bicycle h5C2RKknWfg_5 bicycle h5C2RKknWfg_6 bicycle h5KSLdybLIE_5 bicycle h5KSLdybLIE_1 bicycle h5KSLdybLIE_3 bicycle h5dsU3N4joc_0 cow h5hkvWWp7Qg_0 knife h55Exp2rpSM_0 knife h6FtP-5VnYM_2 cow h6FtP-5VnYM_1 cow h6McnZDPX3I_12 elephant h6McnZDPX3I_1 elephant h6McnZDPX3I_2 elephant h6McnZDPX3I_6 elephant h6McnZDPX3I_7 elephant h6McnZDPX3I_9 elephant h6McnZDPX3I_10 elephant h6Mvzt5e_eE_0 horse h6jGPQLkE48_0 person h6ztcoDHYaY_0 cat h62bO9Mfl9Y_0 cat h64dmoPNWw0_0 car h7OZUnDKWbA_0 truck h7cXxMNxlcY_0 horse h7uwd7opKjI_0 motorcycle h7uwd7opKjI_1 motorcycle h8BDqFH8e_w_0 train h8BDqFH8e_w_1 train h8BDqFH8e_w_2 train h8EHrA_OM7c_0 person h8LiHNo4904_4 airplane h8LiHNo4904_5 airplane h8LiHNo4904_6 airplane Jfb3XGdt6VE_0 cat JfdoYsRxF5k_2 knife JfnHVMyUT0E_4 bicycle JfqHeWyD5DQ_0 skateboard JgLXpgcnjAA_0 cow JgQbvDmM2Nk_0 bird JggJWWHhlc4_0 umbrella Jg8FXSKMvTQ_1 elephant JhDNC6XRVG8_0 cow JhDNC6XRVG8_1 cow JhFvJHfP_NY_0 car JhPLC0PS9I0_0 knife Jh87zKRgN68_2 boat JiMyZFGmGgM_0 dog Jifa2spqYV8_0 airplane JijtEhm-Dk8_0 bus JikSLpJ2xKw_0 cow JinIHVE4_MI_1 bear JioS9DumyIM_1 car Jixd9HKGzWA_0 train Ji6bpPIPScI_0 umbrella JjIvWQ-198c_0 knife Jja500M50Yw_0 cow Jja500M50Yw_1 cow Jj4KvC3TXro_0 car Jj4KvC3TXro_1 car JkC1Udoysk8_1 cat JkC4nV8LcTE_1 bicycle JkH8ZtuvzDQ_0 dog JkpQkpiRpVI_0 bird JkzNUiOu1GI_0 bus Jk28bpr063o_4 airplane Jk28bpr063o_0 airplane JlJQlaoy3ec_0 cat JlrPaJIAP9k_1 horse JluvPpeI2DY_0 train JluvPpeI2DY_1 train JlzsUphxgIY_0 truck Jl1bEdoRG9I_0 cow Jl6gTtZcQH0_3 horse Jl6gTtZcQH0_0 horse Jl6gTtZcQH0_2 horse Jmblo1iMURo_0 motorcycle JmdMhGsyZvk_0 boat JmvNubLPYGo_0 bird JmxixgKAKzc_0 truck Jm0S-kE2yVc_0 truck Jm3dtu8GTos_0 dog JnAaSoaN3FI_4 boat JnHUNCeHEDc_0 bird JnMkFSGB6Vw_0 truck JnXmNI53DWE_0 person JnrrNu9udj0_0 bear JnvIx5y-ijs_1 umbrella Jnysuevt_4A_0 train Jn1gvGhxU5U_0 bear JocAgPv-ZJo_0 skateboard JohmecnKktI_0 boat JopGEGMo-DQ_0 dog Jo50LBwjHIk_0 bicycle Jo50LBwjHIk_2 bicycle 
JpDOBaNBwkc_0 truck JpFiApmpoHA_0 cow JpL4Mv-uFi4_1 dog JpRMc6MtCH8_0 truck JpWh1yQThRo_0 train JpZwF6hOCDg_1 truck JpjAxQ_vsZw_7 bicycle JpjAxQ_vsZw_1 bicycle JpsOsewgXAg_1 bird JpuCWzsE35k_1 bird Jp0GKZ9vA0c_0 airplane Jp1tvS1y4eI_0 boat JqCaTxH5Ovk_0 motorcycle JqC81ViWFeE_0 bear JqPkaGRIz6c_2 elephant JqT_Bx4fd1Q_0 cow Jqauh1bsJy4_0 bear Jq2ml2xQkHg_0 cat Jq8D628IlV8_1 skateboard Jq8D628IlV8_2 skateboard Jq8OMvgG6wc_0 cow JrAvVMnkKEo_3 bear JrKxxhHGR7E_0 giraffe JrZTstVj2wg_0 horse JrbrXXDuxnc_0 horse JrmyPAW-ItI_0 dog JsNQXxg1PvE_0 person JsPtP21j3f8_3 bear JsPtP21j3f8_1 bear JscnB4QfAhY_0 train JsiSPt3nv1Y_0 cow JsiSPt3nv1Y_2 cow Js2ZDfWZWtc_0 cat Js69iFgcic0_2 bus JtMMD0aJnPI_0 train JtMMD0aJnPI_1 train JtQzeWNt8IA_0 umbrella JtQzeWNt8IA_2 umbrella Jtfp49L4LHg_0 train Jt1zVsUQGhI_2 elephant Jt1zVsUQGhI_3 elephant Jt8ikZGW768_0 bicycle JuGusvu6Z7o_0 skateboard JuKJKHykMKM_0 horse JuKgukJ63eM_4 skateboard JuME8_jaVdE_2 car JuME8_jaVdE_3 car JuMNRsOc0nU_1 cat JuMNRsOc0nU_0 cat JuNubQtCvrU_0 bird JuNubQtCvrU_1 bird JuO7qvp2GBs_0 knife JuXqLoCgK4o_0 bear h8OcTR0Z4yo_1 airplane h8OcTR0Z4yo_2 airplane h8OiIYhIPTs_2 train h8PJps4Sj1E_0 airplane h8PmDAKiKVc_0 dog h8oTFl4XWTc_0 bus h8ysn_L9udY_0 train h8ysn_L9udY_1 train h9FtsOFR3p8_0 cat h9veoEpzRH8_0 cow h9w20ChZ_7Y_0 bicycle h9w20ChZ_7Y_1 bicycle h96rR-VkJZA_1 bear h96rR-VkJZA_2 bear h966cxQyjvc_1 airplane h-PS5v6ZTBY_0 truck h-VSmS49g5M_0 skateboard h-npKkPbHSA_0 boat h-qRpUteJV4_0 bird h-vGllteZnI_0 train h-1NdCqoxdU_1 bird h-2DBPzbKUM_0 cow h-27oWBBirE_0 dog h-9WCj8sB6o_7 airplane h-9WCj8sB6o_8 airplane h-9WCj8sB6o_10 airplane h-9WCj8sB6o_11 airplane h-9WCj8sB6o_12 airplane h-9WCj8sB6o_0 airplane h-9WCj8sB6o_1 airplane h-9WCj8sB6o_3 airplane h-9WCj8sB6o_5 airplane h_DH9wUjJZA_0 cow h_Ey7gQJCSc_0 cow h_KKvY3cK4o_0 cow h_KKvY3cK4o_1 cow h_XHdrNdD98_0 bus h_tQ-ZVYe1M_0 bird h_6GMOpsIOk_0 cat iACKPRGNEOU_0 bus iADpOEGdwQI_3 bird iALubFRPBXQ_1 knife iAL5KD5BwGQ_0 horse iAuV09oxF_c_0 bus iAzvkn-2C9s_4 horse 
iA_tYzSGuVg_0 dog iBDVD9if3VA_1 bear iBDVD9if3VA_3 bear iBDVD9if3VA_4 bear iBF1Cfv7RpE_2 train iBF1Cfv7RpE_3 train iBO6oNBr4hM_2 train iBmHl4vB2p8_0 boat iBmHl4vB2p8_1 boat iB2e_0wI6Cs_1 bird iCA5LKIvUak_0 horse iCUmfkHj2MM_0 elephant iCWBysiT4fE_0 airplane iCoklLBZGi0_0 truck iC-r2odD6Ss_0 dog iDBWSSj3Yag_0 bus iDMMfw0zrvQ_0 cow iDy5BzJGt50_0 skateboard iD0ptJ7ucww_0 horse iD0ptJ7ucww_2 horse iECVUNZOPOM_0 cow iEIRSDANY7g_0 bird iEcsL-BdEp8_0 skateboard iEeZD9_-mw4_1 train iEe9Qed4A6w_0 elephant iEfRHR6In04_1 dog iEnwhpHkWPA_0 dog iErN5WNQuZ8_1 bear iFLG6c3XcMw_1 knife iFgR4_OYpgU_0 boat iFk_jNFfItI_0 car iFsAXsW8t-8_1 bus iFsAXsW8t-8_2 bus iGB1OkMGELk_1 elephant iGE04YY7P68_0 motorcycle iGE8oPBzavo_0 airplane iGKh6_bzEe8_9 airplane iGKh6_bzEe8_5 airplane iGWCy-zysHU_7 horse iGWCy-zysHU_0 horse iGWCy-zysHU_2 horse iGWCy-zysHU_5 horse iGf0rCvWhZE_1 bird iGivgJkDWVo_0 elephant iGivgJkDWVo_4 elephant iGivgJkDWVo_5 elephant iGivgJkDWVo_1 elephant iGivgJkDWVo_2 elephant iGmHR-MYdts_2 skateboard iGtwAlGgpuQ_0 motorcycle iG3IZAIpSos_0 cat iG4w2A16Qy0_3 boat iG4w2A16Qy0_0 boat iG7OG-yAmkg_1 boat iHNSjj9GO9k_0 horse iHZNqzCjd7k_0 train iHbirUiASog_0 skateboard iH0SvXt_QEE_0 cow iH9qrmQO5wg_3 horse iH9qrmQO5wg_1 horse iH_5naROy0I_0 motorcycle iIYyUq4tPbc_0 cow iIZw5oU3kz4_0 dog iIa2i3Fyyp8_0 cat iIgi9EuB83A_0 train iIlu4DSMMwM_0 skateboard iIoEhVh0sac_0 bird iIoEhVh0sac_3 bird iIoEhVh0sac_1 bird iIwKnWnoXd0_0 skateboard iI66ySv1M1E_0 bear iJcYkYS6CgE_4 airplane iJcYkYS6CgE_0 airplane iJcYkYS6CgE_3 airplane iJqRpAI5q0M_0 cow iJ0Pe8-N6i4_0 bus iJ5fEZLxnPw_0 knife iJ5fEZLxnPw_2 knife iKLuvvisn6Y_0 airplane JvHU5ncnmtc_1 cow Jvkp32eVZyc_0 cat Jvm2k8MgJ5k_0 cat Jv1ayezpka4_0 bird Jv6b9zItltw_3 bird Jv6b9zItltw_0 bird JwNWcW7nUBE_0 elephant JwNWcW7nUBE_2 elephant JwaPyA7kWhc_0 cow JwnMWPlx6KU_0 cow Jw_nc2U4pKs_0 skateboard JxKJB-QdFUA_1 umbrella JxRKwF7KNOA_0 bird JxSYbvgXcT8_0 car JxVoSlh710g_2 bird Jxc3ArJpyuY_0 motorcycle Jxc3ArJpyuY_3 motorcycle JxdIZhohCtg_0 cow 
JxlB8wLncYc_0 elephant JxzCLy2VyJA_0 skateboard Jx03EEph0bw_1 truck Jx2PgBxlrLY_3 airplane Jx6xyX5sPMk_0 cat JyKJFochwIQ_0 truck JyLFLF4shyY_0 airplane JyLqTlaGOww_0 knife JyM0FDmoMyQ_0 airplane JyePA4nzTx8_0 truck JyhAOfW608o_0 cow JyliijVyyUc_0 elephant JyliijVyyUc_1 elephant Jy1hmMPCNks_0 dog Jy1hmMPCNks_1 dog Jy37u1dt8Qc_0 dog Jy_3PqINBss_1 bird JzGkRevP9mU_1 truck JzNvJYTN1Pw_1 bus JzNvJYTN1Pw_0 bus JzNvJYTN1Pw_2 bus JzNvJYTN1Pw_4 bus JzNvJYTN1Pw_7 bus Jzm0H_o-LyA_1 bicycle JzwF2_O5qho_0 cow JzwF2_O5qho_1 cow JzwF2_O5qho_2 cow J0Gb34OfhGs_0 airplane J0m2haAO_Pg_0 truck J0uOEHqVD0g_1 elephant J01a05fNHz8_0 airplane J05eYTq5pFE_0 cow J1BVFlR3Pzc_2 bicycle J1VVax1uIGc_0 elephant J1YSacTJR64_0 bear J1YqrkAsUIs_1 truck J1YqrkAsUIs_2 truck J1YqrkAsUIs_3 truck J1rYOpOlNqs_0 cat J1reV7ZinzE_2 truck J1sQZHaGRVY_0 cow J1uF4oCMmtU_0 car J10PTSVhLnQ_0 car J10PTSVhLnQ_1 car J10PTSVhLnQ_2 car J142X1ly-gY_0 cow J17uKo2HgxY_0 bird J2R5C_XNnek_0 train J2Sh2XKvWOA_2 horse J3EToJg72Es_0 horse J3d48McH1L0_0 elephant J3gk0p9Hm0o_0 knife J3hgEqlUzpg_0 bus J3hva1l0CWM_1 horse J3jOAuADP44_0 boat J3sMC-99CWs_1 cow J3zIT2YwDdY_0 bicycle J315ju7gD8Q_2 train J4eK5nQv9E0_0 motorcycle J4hu4X1Hr7k_0 bear J4ithFdbyKY_0 train J4mDzsuGR1M_2 bear J43AWiRkRAI_0 skateboard J46c4FEAjQ8_0 horse J46c4FEAjQ8_2 horse J5CA6t8d7uA_0 truck J5JNgpMvPks_0 horse J5Ss-cEKg9o_0 skateboard J5TS-1YKlWE_0 elephant J5TS-1YKlWE_1 elephant J51qDcGqOV8_0 airplane J5-O6tDEZO0_0 horse J5_8xLaPuIU_0 cat J6AHeX1RqWk_0 bus J6nRLSf9kms_1 dog J61MSyhI5Xg_0 bird J68NptJ9oRE_0 skateboard J7h1DaonvHY_1 horse J7jTtirQ85g_0 motorcycle J7vNGyyYQ30_0 dog J73WpGWHEuE_0 giraffe J73WpGWHEuE_15 giraffe J73WpGWHEuE_1 giraffe J73WpGWHEuE_2 giraffe J73WpGWHEuE_14 giraffe J79qVoBV6TM_0 car J8Akt0d4r_k_0 train J8Akt0d4r_k_1 train J8dIP05jqRw_2 truck J8dIP05jqRw_5 truck J9SzI8MQm6Y_0 airplane J9ZGJucbLiw_0 airplane J9mX4rrWQto_0 knife J9n9_-FSk4Y_0 dog J916-YD5Qms_0 elephant J-sHEYA-20k_1 giraffe iKjaiW6gHPQ_1 elephant iKjaiW6gHPQ_0 
elephant iKlCbkZsFzE_1 cow iLeUN6d8Aew_0 giraffe iLeUN6d8Aew_1 giraffe iLk3v-m1Z0U_0 horse iLvLOw8Jigg_0 motorcycle iL0GMZ7iO3c_0 dog iL5OOut4Jek_3 bus iL9TAERxS4A_1 bicycle iL9hLZ_cXaI_0 person iMfVd5_HBcE_0 bus iMqYyOcO4Fw_0 umbrella iMtt9-ROv_o_0 dog iMukpec9Vmo_0 airplane iMukpec9Vmo_2 airplane iMxzNRMiKMA_0 truck iM3tOs60qxk_1 airplane iM8Lua_zTug_2 train iNQNSmu2BD8_0 skateboard iNWrFmCCfXw_1 bear iNa2jg_1Vyc_0 cat iNghTa86iWY_0 cat iN-bJwlR2i8_1 bicycle iOEuAB0dIs8_0 dog iOH00pYaMhY_0 cow iOJiYp298qc_3 airplane iOJiYp298qc_1 airplane iOd4NCiEBLw_4 airplane iOd4NCiEBLw_2 airplane iOgScMDTX_I_0 skateboard iOvWAp7U61k_0 cow iOzYv5IpFng_0 horse iO7wHeFO6Js_1 cow iO7wHeFO6Js_2 cow iPWL6FSzmS8_0 umbrella iPbg6G7tUVo_1 horse iP98M3c1PJw_0 elephant iQB9bgZJCwA_0 motorcycle iQPn_3iB6aU_0 umbrella iQYiakvHwnk_0 bicycle iQZ1QN-A3JQ_0 elephant iQfs0MyXA-s_0 airplane iQxGihgbiM8_0 cow iQ_2xA5J-Zg_4 bird iQ_2xA5J-Zg_5 bird iQ_2xA5J-Zg_1 bird iQ_2xA5J-Zg_2 bird iRI3AkfYykI_0 knife iRLMFxqd6Vk_0 bear iRTTlG8M9FE_0 car iRTTlG8M9FE_2 car iRTTlG8M9FE_1 car iRWWnw104cE_0 bicycle iRklgBUz8ME_0 bus iRk0aHyYWdM_0 bird iRlBKC_jfE0_1 horse iRlBKC_jfE0_2 horse iRlBKC_jfE0_4 horse iRmfa0b6jJk_0 car iRpibBNFoiY_0 knife iRv5dyfU3ZQ_1 car iRv5dyfU3ZQ_2 car iRw-TCiikqw_0 horse iRw-TCiikqw_1 horse iR3sRTxVGtg_0 airplane iR4rImxKjK0_0 car iR4rImxKjK0_1 car iR5Zew8NcYU_0 truck iR5Zew8NcYU_1 truck iR5Zew8NcYU_2 truck iR5Zew8NcYU_3 truck iR5Zew8NcYU_4 truck iR5Zew8NcYU_5 truck iR5Zew8NcYU_6 truck iR5Zew8NcYU_7 truck iR5Zew8NcYU_8 truck iR5Zew8NcYU_9 truck iSCFoiWm7Xk_0 bear iSLNkNnHOXQ_0 bicycle iSYNvKIuAXc_0 motorcycle iSbXpgu-7qA_0 bicycle iSeR1wQ4sl0_0 train iTF1bWOtrew_1 bus iTF1bWOtrew_2 bus iTWyYCJO0FI_2 truck iTbEmIOM3Bg_2 car iTbEmIOM3Bg_0 car iTbEmIOM3Bg_1 car iT3LIkn9wh4_0 car iT5clmXCTEc_0 elephant iUDGzAPkGLI_1 airplane iUEEnhAvRoY_0 cow iUSZKTFqatw_0 airplane iUX8ST-BSFg_1 bus iUZnCaGp148_0 dog iVH9ehKyau0_0 giraffe iVRs9h04NcM_0 cat iVzRc0RW_Y4_0 bird iV4UGeMqQeY_0 dog 
iV8NpvUXve4_0 elephant iV8NpvUXve4_1 elephant iV9CFIQTmfs_2 bicycle iWP_wo9OSe4_0 bird iWo66ztRt0o_3 boat iWtj7if5cK8_1 boat iWv1rxdhH1E_0 bear iW1aIV39PQo_0 motorcycle iW2g2j2VhbM_1 skateboard iW2g2j2VhbM_2 skateboard iXKQX0UfOqA_0 cow iXKQX0UfOqA_1 cow iXKQX0UfOqA_3 cow iXh4-KWp9S4_0 horse iXl114K8Y1E_0 car iXxi1CQpbBk_2 cow iXzEoHyipJM_0 truck iX7b9tWhoKg_0 giraffe iYGSi3t8Do0_2 cow iYO5SD120r4_0 elephant iYYdiX4oGjM_0 skateboard iYjiqdn7fVk_0 bird iYsgKLWI96c_2 knife iYtDe_tT_wo_1 train J-6KxfbaI6M_2 cow J_HdQVHBeco_0 motorcycle J_l7W4IMhJo_0 dog J_n_3-KXet0_0 dog KAGadYR0_LM_4 bird KAGadYR0_LM_6 bird KAGadYR0_LM_8 bird KAKn8JmKESU_0 train KAjM8ENV-F4_4 skateboard KAxsc-ratJ4_0 horse KA1A0hH1nVQ_0 train KBIGw8UrUG8_0 cow KBKaaEaIPRc_0 cow KBNqKcj0xoc_0 train KBP3moB3vz4_0 bird KBRkCaaDjxU_3 bus KBRkCaaDjxU_0 bus KBe3_8RL_MI_0 person KBoY6Pa8f_M_0 cow KCbzyGKBwC8_0 train KCdR8nTa3p4_0 skateboard KCipBL5_e5M_0 horse KCy-RKy_KN0_0 bicycle KC1md4Q_DlQ_0 skateboard KDSxlGW6eRc_0 umbrella KDZsS4MjllY_0 motorcycle KDaVTe3RbUY_0 horse KDyYkCLIImM_0 knife KD0Qm4z53a0_0 truck KD0Qm4z53a0_5 truck KD5LwDdfw0o_0 horse KD9qqVSiPu0_0 train KEGLFAbfrxs_0 motorcycle KERo3bKldwM_0 elephant KEW0fAHE_74_0 bus KEW0fAHE_74_2 bus KEagowlFwzI_0 cow KEll3gbyIsk_0 truck KEll3gbyIsk_1 truck KEll3gbyIsk_2 truck KExfLNe3IbY_0 airplane KE2StZtSBfk_0 airplane KE3O7h2RC-s_1 train KE_UJpQulNU_0 horse KFEorB8NRcE_0 boat KFFTHBaYcbw_0 bear KFJtVwXfusI_0 boat KFRZOFB41Jk_0 train KFk_7p6X-zI_6 car KFk_7p6X-zI_1 car KFk_7p6X-zI_2 car KFk_7p6X-zI_4 car KFk_7p6X-zI_5 car KFnvvsS8eIE_1 knife KGYrelsyNbk_0 airplane KGbYHbiOfd8_0 giraffe KGwEL4VozSA_0 boat KG8zBA9Gudg_0 knife KHBsJZVKzks_0 truck KHG1hZsfjwQ_0 train KHHyhgm1jZ0_3 skateboard KHSjivlhX30_1 bear KHcEC33udEg_0 cow KHgLQP4XH9Q_0 skateboard KHsYYKcSCSI_1 cow KH0F1sJXKss_3 elephant KH0k5jfUZGg_0 bicycle KH8QlsYIT1M_1 bear KIPptA8AzYg_0 horse KIjf6QGqdsw_0 truck KIjf6QGqdsw_1 truck KIqePeskBSk_0 truck KIy2LK1jsQ8_0 person KI8Arf5-ekw_1 truck 
KI8Arf5-ekw_4 truck KJIBdy7_10k_1 bus KJIBdy7_10k_2 bus KJJBVXnnqIw_0 zebra KJcXjJ5S9yA_1 dog KJrPyuVxWRg_0 airplane KJrPyuVxWRg_1 airplane KJvAK-5ExwY_2 truck KJ30mU3h4f4_0 bear KJ7PQiJAKRM_0 elephant KKKiTv_k23A_0 giraffe KKO1QGoVQYU_0 elephant KKpwJEMQYv8_0 dog KKsKKMjHYGM_0 horse KK06xbUhklk_1 bus KLC8OgkQnNQ_0 boat KLEKnTRMmo0_1 cow KLGAT1GQYGA_2 bird KLMz6_P5QmA_0 horse KLNmQqyAs54_0 cow KLUTy4pqLZ0_0 bicycle KLVZqPfRuTg_2 bear KLVZqPfRuTg_7 bear KLlN4H-eGYI_1 skateboard KL6-Iu09-C8_0 cat KMNaWZZK2Os_0 skateboard KMOOcO5yE9E_1 horse KMXuGjMAt7k_5 bicycle KMXuGjMAt7k_6 bicycle KMXuGjMAt7k_3 bicycle KMajGvVnol0_1 airplane KMajGvVnol0_4 airplane KMajGvVnol0_5 airplane KMajGvVnol0_6 airplane KMajGvVnol0_7 airplane KMiZgk_f50g_0 dog KMlZbzTdutw_1 car KMlZbzTdutw_2 car KMsL64iYfOA_0 car KMtu1xThH2k_2 elephant KMyoO6YYfZk_0 elephant KNaoNUMT7m0_1 car KNg4K_bbY5Q_0 train KN5hxi96gW0_0 cat KN-_uhPPfoE_0 cow KOKdrC_foXo_0 airplane KOOd5IO8seo_0 boat KOSUWuFIQjQ_1 airplane KOVZk2ixqc0_0 truck KOgmgqcT21Y_1 bird KOl1EDiK2e8_0 motorcycle KO6T6QdloiM_0 bus KO7Ncyx1-9c_0 train KPJDHcE-qeQ_0 bicycle KPYtlDJa43o_0 skateboard KPfbBNvFcmA_0 skateboard KPj_wrsubOE_2 bear KPkzyHL7IPg_0 cow KPmvpNEHsPk_0 skateboard KPzWIuvRlr0_1 skateboard KP4ApNQiIEI_0 cat KQB-ZyriFmI_0 boat KQg6eO2jr_Y_0 umbrella KQ5mchVgTXo_0 truck KRCLiP-JUsc_0 truck KRCLiP-JUsc_2 truck KRCLiP-JUsc_1 truck KRW0HyqDLg8_0 dog KRjN1nx8mcE_0 airplane KSDxU99SF6g_0 motorcycle KSHVle4SAM4_0 elephant KSZ7nkMWOsU_0 skateboard KSZ7nkMWOsU_1 skateboard KSj7hZ7oO18_0 cow KS1ge4vlv64_0 bicycle KS4vsIYGaCM_4 truck KS4vsIYGaCM_0 truck KS8UAlyHoCg_0 dog KS_fak2guWU_1 dog KTAMaZKxpF8_2 train KTDhNtr8XF4_0 airplane KTDzrCvIVQs_0 dog KTQQtbUbWbA_0 airplane KTZ2Jsj6_ig_0 truck KTdzxOjJNgI_0 car KTsTGNqrFuE_0 umbrella KT7YiBWXqNk_0 airplane KUZxnRyU2e8_0 cat KUbSnz1yWxc_0 knife KUc8Kw30V1Q_2 truck KUc8Kw30V1Q_3 truck KUc8Kw30V1Q_4 truck KUgY_2bsBC0_1 skateboard KUhzqYZoYCI_0 cow KUkcrqulhqg_0 cow KUlpA-cpCpM_0 horse KUumLype4AE_0 
elephant KVFlTVdKQVw_0 horse KVJCkQzQbMs_0 person KVmS-yiYu2c_0 bicycle KVzW5MPT25A_0 airplane KV0o55FO4XA_0 skateboard KV3jtdzXA9U_0 dog KV__RQ75-vw_1 cow KWJiCsomGTA_0 cow KWLl4vVumIs_0 truck KWSDQebY3dA_0 cat KWwbFKgHqW0_0 car KWxd8IQ9_a0_0 cat KW10UlO19uo_0 bus KW4ovUCg7uU_0 bicycle KW4ovUCg7uU_1 bicycle KW5S4gsTVaQ_0 knife KW7gAr7kgow_0 dog KW_6RyjLGPI_3 horse KXCQuD9phb4_1 bird KXENib5sk78_0 cat KXLWiz5ZUh0_1 train KXLWiz5ZUh0_2 train KXdF5__0yVQ_0 cow KXf6k7PrX7E_1 elephant KXf6k7PrX7E_2 elephant KXrQkw1WPnk_0 bird KXzu3MDaZn8_0 car KYK_Wg8JlTg_0 skateboard KYK_Wg8JlTg_1 skateboard KYTRCD2p-8Y_0 motorcycle KYZzKKYD7Yc_1 horse KYaB_EEk344_0 cat KYc__uUZkwc_3 bicycle KYd6wCR0jVc_1 horse KYd6wCR0jVc_0 horse KYs4hm9X1Rg_1 bicycle KYvXJXEbUMg_0 bird KY0x7p41Q_A_0 cat KY04L4VTsXc_1 airplane KY04L4VTsXc_2 airplane KY7D2Y5MQSo_0 horse KZAf2uPS-us_1 horse KZAf2uPS-us_0 horse KZFniGi-fes_0 dog KZJcgoY3r3U_0 airplane KZSLQpdbGps_0 motorcycle KZYe6pqrLaQ_1 dog KZhX7tDfYIA_0 bus KZl_XArvSXk_0 horse KZ4OuA1t3ZY_0 elephant KaUGkf-3N-4_0 horse KaiX3d83DWA_0 zebra Kaj5B4nrWJU_0 skateboard KapwOqVyzUk_0 cat KaqToIfNxMY_1 bicycle KauPg8P2kC4_1 airplane KazepPKQz1M_1 cow KazepPKQz1M_3 cow KazepPKQz1M_4 cow Ka978At0k0Y_0 airplane Ka-4ZfE0GMQ_0 motorcycle KbA6UDJg1LE_0 train KbA6UDJg1LE_1 train KbGl5jqOQ7o_0 cat KbRIbBeLBsM_3 motorcycle KbosOWR7ZSg_1 boat Kb3lxArGO8Y_0 bicycle Kb3lxArGO8Y_1 bicycle KcDpzG8kKho_0 cat KcL-zz1sb6I_0 dog KceqMsKO-zc_0 cat KcpGWNCD-uk_0 cat Kct9k6Q2YM8_0 car KcuEc9WwYSQ_0 cow KcuEc9WwYSQ_1 cow KcyLR4RxylE_0 cow KcyMYgt62Go_0 horse iY5Sh73Lem0_0 bird iY6eEC8uY4E_2 train iY6eEC8uY4E_1 train iY9QlFmEBFY_0 motorcycle iZsSK_iIOoA_0 horse iaGO2mTgoPo_1 bicycle iaGO2mTgoPo_3 bicycle iaWSU1ISWXQ_2 airplane iaWSU1ISWXQ_0 airplane iaflfMXT7QQ_0 boat iamGAsKNRhY_0 train iana0Lz1gs0_1 motorcycle iasZRb9p3lg_0 motorcycle ia1XmqAwn7M_0 bus ia6R3fqdlnE_0 bear ibcBDIGpMfo_1 bus ibd-Wxcr_x4_0 horse ibpj369yzbw_0 umbrella ibxmk7cGhTQ_3 horse ib5fWzJWV5A_0 cow 
icDyRH3P-nM_0 airplane icGjENlINL4_3 skateboard ich9rXZWjGY_0 car icic9NkCnf0_0 cow icnuBKQZNBg_2 bus icnuBKQZNBg_0 bus icnuBKQZNBg_1 bus icxOfJQ-l9I_0 car icxOfJQ-l9I_1 car icy3pC1Q0eA_0 cat ic7k8fkUDXs_0 cow idnOwkwaCm4_0 horse idnSzg_rV_k_3 bicycle idoGYHCXGJs_0 elephant idq0Jqw8Oa0_2 elephant id1yzZ3HkTs_1 knife ieCL4lz7IJw_1 boat ieOpqoYhMOQ_0 truck ieOpqoYhMOQ_1 truck ieOpqoYhMOQ_2 truck ieULzTIs9ls_0 cow iedgnWefCA0_0 airplane iedgnWefCA0_2 airplane iedgnWefCA0_3 airplane iewlg5CteEs_1 airplane ie8gkh6nQcA_0 train ifKKR-gCLSk_0 cat ifRQKBKIRSI_0 dog iff3KW8leKw_0 airplane iff3KW8leKw_1 airplane ifghH4Jo8D8_0 truck if31ci9xz_8_4 bicycle if31ci9xz_8_1 bicycle if31ci9xz_8_2 bicycle igGtS-jZCQM_2 car igGtS-jZCQM_0 car igLVqNKw-yE_0 bird igMWvnK1jEE_0 giraffe igMWvnK1jEE_3 giraffe igMWvnK1jEE_1 giraffe igQUACDrluw_0 horse igU61tmxeE4_2 skateboard igWsPt0nelg_1 bus igcpSvypduQ_0 truck igcpSvypduQ_1 truck igdqmLfZ_cw_0 airplane igjBIRwjlko_1 dog igm6X4CZLmk_1 bus ignREcFRyaQ_7 airplane ignREcFRyaQ_8 airplane igwghbZYjgg_0 airplane ihMDaxeTpZs_1 horse ihTjIMWOjuQ_1 motorcycle ihUpF22zo4M_0 train ihUpF22zo4M_1 train ihWWle00xEE_0 motorcycle ihh0J0AaWBs_0 train ihh0J0AaWBs_2 train iiA0hIRwwJA_0 train iiSWvRk3YfU_0 bird iiextKoe48U_0 cat iigPPpoo0W8_0 knife iiiOUcmwJPw_0 cow ii0PDMs-a0o_2 car ii2ghwDAI3w_1 airplane ii_sG2SkeXM_0 cat ijB2Yh71VIg_2 bear ijJAWtORd2w_0 truck ijJAWtORd2w_1 truck ijVpcnt8HN8_0 bus ijXmwWOLvpM_2 horse ijXmwWOLvpM_1 horse ijdipMmraWc_0 truck ijwhkKzyWE8_0 airplane ij0zLKtr0sA_0 bird ikGzd6ivk64_0 motorcycle ikKFRS8Hivk_0 bear ikVu6XfZ3_A_1 bicycle ikafEc8p6rI_0 bicycle ikafEc8p6rI_5 bicycle ikafEc8p6rI_1 bicycle ikafEc8p6rI_3 bicycle ikafEc8p6rI_4 bicycle ikfmjumoUlM_2 train ik868nOtrZo_4 bus ik-jgdZW4Ek_0 horse ik__zZ1HZNg_1 giraffe ilKErQ8ojz0_0 umbrella ilKErQ8ojz0_2 umbrella ilKErQ8ojz0_3 umbrella ilKW98Qvobg_0 skateboard ilvsheh1Cqs_0 dog ilxXSgvtFgw_0 cow imEWC_Q-BSg_1 car imcRxs0K7H8_0 bus immhpBi8eWw_6 skateboard im_FneG303c_0 dog 
inEZ7ZLAS7s_5 skateboard inJLKInP5kw_0 dog inZmM8c-9NI_3 horse inedUh-74-A_4 truck inodVLfFogA_0 train inynAJrGhVU_0 motorcycle in061qZJjWI_0 dog Kc8WMzLKvvk_0 cow Kc-f3X7O-pw_0 cat Kc-x73DCumI_0 truck KdGgVhM0Ihg_0 bird KdKlI0ZN6qo_0 airplane KdQQqsAuU7o_1 bicycle KdUSJz6UWLQ_0 giraffe KdXRnPKKeTU_0 bird KddQJwFfv9s_2 skateboard KdjMgSuON5w_5 bear KdpUjVhfjG0_0 person KdyadP7Y1nU_0 car Kd9Em2ABfN8_0 cat Kd-jTE5-2uE_1 motorcycle KeMITKdjHtk_0 cat KenV2bIQf1o_0 bicycle KevYmLAAigc_1 train Ke3R9FrGLcY_0 dog KfJU66erPWo_2 knife KfMO45jz-68_0 boat KfS_UKkbQAA_0 bird KfTV1TFY2b8_0 bird KfaTw0euPQA_0 motorcycle KfjmKiZzSlY_0 cow KfjmKiZzSlY_5 cow KfkKe7q45KA_1 motorcycle KfkKe7q45KA_2 motorcycle KfkKe7q45KA_3 motorcycle KfpCncLoqOw_0 cow KfwbVpPI0nU_1 motorcycle KgAFD_JvgrQ_0 cow KgD3H0APDy0_0 bear KgNS5HwFF_c_1 elephant KgVEQYicksA_0 cow KgY5OrVnzv4_0 cow Kgo7SWtDdz4_1 dog Kg3xuyjNU7w_0 umbrella Kg7Qk4Gx9n0_0 motorcycle KhKZwdKiqms_0 cow KhKcHaH_ALo_0 horse KhPKq8O30VM_0 bicycle KhPKq8O30VM_2 bicycle KhPKq8O30VM_4 bicycle KhuC9snWfpI_0 cow Kh7rAO7jCGc_0 airplane Kh_KwBHfGQ8_0 cow KiHy8IMQ6zA_0 airplane KiaUDlPLxzk_1 bear Kixl-Wmj3kg_0 motorcycle Kjaag6B-MIQ_1 skateboard Kjca1u6P3NE_0 cow KjiI2E3l3Mk_1 truck KjiI2E3l3Mk_2 truck KjqaJ25GUBI_0 bus Kj3dRtd4xQI_1 cow Kj3dRtd4xQI_0 cow KkD23XYUG9c_0 umbrella KkMNGzvNkg4_9 bird KkNYBz9ZaVA_0 bird KkNYBz9ZaVA_1 bird KkPf9AB1HZo_1 elephant KkRq1ogJq-4_0 skateboard KkXTT9C4xfc_0 cow KkdSKHS7P50_1 skateboard Kks6eJqnZLQ_0 dog Kks6eJqnZLQ_2 dog Kks6eJqnZLQ_3 dog Kks6eJqnZLQ_4 dog Kks6eJqnZLQ_5 dog Kk6BgYl9OjA_7 bicycle KlEK-vv3DVo_0 bear KlENnLskuCU_0 cat KlG0czACle4_1 cow KlG0czACle4_0 cow KlG0czACle4_2 cow KlG0czACle4_3 cow KlqbHICh4G4_0 train KmJhshcviXA_0 knife KmbMzgXFdKs_1 airplane KmbMzgXFdKs_2 airplane KmbMzgXFdKs_0 airplane KmfmqwmQneM_0 bird Kmr5uVYVSDo_0 car KmuV8XfAjvw_0 horse Km3GmgNJlL8_0 train Km3GmgNJlL8_1 train Km3GmgNJlL8_4 train Km7w520V5vs_0 airplane KnIxVxIho9w_1 bird KnN2yDre-aM_0 boat KnTu6keaGs0_2 elephant 
KnTu6keaGs0_0 elephant KnXPxa1RzmU_0 cow KncYvkV6rwc_0 boat Knql8E5Khc8_0 elephant KnuD87lrS8w_0 skateboard KnvGRqLQ5iM_1 train KoA6bPmALeA_0 cat KoXgGmdVCBM_1 bicycle KoXgGmdVCBM_10 bicycle KoXgGmdVCBM_2 bicycle KoXgGmdVCBM_3 bicycle KoXgGmdVCBM_4 bicycle KoXgGmdVCBM_5 bicycle KoXgGmdVCBM_6 bicycle KoXgGmdVCBM_7 bicycle KoXgGmdVCBM_8 bicycle Kosi26dm76A_0 horse Ko5wlBGl200_0 horse Ko_Nx24OGxM_2 airplane KpDzoM2xtwc_2 truck KpDzoM2xtwc_3 truck KpDzoM2xtwc_5 truck KpHFaYsgWrg_2 elephant KpHFaYsgWrg_1 elephant KpVflkpC7d4_3 bus KpVflkpC7d4_5 bus KpVflkpC7d4_0 bus KpVflkpC7d4_2 bus KpXxo2n6AYw_1 motorcycle Kphl0WRacss_0 knife KqAvXx4bN5k_0 cat KqQgFUEAS-M_0 train KqavxpR698k_6 dog KqavxpR698k_0 dog KqavxpR698k_1 dog Kqfo6_qcthc_0 car KqjhaIJMY5U_0 cat KqnqyAczaqs_4 bus KqqyldSpJh4_0 horse KqqyldSpJh4_1 horse KqzkADa-Lqw_1 train Kq1x16QvM1g_0 dog KrGJjt0yq-s_1 bus KriNb3dhqVQ_1 skateboard in9LFcixPXo_0 skateboard ioEMtB2bP6o_0 bird ioESr4H79KY_0 boat ioGc_R8NJow_0 cow ioKahF3aFWw_0 horse ioKahF3aFWw_1 horse ioOHxrHumIk_1 airplane iobYquCNk5k_0 cow iojaZ646ie8_0 skateboard ipLnwxta1Jc_0 boat ipOJVFLMLIk_2 bird ipOJVFLMLIk_0 bird ipgB9KXnzK8_0 horse ipg_y1T2OsM_0 cow ipg_y1T2OsM_1 cow ipqQlNsINy8_2 airplane ipt6gWgCgis_0 truck ip5xVRJOpP8_0 umbrella ip8BFE94TKo_0 airplane ip8BFE94TKo_2 airplane iqDJJqLVBBk_1 elephant iqExYW2fPfc_0 bear iqicuLBaF_g_0 truck iqlKzflOl00_1 bus iq1FaWFylpI_0 motorcycle iq6izTYp-DU_0 motorcycle irBsER6ITHw_2 skateboard irDs_vWExnM_1 bicycle irDs_vWExnM_2 bicycle irU_BJXoU9I_1 cow irWY8s-JuBs_3 airplane irWY8s-JuBs_0 airplane irWY8s-JuBs_1 airplane irWY8s-JuBs_2 airplane iramP9ihj_w_1 bird irgacv6LobE_0 motorcycle iri1MtEgOjQ_0 bear irs2O6YOB5I_3 elephant irs2O6YOB5I_5 elephant irs2O6YOB5I_1 elephant iruY-BU0rpg_4 elephant irzcPf--6uQ_0 train irzcPf--6uQ_4 train irzcPf--6uQ_5 train ir4EYn7Fz5A_0 dog ir5E9O2Tonk_0 boat ir7Dq5dPxOQ_0 horse isPplb7aotI_0 boat isPplb7aotI_3 boat isU4229ndXM_0 cat isfwmnXNmeM_2 cow islz_HxqOnI_0 bird isvvRHvNuIw_4 
umbrella isynk11V9s8_3 airplane isynk11V9s8_1 airplane isypXPZMgns_2 boat isypXPZMgns_3 boat itKyPMv5z0Y_0 umbrella itKyPMv5z0Y_2 umbrella itc-A2zwSGM_0 dog itrvgHryhIY_0 train its4C4ty2oA_0 skateboard ittQcsrECUE_1 bear ittQcsrECUE_2 bear it1EatlrBkg_0 cat it3KS-r39EQ_1 knife it3hCzfmyfs_0 cow it6DtEGdhas_0 cat it8Fid-mqRQ_0 truck iuEbY8B4Qo4_0 cow iuEbY8B4Qo4_1 cow iuFmdispR2U_0 bicycle iuRmu4BN6bw_0 train iumTd9IGDho_0 train iusgUMlrYFA_0 airplane iutdZMWA8f0_0 person iuumrgHW8zM_0 umbrella iu3sd1qnr8g_0 car iu9Av4HCmiw_0 knife ivDeIaJYIlE_0 truck ivT103z2bwc_0 giraffe ivdfO5VqKo4_0 cat ivgTXhIqccY_0 cat ivi1frbFnGw_1 giraffe iwFO7lcVjKc_2 cow iwFO7lcVjKc_0 cow iwFO7lcVjKc_1 cow iwX4cgfQn5s_0 bird iwczN64AC9Y_0 bus iwp5aVOXWaM_0 airplane ix8S6CRuUFg_3 bear iyAvqfMVOeA_0 cat iyLZZlL-B80_0 cow iyMbIICjtcg_0 cow iybJfH6iVdU_0 bus iygW3-Ovcic_0 cow iyn1OZFmvXE_2 bird iyz9Lq13Mcg_0 cow izbTUTqkG7c_0 cow izx70OqPYBc_0 dog iz9-Vl4e9po_0 train iz9-Vl4e9po_2 train iz9-Vl4e9po_3 train iz-BT0NAs6k_1 knife i0Eg02B3JoM_0 elephant i0Ez1KT7sTo_0 horse i0ZE0kXl5oU_1 skateboard i0eMgZ0riHI_2 bird i0gg-mJNKlU_0 cow i05OPAsrmJI_3 elephant i05OPAsrmJI_1 elephant i05OPAsrmJI_2 elephant i09cuoC14q4_0 bear i1DfyWe0Jh4_0 cow i1DfyWe0Jh4_1 cow i1NfFxZmBSA_0 bus KrvsSuIgrJQ_4 horse KrvsSuIgrJQ_1 horse KsT2_VxPkb4_0 knife KsXzFCpHMPU_0 giraffe Ksyud0_i1zI_0 bus KtINrfbQSXk_0 knife KtV59qZg7BU_0 truck KtX4x9k3J2A_0 train Kthi3i2WM3s_1 skateboard KtkN77asAj4_0 horse KtplZx6_ecU_1 knife KtqvSap6uig_0 skateboard Ktxb4OmaAjA_0 car Kt3uQcxNltk_0 zebra Kt9neWWjkHM_2 bear KuBa9tep8xk_0 bear KuQgP71vfZ0_0 train KuYBJ90zNYw_0 umbrella KuYjBUvU-ws_0 umbrella KuYrzelSfIw_0 car Kulks153IS8_0 truck Kulks153IS8_1 truck Ku0XhH2YeG4_0 bear KvH6JyHG3H8_0 motorcycle KvH6JyHG3H8_1 motorcycle KvLXxaGooPk_0 cow KvPLPO4A5R8_0 knife KvRsu4xefwo_0 person KvcxzJxNkkU_1 bird KveRZ7dBNGU_0 boat KvgupPBw5rc_0 cat KvjDDIthDDM_0 cow KvkOTtqxJlo_1 cat KvsaKWirK7Y_0 skateboard Kv0ui3mEWGE_0 horse Kv0ui3mEWGE_4 horse 
Kv0ui3mEWGE_1 horse Kv0ui3mEWGE_2 horse KwkcPYl8Lv4_0 cow Kw7t6l8h2Ns_0 bear Kw7t6l8h2Ns_1 bear Kw8037OwDjc_0 truck KxWI3M2FGOw_0 horse KxZXot9AIY4_0 truck KxflrYttp20_0 bird KxlTxdqDDzo_0 cat Kxuqb_htGwY_0 giraffe Kxuqb_htGwY_2 giraffe Kx40to29YnE_0 skateboard KyDXCruNNj4_0 horse KyUM64yfNCA_0 horse KyWUn_bj5rM_0 motorcycle KyZWWIsQUbg_0 skateboard KyZWWIsQUbg_1 skateboard KyaKfhOfKhE_1 bird Kyt325n06oI_0 cat KywHhzvsm3Y_0 bird KyyS9PYJ9Zo_0 truck KzK3iwncxbY_0 bicycle KzK3iwncxbY_1 bicycle Kzc17TzutkM_0 skateboard Kzc17TzutkM_1 skateboard KzyD-e7N2D4_0 train KzyD-e7N2D4_1 train Kz3zulHzEE4_1 train K0CwoXVMp0M_0 bicycle K0L3_2UquEY_0 boat K0Zt-EcXkj8_1 airplane K0cgwgX_8fo_2 boat K0xs4bH65_Q_1 motorcycle K02fUURwCiY_2 car K02fUURwCiY_0 car K02fUURwCiY_1 car K1Qbgm__2iE_0 cat K1ccfBgR_kg_0 truck K1-s4sk63R4_0 horse K1_J3d_yH64_0 motorcycle K2F6TCgVfR0_0 boat K2hV4KVruLc_0 airplane K2my8qWjyn4_0 cat K2yjgwFV15k_1 motorcycle K2yjgwFV15k_0 motorcycle K26jSjClwaQ_0 skateboard K3Cgw_EFdbw_1 motorcycle K3DniaFnn9E_0 cat K3KhxEuf8mY_0 horse K3KhxEuf8mY_5 horse K3Ov5rPJ2LE_1 horse K3XsEMr7Qt4_0 person K3qgW4Y3yrk_0 motorcycle K30LSGFu6hs_0 motorcycle K4RE7AZWGv0_0 car K4U_AmqQFDY_0 bear K4VnWy2-8xQ_3 car K4ec2MqDkPw_0 train K4fCUNjbdf8_0 motorcycle K4wp52Zn5d4_0 horse K5NooGgwD1E_0 horse K5NooGgwD1E_1 horse K5pBkPv_1sg_5 car i12y-zJl-nA_0 cat i17EaDmRPCg_0 umbrella i2Yjl6kF8iY_2 airplane i2Yjl6kF8iY_0 airplane i2cujNbMSKc_1 skateboard i2diIHrCsbk_1 bird i3AK_cujBxY_1 motorcycle i3BpSeFJdgo_0 cat i3HeGqUyibM_4 bicycle i3HeGqUyibM_9 bicycle i3HeGqUyibM_12 bicycle i3LFAemLFW0_0 horse i3Z5pFF2dH0_0 bird i3a4U770GtE_0 person i31nG3E36WE_0 knife i32p4KoRD2o_0 train i33S_D8TBc4_0 dog i35wpbpl8qY_2 boat i38dpYWvJN8_0 umbrella i38dpYWvJN8_1 umbrella i4CFI7MtlRs_0 cat i4ExemfAEO8_0 bicycle i4IpgDIqTrs_0 boat i4RZtd1cCw8_0 umbrella i4bRNqQ32MI_0 cat i4clJpNvw4M_2 bus i4hqN47R0oU_1 train i45JoRzDdI0_0 cow i46jok5cjyY_0 horse i5GJ6mIp8zc_0 boat i5G6RkcL4m0_0 cat i5OdBE4QG6c_0 train 
i5g87UeVkBU_0 horse i5g87UeVkBU_1 horse i5sT2ifoPyM_0 knife i6MF-PGtJiE_1 train i6WTNPwIjW8_0 cat i6aJqhBh5wg_0 skateboard i6j6P7ITxYg_0 cow i6vwTWezXmU_1 boat i66Gsq6zzqI_0 motorcycle i6-YQ6rSnDI_0 cat i6_oBTD2-YA_5 bird i7P2tq4TS_4_2 bus i7UQGL5uxvw_1 skateboard i7WeV3CfJV8_0 knife i7a8sQcVRgE_0 truck i7umCLnxVXw_0 cat i791If0qoBU_5 knife i8KQCu2cMAc_2 bicycle i8KQCu2cMAc_4 bicycle i8bVI1667K4_1 truck i8hjK42sseE_0 motorcycle i8lG7Ux3wlc_0 dog i8nbuADJjmE_0 car i8nbuADJjmE_1 car i8nbuADJjmE_2 car i9PUn4sF30g_0 motorcycle i9T-NwSBqPE_1 knife i9VWkuQHBls_0 horse i9nmvkDiFGc_0 cow i9sP7mWuQ_8_2 motorcycle i9sP7mWuQ_8_1 motorcycle i9u4vsQUBTQ_0 horse i90TDb7evCY_0 truck i9_FG4-2VIM_0 skateboard i-CQVFq1JI8_1 bicycle i-CQVFq1JI8_3 bicycle i-T9Q2g8xbk_0 airplane i-kodOT_ufM_0 cow i-nP7aFTZb8_0 bird i-xdWDN7Eys_2 knife i-3aAuwOmxc_0 truck i-8W-K4y3nY_0 train i_HHc85mP4Q_0 train i_h0vOCrd_U_0 airplane i_h0vOCrd_U_1 airplane i_iXTMX4Vls_0 cat i_nZ8ImBf18_1 bicycle i_nwFUP7QJM_0 knife i_4c71HPXOI_0 giraffe i_-PIEIGkQE_0 horse i_-PIEIGkQE_1 horse jAH-80rHWKY_3 bear jAW8iLGAgdQ_1 bear jAW8iLGAgdQ_0 bear jAh4oBD0Bsw_0 train jAnV_6fFGnI_0 cow jAy3VhkJauE_2 knife jAy3VhkJauE_5 knife jA6aZl1f4Wg_0 bicycle jBMmFLPc7nA_6 bus jBMmFLPc7nA_0 bus jBMmFLPc7nA_3 bus jBMmFLPc7nA_5 bus jBTJgbVspOA_0 airplane jBl50J7bOEw_1 airplane jB1IT1aBj-Y_0 dog jCDFU72N7Mc_1 skateboard jCJGjjNBSk8_1 airplane jCJGjjNBSk8_0 airplane jCMWNtCzuqU_0 knife jCUnLxCoYMA_0 motorcycle jCY67ybfyqU_1 cow jCZx5dn_4KA_0 bear jCcW1MW6PTE_0 truck jCcW1MW6PTE_1 truck jCiwgfC1uN0_0 dog jCtFgJ1qhJE_0 bird jC5Px208OVY_4 horse jC5Px208OVY_5 horse jC5YGckTiIU_2 train jDFqxB4rC7M_0 cat jDJNC5fzvfA_1 motorcycle jDYks7hSKbg_0 truck jDbHjQZ5R70_0 airplane jDbHjQZ5R70_1 airplane jDdFavN2eWY_0 dog jDgpggXdBIc_1 motorcycle jDgpggXdBIc_2 motorcycle jD2RjyxG6ow_0 motorcycle jD4621IQz3w_0 dog jD4621IQz3w_1 dog jEASZOuNSS0_3 skateboard jEASZOuNSS0_0 skateboard jEASZOuNSS0_2 skateboard jEEOkCjU9y0_0 bear jEJZ76_xhog_2 bear 
jEQDhb_Zewo_0 cat jEYG-qIv34o_1 cat jEYG-qIv34o_0 cat jEfwj-JzFXo_0 person jE1Rq_Ot02M_0 dog jFAm4tikj6E_0 horse jFSIX_KuRK8_0 horse K5p31PQkx3I_1 horse K5q4FoXnLwI_0 train K5sQWplX-D8_1 skateboard K5sQWplX-D8_2 skateboard K6JHTga6VU8_0 airplane K6SFafS3Zv8_0 car K6SFafS3Zv8_2 car K6jf51to7dU_0 horse K6jf51to7dU_1 horse K6sKjN_MOsE_1 bear K6srgkSvZdw_1 skateboard K6srgkSvZdw_2 skateboard K6vEY0vOlSg_1 train K66dqG9OJuo_1 dog K66dqG9OJuo_0 dog K6_WEh-eizw_1 airplane K6_WEh-eizw_2 airplane K6_WEh-eizw_4 airplane K7uSHqISah0_0 train K702Tx5vkp4_0 horse K78iEUHTTZc_1 cat K8aa-7brUTs_0 bear K8vGdEhh_jU_0 bicycle K81vEhukX4U_0 motorcycle K9LhqtvfZ10_0 dog K9LhqtvfZ10_3 dog K9LhqtvfZ10_4 dog K9LhqtvfZ10_5 dog K9TPOifKCmU_0 motorcycle K9hTkmr_71A_2 car K9jCx7G3_Mw_0 knife K9kNamc2c5Y_1 dog K9kNamc2c5Y_0 dog K9wE7VzJD00_0 train K-Dz6gr96Lo_0 dog K-s8RPMLRw4_0 bird K-s8RPMLRw4_2 bird K-x3x3kGGqg_0 dog K_PGa9Eo6mo_1 dog K_VS3tyB-Cc_0 person K_Z28TO4stg_0 bird K_h1L3P_j1M_0 bird K_pO-MBS7lI_0 dog K_qFWKniImU_0 skateboard LAKF499FHX0_0 train LAKF499FHX0_4 train LAKF499FHX0_1 train LAKF499FHX0_2 train LAKF499FHX0_3 train LARRHwtW8fE_1 dog LAZoyKF7lbQ_0 truck LAZoyKF7lbQ_2 truck LAZoyKF7lbQ_3 truck LBJEbJfzvW4_1 skateboard LBOXDMZvtBY_1 train LBnsLkuQ8kE_0 person LBwm49n5rKo_0 motorcycle LB6fi4oTKvQ_2 dog LB8Wc8hU4Hc_0 airplane LCGZmNGyPhM_0 boat LCghaNtVeM0_1 knife LCjQb5zLTCs_0 train LCoIwiCBlW4_0 dog LCxiwbrpEFI_2 bus LC5Qly11BZs_0 train LC5q2G2pxT0_0 bus LDEju5sQWOU_1 bear LDH_eiO0aFE_0 boat LDJ9xB-n5Sg_0 dog LDJ9xB-n5Sg_1 dog LDQiOOCMhs4_0 truck LDQqhsLKyjs_0 train LDYFndJjRGA_0 skateboard LDgpZlJ_QYM_0 boat LDh-8GoBSLw_0 bear LDlR_gDbVFk_0 airplane LDvN2rB8p44_0 train LD-8yzPoOIQ_0 car LD-8yzPoOIQ_1 car LD-8yzPoOIQ_2 car LEH61oMv2So_1 train LEIkLV_S5yA_0 cat LEP6ZOl5iw0_0 horse LEUCQjNIm9E_0 knife LEYBNQUwruU_0 dog LEiolk6i9RI_0 horse LEmU61Tdqxs_1 motorcycle LEverFsHygc_1 airplane LE2ks85I17U_0 bird LFDqskJozig_1 skateboard LFMUePhHPAk_1 car LFZYYpjP3FA_0 knife 
LF4xVBfV5SI_1 bird LGRkVRP-RTs_0 car LGgzD_ng3aA_1 bear LGrMlBi0l6Y_1 boat LGuSLUeKcTo_0 bird LG0w1oTdXgY_0 bird LHEuYW96FG0_0 bear LHEuYW96FG0_4 bear LHbVe_bjGp0_2 dog LHbVe_bjGp0_0 dog LHbVe_bjGp0_1 dog LHmvAqv6kYE_0 zebra jFneoJr36o8_0 car jGCw13fkf0Q_2 motorcycle jGPtq4pO8Ug_0 car jGTNsTUkNUw_0 cat jGTr1LSaGGw_1 bicycle jGTr1LSaGGw_2 bicycle jGTr1LSaGGw_0 bicycle jGlNsqDOz8Y_0 horse jGqRX9IwGI0_8 bear jHK3JYa_Ypg_0 umbrella jHM867g1K8k_1 horse jHM867g1K8k_0 person jHy5deaCjQE_0 dog jH_YxkU_JwE_0 motorcycle jINuUqU6sJI_0 dog jIP9FdmB0_E_0 train jIbmC5sed8I_1 airplane jIjEX8I5SHo_1 bird jIjEX8I5SHo_2 bird jInMbuzvtiQ_0 umbrella jInMbuzvtiQ_1 umbrella jI0xgoZ8QDA_0 boat jI1Swlwj_wc_0 horse jJMefDe4r9w_1 skateboard jJR-emvmi9s_0 bear jJR-emvmi9s_1 bear jJf_N_p-Gjo_1 skateboard jJnz3tS1uME_0 motorcycle jKBU4c1AdSQ_0 cat jKv6Q1RRxVM_1 boat jLBSOa5iDgE_0 horse jLR7LmbNekc_0 motorcycle jLXuZdAveV0_2 boat jLXuZdAveV0_0 boat jMNaKigE1eI_0 truck jMNaKigE1eI_1 truck jMVeJ3RbcH4_0 car jMaYIgpjxlk_0 dog jMmjaxXWaUk_1 bus jMo01X2mBq0_0 bus jM79QETqts8_1 horse jNCq29f3J8Y_0 airplane jNE_FcqbQN8_0 motorcycle jNJJgAg79KA_1 airplane jNJJgAg79KA_0 airplane jNKO9msLe34_1 airplane jNKO9msLe34_0 airplane jNSTcIQwl_g_3 train jNSTcIQwl_g_1 train jNSTcIQwl_g_2 train jNllRQ66Re4_3 dog jNn7v2MFg_U_0 truck jNsEePln1_U_0 bird jNsEePln1_U_1 bird jNt8Vn-WKRI_1 horse jN-BXoM15Qs_0 cat jOQ0W0Z_-Uo_0 dog jOl4m5QdOZQ_0 bus jPaVdR2IRu8_0 airplane jPiVFMGvHbM_0 train jPiVFMGvHbM_1 train jPrY_Xz0CDM_0 knife jP5RhcwO4E4_1 dog jP7mwBStU3w_0 dog jQBc1CqjGOk_0 skateboard jQCrA8Bjbp8_0 bird jQXYSlXk7_c_3 bear jQXYSlXk7_c_1 bear jRIy_wUojcs_0 car jRR6sU59uTo_0 airplane jRTkny0bdY0_2 motorcycle jRTkny0bdY0_1 motorcycle jRh5WphQGDI_0 horse jRqdnQ8HlwQ_0 airplane jR7eq8CAmbs_0 airplane jR-Cbp3qBJI_2 horse jR-Cbp3qBJI_0 horse jSS6b2iz2hk_0 knife jSk-3X-hjyg_1 knife jStwl7WfsVE_0 skateboard jTAz5HO8mQw_0 cat jTHDoLyfTLc_0 dog jTQ5A95TKw8_0 cat jTYsK4JKns8_0 giraffe jT1mDaHStHU_0 train jUDnkkvVKNo_0 airplane 
LIw68irBLtE_3 airplane LIzgqx7Ykxw_0 airplane LI286rLHd0I_0 bird LJGQA810BtE_0 bus LJJuw5mLJ4Q_0 skateboard LJhCGLht3Rw_0 train LJhCGLht3Rw_1 train LKe9a7L3vkk_0 bird LKhjmARDv7k_4 bear LKhjmARDv7k_6 bear LKoaXogFTbc_0 dog LKyQ2fBNVmw_3 skateboard LK2-EMocZQs_6 dog LK2-EMocZQs_1 dog LK2-EMocZQs_3 dog LK9zoUrrEHc_0 skateboard LLJiqe0d06I_0 train LLOwSRx9hxo_0 bird LLVr7tG42kw_0 motorcycle LLW1jx3S-Hw_0 train LLjDNseEw0c_0 skateboard LL_DiAJ71rc_0 bird LMGo4BXG4Yw_8 knife LMRH29tlDrM_0 cat LMrDuKEYJ3k_0 truck LM1djNtENzA_0 cat LNQHybwdHRk_0 airplane LNX244qUx7M_0 dog LNntRLW2bHA_3 skateboard LNntRLW2bHA_0 skateboard LNntRLW2bHA_2 skateboard LN6DT1DOaTg_5 skateboard LOBD9yc5YPM_1 skateboard LOMTlGqGyHc_0 motorcycle LOjc-npcSjs_0 airplane LOjc-npcSjs_2 airplane LOjc-npcSjs_4 airplane LOjc-npcSjs_9 airplane LOlUKQgr7Qg_0 boat LOosqz3z8Xw_0 train LOzh9vxSHPg_0 dog LPQv6LdOZHo_2 motorcycle LPQv6LdOZHo_1 motorcycle LPZjxIqs8Uw_2 airplane LPd_Y8gk5uI_1 train LPgmaebC-L0_2 boat LPtcpZXDhHw_0 knife LPvsAAlZI_8_1 bus LP3a2L1ZCyg_2 dog LP8dyCxmCrI_2 train LQAF34GzpMY_0 airplane LQO68Aj4ons_0 car LQRuelaTZd4_0 bear LQRuelaTZd4_1 bear LQT4GnnPhA8_1 dog LQbQVeZrwEk_0 motorcycle LQdP4gNX9Aw_0 bird LQjzonTrY2o_0 bear LQr5vK-X1fQ_0 cat LQ2EDJSNIN0_1 dog LQ2EDJSNIN0_3 dog LQ4z96EA6co_2 bird LRSii99-QIo_1 zebra LRgsl5_TJVg_2 skateboard LRgsl5_TJVg_0 skateboard LRgsl5_TJVg_1 skateboard LRtLr32oPAw_0 skateboard LR7IHIbXtrE_0 bird LSE0KHhFxps_0 train LSMKaXjXnhE_1 boat LSi1i5lSUjA_0 dog LSqIpguEI04_0 motorcycle LSqIpguEI04_1 motorcycle LSvVMD-SF48_1 bus LS8qQoB3Uw8_0 dog LS8qQoB3Uw8_1 dog LTEyQSswTVI_0 bus LTQPc_WVFOw_0 airplane LTQPc_WVFOw_1 airplane LTQPc_WVFOw_2 airplane LTQPc_WVFOw_3 airplane LTaExiLK2S0_2 bear LTaExiLK2S0_3 bear LTaExiLK2S0_4 bear LTaExiLK2S0_6 bear LTaExiLK2S0_7 bear LTjSA_-Q5DU_1 knife LTkuM5IoNV4_0 motorcycle LUCDeZOOhlg_0 cat LUUYKUhaYZs_0 bus LUjqWGI9KSo_2 truck LUphe242a5g_0 train LU4-QjhixQU_0 motorcycle LU4-QjhixQU_1 motorcycle LU__7PPUMTo_0 skateboard 
LVCMA3LXlkc_0 airplane LVfXvn7elFI_0 person LVfrWLnu7T8_0 train LWHshdXjBCY_0 truck LWQhidgjZno_0 motorcycle LWRXboX1o5Y_0 motorcycle LWTYrbFCPl0_0 dog LWY9Y2YVtHA_1 truck jUQUg-qsfgI_0 motorcycle jUWm1Mc1Tno_0 airplane jVEM2JpS4sE_0 truck jVZhyibQ31g_0 cat jV9-Lr_rsf0_0 bicycle jWCpff7m0LE_1 airplane jWCpff7m0LE_8 airplane jWCpff7m0LE_0 airplane jWCpff7m0LE_2 airplane jWCpff7m0LE_10 airplane jWGulD3X0qw_0 car jWIFscsXRmo_0 skateboard jWLv1BQ4PsA_0 bear jWawsbm6dCc_0 bear jWfItNlOURk_0 motorcycle jWfItNlOURk_1 motorcycle jWruD-mHxrQ_0 cat jW4VRs_uVZw_2 airplane jW4VRs_uVZw_5 airplane jW4VRs_uVZw_0 airplane jW4VRs_uVZw_4 airplane jXBBnV6cop0_0 car jXDxesHRKAc_0 umbrella jXLUgu4rET0_1 cat jXkzrsfYgbs_0 dog jX84bwkb-r0_3 bus jYBgSw-woGw_2 bear jYIWAGlIq9c_0 skateboard jYZmjlzKhL8_1 skateboard jYhAd9FFxqI_0 umbrella jY37CiJCKJk_0 cat jY9ihstGQwU_0 cat jZWITYFghgA_0 cat jZZBR49_vR0_0 motorcycle jZiuOZwq7gQ_0 motorcycle jaS19NIXdrk_0 motorcycle jaVgyhuxK_4_3 skateboard jaVgyhuxK_4_0 skateboard jalIqFA40pI_1 motorcycle jalIqFA40pI_2 motorcycle jaoXgM9c7u4_1 car jaovVHNORuA_0 cat jauLT1ElBPc_1 train jauLT1ElBPc_2 train jbN4y-wz5-s_13 giraffe jbN4y-wz5-s_1 giraffe jbN4y-wz5-s_4 giraffe jbN4y-wz5-s_5 giraffe jbN4y-wz5-s_11 giraffe jbhxM5eNgO0_0 train jboQE0Z0280_0 truck jbrhKjPDzhE_1 train jbwSKNFH66s_0 dog jb23jXcxaHE_1 train jb23jXcxaHE_2 train jb23jXcxaHE_8 train jb23jXcxaHE_9 train jb3uct7NumU_0 train jb4crk58m88_0 skateboard jb4672rSRIs_0 dog jcLbvoEUbj0_0 airplane jc2fijpD8vI_0 bicycle jc-IKl7He7U_0 knife jduOxfYHRGQ_0 person jeBcjSSkUhw_0 cat jeFFdyPLUts_1 boat jeWf_4ARan0_1 bicycle je8cw_bajbc_1 cat jfENtrpYNKE_2 bear jfENtrpYNKE_1 bear jfixAXjax5I_1 motorcycle jfixAXjax5I_2 motorcycle jfixAXjax5I_0 person jgAt3qPg7A8_2 truck jgD77Vh-X28_0 motorcycle jgGLyRuFOdk_0 bus jglg4qcOpWw_0 skateboard jg7I2TXyQ2Y_2 bus jhQ4iIJ42Yw_0 cat jhSH0EjNy0k_0 car jhjKdc7FtE0_5 airplane jiAVTB1keAQ_0 bicycle jiCp6fAMISg_0 cat jiJWjndM8hI_0 knife jjDZnXMMhEA_0 train jjKsYbTw1qk_0 truck 
jjNxX05CDNc_0 bird LWv0LbGIDi8_0 car LWxkJ4fux_I_0 knife LWy-Lhb3YEk_0 bear LWy-Lhb3YEk_1 bear LW3bZPt1qrw_5 boat LW7XQWZjBIw_0 dog LXLI-Bzcsf4_2 knife LXLmpEVYE5E_0 train LXgItdZ5DXo_0 airplane LYLuXQRCIJ4_0 car LYXMPTRr40M_0 dog LYXMPTRr40M_2 dog LYmsSNBP634_0 knife LY-hwswMG4g_0 cat LZJjKCpcAWA_1 knife LZ_qufxYP3I_0 cat LaA51BrvHGw_1 truck LaA51BrvHGw_2 truck Lam8oTdJids_0 car LanX2twvMmw_1 airplane LanX2twvMmw_0 airplane Lan3os3aUl8_0 boat LbC7nqh0Uyg_2 train LbEPmGgzUIE_0 truck LbvEMq_DQTU_1 train Lbv8FZelQCM_0 truck LcD_I0Lkw3k_0 train LcD_I0Lkw3k_2 train LceJwFxs3q8_0 dog LdEeXsYfzE0_0 car LdLtHx09mII_0 skateboard LdL-cFGaJqU_0 bird LdRX8-r4Cpc_0 car LdggIc_gAew_0 motorcycle LeAl87F6eS0_2 umbrella LeOCD9rZsSI_0 bird LeX-zqgzN3k_1 bird LeljDmw2CGU_0 skateboard LfAbAKrmMq0_6 giraffe LfAbAKrmMq0_7 giraffe LfAbAKrmMq0_1 giraffe LfatUu2cH3Y_0 car LfbQRAjsucU_0 cat Lf5ebV_NH78_0 train LgVi03EiPlQ_2 train LgVi03EiPlQ_0 train LgZrI3dxws4_0 motorcycle LgrPr2OxWcw_0 giraffe Lgyj-vOk72M_0 umbrella LhdXtQ8SbGE_1 bird LhgyObbNmLI_0 bus LhhzzaKmVO4_2 motorcycle Lhm6JF_1lQg_1 train LhnNboAgtNg_0 cat LhtrfEijGHU_0 airplane LiMriWExmQM_0 boat LiZxvVZfUdU_2 umbrella LiwliE18fA4_0 motorcycle LiwliE18fA4_1 motorcycle LiwliE18fA4_2 motorcycle Lizh5Kae5Nk_2 knife Lizh5Kae5Nk_4 knife LiznFL6_r2A_0 motorcycle LjLWamF9HyA_0 giraffe LjjGe9bnQ3Q_0 train Lj0zBxRWoIU_0 skateboard LkFbAjpWRAw_1 giraffe LkFlT3d8MuQ_0 airplane LkmioXgRyo4_0 cat Lk7Z-AUDCuQ_0 cat LlA5ioDqRns_2 bus LlA5ioDqRns_1 bus LlNCPsiSjOU_0 airplane LlS3_VvB4Nw_0 truck LlfRY71K2AU_0 truck LliRBHO1A_E_0 train LlplZ9JJtQw_0 dog LlplZ9JJtQw_2 dog LmFx-lJ6-_M_1 truck LmR0Ur4owgw_0 bicycle LmT8BFH5c7k_0 umbrella LmYKmKucl28_0 truck Lm4mghtFu-I_0 train Lm5GStt7KBw_0 truck Lm5GStt7KBw_1 truck LnGeYd1AsoA_1 bicycle LnKLql5jAXo_0 train LnLlD-mNTtE_0 bear LnPyjqgA37I_0 giraffe LndUw9o_3ME_0 skateboard LnhmeU6oRBE_0 bus Lntuuj_mi9c_3 knife LnyfbZ7-fP4_1 umbrella LnyfbZ7-fP4_0 umbrella LnyfbZ7-fP4_2 umbrella LnyfbZ7-fP4_3 umbrella 
Ln_tNsQVuwc_0 dog LomkA_DJyEM_1 bird Lo2GqBe8-Qc_0 bus Lo8Q0MdVi9A_1 bear Lo8ZEKusM1o_0 dog LpXfY3oQDIc_0 skateboard LpXfY3oQDIc_1 skateboard LpnkxmohHZ8_1 airplane Lpt6bE36Uuw_0 train Lpt8i9V2MK0_1 train Lp88aaB29zE_0 zebra LqOv_DqIWEk_0 boat Lqf8Q1pPNFg_1 knife LrIVNsObdso_0 bird LrKKU5rjq38_2 zebra Lr-9DI7T7JE_0 bird Lr-9DI7T7JE_6 bird LsdHOclMPh4_0 dog LshP_zqoBc0_0 knife LsuQhEjteSE_0 dog LtGXT385l_I_1 dog LtabCE1oaCw_0 bird Ltt24ke9SIA_0 bicycle LtyHCo5uPrQ_0 umbrella LuA9aRIic7s_1 bird LuM1ie5yy70_1 umbrella LuM1ie5yy70_3 umbrella LuQiLJ7-B-8_0 cat LuQxQm7FqD0_0 cat Lua1id9drCA_1 giraffe Luv05fYUS1Y_0 skateboard Lu6WLASNWIM_0 truck Lu6rn2EQSEM_0 motorcycle Lu6rn2EQSEM_2 motorcycle LvPDEznT9Yo_1 bird LvgprOdn070_2 truck LvhxnDPWfXw_0 knife Lvv3Ei45X_4_1 knife Lvz3fP96sew_0 dog Lv7JaIYWXV4_1 dog Lv8u2aPVHmc_2 bird LwChAirlUno_0 skateboard LwMepJ25LgQ_0 bear LwPB4qPCelk_2 car LwPB4qPCelk_0 car LwgyjrFlc5M_0 bicycle LwiTfwL3bCs_0 car LxAhZAbzn7k_2 bird LxjlAGLccRw_0 motorcycle Lxlu3NusDCM_0 bicycle Lx0IybSITTc_0 boat Lx25sZ_GeqA_0 motorcycle LyOo_B0KLAs_0 car LyReFCR-oq8_1 bicycle LyReFCR-oq8_0 bicycle LyiT3ute8W0_0 bird LyiT3ute8W0_1 bird LyiT3ute8W0_3 bird LyiT3ute8W0_4 bird LyiT3ute8W0_5 bird Ly-uIzZCdn0_1 bus LzMxggGTH1I_0 motorcycle LzP0t153jKw_0 skateboard LzY_TxIbKpw_0 train Lzk6uj8FMsE_0 cat Lzp-Yej0-7E_1 bird LztNNlg_fXs_0 knife Lz0Gxxs0FUE_2 bus L0IXFlnu6Qg_0 motorcycle L0US3Aiu1q0_0 truck L0kRKO8zzsI_0 bird L0kRKO8zzsI_3 bird L0kRKO8zzsI_1 bird L1EZ_RVwD8E_0 cat L1LQOPj7NBs_0 truck L1U2YrjRao0_0 bear L1VgJBGpBz8_0 bird L1iiOGDSByA_0 motorcycle L19ZzBwAHrU_0 knife L1_86Xd176w_3 knife L2Efv5kJpc0_0 skateboard L2FE5Lr0wnY_3 bicycle L2FE5Lr0wnY_4 bicycle jjZl3tMuO6w_0 dog jjcoVigCzgg_0 skateboard jjk9P9gQq3E_0 bus jj-p0K2XoQY_0 boat jj_pv9SFrnU_1 umbrella jj_pv9SFrnU_0 umbrella jkGvuOC8azU_0 motorcycle jkGvuOC8azU_1 motorcycle jkKU7T0wpj4_0 bus jkdEq1MRNws_0 cat jkkk9vsCYVA_0 car jkqKyvow-ww_1 skateboard jkqKyvow-ww_0 skateboard jk2gGx6dIWA_0 train 
jlA3_oF9j-Q_0 motorcycle jluiJgeyCa4_0 truck jluiJgeyCa4_1 truck jlu4Ry8dDus_0 cat jmXmA9egY4s_0 bird jmXmA9egY4s_1 bird jmeVwD4p83w_0 umbrella jm8AZ0aSF0U_0 motorcycle jnD_9KMnzpk_2 skateboard jnD_9KMnzpk_1 skateboard jnQYikiCbAM_0 bicycle jnQgVTaiaXk_0 train jnSm3vCtu1k_0 dog jnu28BEM2j0_0 bird jnwQHd-sNW0_0 cat jous_VGiSK0_0 bicycle joxEhiwL-qg_1 skateboard jpBcdceCHgY_0 skateboard jpCdMdRzmuY_0 cat jpuFdyVJJwQ_0 motorcycle jpuFdyVJJwQ_1 motorcycle jpyidnScqNQ_0 umbrella jpzKefnhMA4_0 train jqHtlrHk5Cw_0 dog jqO4FvS_v54_0 boat jqRXcc7rPaY_0 cat jqWXHWqSVX8_0 train jqu6Gjc1hCE_0 person jq9ZPuTO7Rc_0 umbrella jrAyEPgy1LM_1 truck jrLRiCFtlvY_0 skateboard jrNGiQLJ0ug_1 train jrg8oKSN6bk_1 bird jrg8oKSN6bk_0 bird jsJprPZCPvA_0 boat jskm6kDOao0_0 cat jslKL8yQ7v4_0 bird jslKL8yQ7v4_1 bird jsp_sWu7g7Q_1 bear jsx0cE948y8_2 train jtQGgQPHofk_0 boat jtWerSK0atA_0 umbrella jtqUFmuGnVs_0 person jtx5yVxuLzA_0 bicycle jtx5yVxuLzA_2 bicycle juC5lVOX-R8_0 bear juC5lVOX-R8_1 bear juMoEfLbbI4_11 bicycle juUIMSiDGm0_0 umbrella juownJlkGfA_0 train ju08Y0j4rAI_1 car jvKKm9UbcbE_0 cat jvKqk7Yfq5Q_0 truck jvdYM-W5Kmo_2 bear jvxjOOQa_JQ_3 truck jwxSjxJVyOc_0 dog jxIyftPYPsc_0 cat jxIyftPYPsc_1 cat jxlDJ0D2Tec_0 bicycle jxn5iX8buaE_0 truck L2XOsdnKegA_0 dog L2bV5Mh6tLM_0 dog L2e6nVyZ33k_0 car L2gSKheIL48_0 dog L2zsyBTtcqE_0 bird L21bM4j4bEc_0 motorcycle L21bM4j4bEc_4 motorcycle L21sWlIIkHA_1 skateboard L28I6_ASmq0_0 motorcycle L3F2ir5MPj4_3 skateboard L3Q42kZ8Ap8_0 bus L3oyk4iYySM_0 boat L3urWJiuom8_0 bear L32hlxmCYZU_3 bicycle L32hlxmCYZU_6 bicycle L32hlxmCYZU_7 bicycle L32hlxmCYZU_14 bicycle L4NZ3vAx87A_0 boat L4kK9gTKA3Q_2 bear L4w-P2UsvBE_0 bird L5VC4bXm6Kc_0 dog L508o9A8028_0 bicycle L52ZiKJ5NLM_0 truck L5499EWzDaQ_0 motorcycle L6QaXTuDftA_0 bird L6vLixMpRZg_1 dog L6vLixMpRZg_0 person L63p00d7BPY_0 car L7TR8yCVhN0_0 cat L7ZTQMPeHYo_1 knife L7iHAg6bHw4_0 bicycle L7rQQ4IVPrU_1 skateboard L70Zv9DFAhc_0 skateboard L71JgB-L1mA_0 motorcycle L779-Nw9GV4_0 cat L780lAoEC2M_0 giraffe 
L780lAoEC2M_1 giraffe L8H_7qqaEOM_1 motorcycle L8SF7xF6Ucs_8 bird L8h9dw2kYRA_2 knife L9EAUBlNvLU_1 truck L9LWOPIuvcE_0 train L9L-OlYNdL0_6 knife L9Tx4-RNDqo_2 motorcycle L9Tx4-RNDqo_3 motorcycle L9Tx4-RNDqo_1 motorcycle L9Vt1klujtA_0 dog L90g72YGdVA_0 cat L97eqv7bBCE_0 dog L985IUAQ8u8_1 skateboard L-S4CNhlvlM_0 cat L-w35NTF7vA_0 car L-0JgkugTvw_0 giraffe L_AcMGC96O8_0 motorcycle L_ZdaWupJcU_1 boat L_xPWB4viT8_1 dog L_xPWB4viT8_0 dog MAJonEdmXNA_0 truck MAVqUxAjlbg_0 skateboard MBAPF4RVq7E_0 car MBLHIupmPNk_2 truck MBLHIupmPNk_5 truck MBl4bkFRZUY_2 truck MBl4bkFRZUY_0 truck MBuwlS32gjE_0 dog MC8Lal5Lp5Y_0 cat MC-KkFD07Ts_0 dog MDxAuy6D1ks_0 skateboard MD5P0EFFnUQ_1 skateboard MD8RTKTEaM0_1 motorcycle MEi_ikuUJoQ_0 skateboard ME0CETCuaK0_0 boat jyY5W5HiWUQ_1 cat jyeqCulSuVM_0 truck jy_Dr_R-svo_1 umbrella jy_Dr_R-svo_3 umbrella jzRWRRcWffo_0 skateboard j0BXwDs11NY_0 train j0OALCZbAJQ_0 bus j0ii12pbeag_0 knife j0yk2O6HAHA_0 bird j0_9iwi_dm8_0 dog j1CQLHBLwew_0 car j1NePJe1agU_0 bird j1XwtnPy1Ik_1 bear j1rU13Z_fxc_0 bicycle j1utZs4pDTc_0 bicycle j10ev-4-0Fg_0 motorcycle j11_jPnp4Pc_0 cat j2-VEpDwbyo_0 dog j3X6elDpZ-Q_0 bicycle j4K9kM9p16o_1 bear j4Qv6RH4lPk_1 bird j4U8EcQ8K34_0 umbrella j4daTphUuBw_0 cat j4mpJ3QE8VU_1 cat j4ofs57G2Uk_0 skateboard j4rMKhohDps_0 bicycle j4zZbJTAcC4_0 train j4zZbJTAcC4_1 train j5EP2UNErRE_0 dog j5Evt1HJ2ck_0 skateboard j5ayq3AbImg_2 bird j5uxE5IUOhk_0 dog j6GdrMPrcNU_0 train j6P1j6Ed1Hg_0 boat j6Ybo1yk-lE_0 motorcycle j7v1htyJtdo_1 boat j7v1htyJtdo_2 boat j7xvqf1mrUo_2 bird j707fRdtbEE_0 train j8jip_gthjs_0 train j8s5sMFYoiM_3 train j8s5sMFYoiM_1 train j82ZCaABxl8_0 truck j8-maioFCxo_2 boat j924hdZilyY_0 cat j-MwElKg8Tw_0 cat j-VN0PFvkDg_0 train j-a26pZGsKA_5 bicycle j-r3lQdwYeI_0 boat j-r3lQdwYeI_3 boat j-x8lbwsObQ_0 motorcycle j-0kVn7sEvQ_0 motorcycle j-0-IDS-OD4_1 truck j_DE_vsqSZg_0 motorcycle j_D7oxUpZqs_0 bicycle j_D7oxUpZqs_1 bicycle j_FCzH1rLDw_0 train kABwo7h7ILg_18 bicycle kABwo7h7ILg_13 bicycle kANh1n3sh5M_0 giraffe 
kANh1n3sh5M_3 giraffe kAekmn2pgpc_0 skateboard kAekmn2pgpc_1 skateboard kAhVhIYl-GE_0 motorcycle kAhVhIYl-GE_1 motorcycle MFw-_3fTBzA_0 bicycle MF06s9T8iJA_0 skateboard MF06s9T8iJA_1 skateboard MGFx6Irt70E_0 knife MGMJ6ocyKXQ_2 boat MGQw41RhBfc_0 motorcycle MG9MouhNLjY_1 knife MG96iokcNoY_0 car MG96iokcNoY_1 car MHIEOK-O3Q4_1 bird MHT9BbNzNJo_0 knife MHqZCkvaub8_1 car MHsxwUMk-_s_8 umbrella MIHg2KAYh5c_0 train MIHg2KAYh5c_3 train MIHg2KAYh5c_1 train MIKCpSFDh4M_0 bear MIKCpSFDh4M_1 bear MIKCpSFDh4M_2 bear MIKCpSFDh4M_3 bear MInom2mFpwg_0 skateboard MI2d7Rd8_Zs_9 bicycle MI2d7Rd8_Zs_10 bicycle MI2d7Rd8_Zs_2 bicycle MI2d7Rd8_Zs_4 bicycle MI2d7Rd8_Zs_5 bicycle MJOztUhgARo_1 bear MJvPtT5tzRI_0 motorcycle MJ3I-JfOG48_0 train MJ6b6iOY7CI_0 car MK2aqzY-UTQ_0 cat MLXY5iff2rU_0 truck MLZ5bpXr5fk_0 bicycle MLrWgAcIumk_3 knife MLrWgAcIumk_1 knife MLtRUMzqhDk_1 dog MLwCW5HBfWQ_0 bicycle MLwCW5HBfWQ_1 bicycle MLyrsP65yc8_0 cat MMGw177uo60_8 bicycle MMGw177uo60_11 bicycle MMGw177uo60_0 bicycle MMGw177uo60_1 bicycle MMGw177uo60_2 bicycle MMGw177uo60_4 bicycle MMGw177uo60_6 bicycle MMX4my6X-xg_0 car MMfLN7_khoc_0 skateboard MMwk9bxedYo_1 bird MMxfwNbWaxc_0 bus MMxfwNbWaxc_1 bus MMzNcR3qtX0_0 knife MM9D2A52FM4_0 cat MNBfv2S-yco_0 dog MNDWyaUDfAM_0 truck MNKwR4IK04k_0 bus MNnYExmY67E_0 bus MNnYExmY67E_3 bus MNuhuq3FP5Q_0 motorcycle MNuhuq3FP5Q_1 motorcycle MNuhuq3FP5Q_2 motorcycle MORtJq8MelU_2 dog MORtJq8MelU_3 dog MORtJq8MelU_0 dog MORtJq8MelU_1 dog MOR6ErlJIp8_0 giraffe MOcTGHSkER0_0 car MOgN13g3SzU_1 motorcycle MOxIwc0MqZ0_1 car MO5aNU1mc1s_2 boat MPQqmw9gvF0_0 dog MP8ETGMyhnU_0 dog MQAJWDp31ag_0 cat MQimJolkMRI_0 cat MQ5mTW70Ebs_1 train MRzphcX41T8_0 umbrella MSWR-YqRwqk_0 cat MSjYJFNM2HU_0 boat MSjYJFNM2HU_3 boat MSonF1662RI_3 skateboard MSp3-aHmNP4_1 truck MSp3-aHmNP4_2 truck MSvmSEk-UJ0_0 bicycle MSxdHgV7e6o_0 car MS7Emoy0Foc_1 boat MTDl42dubw8_0 bear MTr54KYSQBw_0 person MTvLNcYmHhQ_0 car MT-VkX2ZUYs_1 bear MT-VkX2ZUYs_2 bear MT_GWiXfC2k_0 knife MUAuC-rgc9Q_0 dog 
MUPAcFVQjlE_0 zebra kAkZoxVhM3I_4 train kAkZoxVhM3I_1 train kAkZoxVhM3I_2 train kAkZoxVhM3I_3 train kAmtMpdj5F8_0 dog kAsA28fm6YM_0 dog kBZZqBNk68M_0 cat kBg_1xTx4Dw_0 car kBsc-5sxeTw_1 knife kBsc-5sxeTw_3 knife kCWupS0PNHk_0 car kC0y-y4Y9zQ_0 knife kC4_7iM24Uw_0 truck kC7fdR62Lto_0 person kDU_m-Zhi-I_2 bicycle kDsGVRUxg9s_3 bicycle kDsGVRUxg9s_4 bicycle kDvYbh9_fvY_0 dog kDwVR3eWyA4_0 train kD0shq5M7Xw_1 skateboard kD_zeOiIsTM_0 train kEw-F2KrxLQ_0 train kE3cb1gtxpM_0 person kFihVzuPlGI_0 truck kF9uWuyPP8g_0 skateboard kGB7yQn8jpQ_0 bicycle kGkvBOa6Ao0_0 motorcycle kHCbADkGOsE_0 skateboard kHEfe-TDtS0_0 motorcycle kHkZCi873e4_1 motorcycle kH2Vmad_zzc_0 train kH9YVTvwmpM_0 bicycle kIGuIdHDwIw_0 truck kIasEX-cJb8_0 cat kIqavvGxvh0_0 bird kIyZZm3zk5M_0 train kIyZZm3zk5M_1 train kIyZZm3zk5M_2 train kI14RuB6ab4_1 boat kI9E5m5l4Uo_2 bird kJFQOFR0l0w_0 motorcycle kJJuX1cGFYg_0 truck kJJuX1cGFYg_3 truck kJR59i4f5HA_0 train kJR59i4f5HA_2 train kJR59i4f5HA_4 train kJR59i4f5HA_1 train kJUDpKKsNQ8_3 boat kJYZ-XE8ZEQ_0 cat kJuBcbws_zM_2 car kJuuymSuBLA_3 boat kJ2eEJ07dR8_0 cat kJ4rlYx4HDQ_0 motorcycle kKJAqMzsMHo_0 train kKOKJLrWCro_0 motorcycle kKSyjiL5foc_0 skateboard kKTvKA8cd-c_0 bird kKTvKA8cd-c_2 bird kKeaUBfwuG4_0 dog kKfiOXnjX0E_1 bird kKtawdL8xDU_0 umbrella kLL_YMFYoQw_1 car kLL_YMFYoQw_3 car kLgtAl-xGI0_0 bus kL3r_JUstGU_0 bus kL7sfsNuNVw_0 giraffe kL7sfsNuNVw_1 giraffe kL777xHctO4_0 truck kMMe5H6THlA_1 boat kMuQLvHlZM8_1 skateboard kMuQLvHlZM8_2 skateboard kM3Ml3gsG1g_0 boat kM3yM5qONQc_0 person kNNLDq_wPc4_0 dog kNQYLVUS5ag_1 train kNQYLVUS5ag_0 train kNTqRDpy6Jg_0 bicycle kNVh6uD0bMs_0 car kNlVF3ROFLs_0 dog kOOlwQ0DrQU_1 cat kOjjXFA4JLo_0 bicycle kOksVTxs6S0_0 truck kPEf41FB6w4_2 bear kPH88UubFMg_0 bird kPLn0enV644_0 motorcycle kPPya6oadAk_0 truck kPSuwjI94G8_1 bus kP4KkSrY81s_0 motorcycle kP4KkSrY81s_1 motorcycle kP7xV2Efw9c_0 car kQBqt_vvAUc_0 truck kQHn-cRLiDk_1 cat MVG65Om9g1k_0 cat MVG65Om9g1k_1 cat MVPQRjLFz6E_0 boat MVRf770zXL0_0 bus MVZinfPagDI_0 bicycle 
MVhsNNsDFWo_0 knife MVxJBHYueGI_0 boat MVxJBHYueGI_1 boat MV5174rsbEY_0 bus MV-CnX4Gf7A_0 truck MWGRoXhqRgQ_0 boat MW78cTfzq0c_0 cat MXGO41E37k0_1 train MXVOVBJlezc_1 train MXW5J8Fq8aw_0 bicycle MYW0loI0g8M_0 dog MZJtj9J3P2w_0 knife MZU8lpmJhxg_0 bus MZaYMDyaATI_5 skateboard MZaYMDyaATI_0 skateboard MZfxKiKSuFU_0 train MZfxKiKSuFU_1 train MZfxKiKSuFU_2 train MZr4cAj7j28_0 motorcycle MZtheeh470g_0 car MZxz9C8nBdA_0 bus MZ4A6ItKCn0_2 knife MaApAnpbJwE_0 motorcycle MaNGPVuxXqo_0 bicycle MaUrOzoC1qE_0 motorcycle MaV9LY8Yf7c_1 skateboard MaeWb_sv_KU_9 bus MaeWb_sv_KU_10 bus MaeWb_sv_KU_1 bus MaeWb_sv_KU_7 bus MaeWb_sv_KU_8 bus MalEpweFuSM_0 motorcycle MarA93dcZrA_0 train MbCJqlLjY_o_2 knife MbK94OERQUw_1 bicycle MbK-28LCQ1g_0 boat McV3_FGrKNw_1 boat MccB4r2uPG8_2 bus MctKaOAWQ2g_0 skateboard Mc_qufFsRZQ_0 train MdP8tqMgy-c_0 boat MdcfoMlgxyI_0 boat MdcfoMlgxyI_7 boat MdcfoMlgxyI_6 boat MeGIovLiBUs_0 cat MeNT1BqRoSk_0 skateboard MeR6T05EfeY_4 train MeR6T05EfeY_5 train MedPaDPXclw_0 train Me6y3gzfhGA_1 cat Me7wQZBbtkw_1 truck Me9X6zA_WSI_2 car Me9X6zA_WSI_3 car Me9X6zA_WSI_0 car Me9X6zA_WSI_1 car MfEA9RwWf8s_1 car MfKpwmhyptQ_6 knife MfQe_WreL6U_0 cat MfVLnZLXmvw_0 boat MfYYHsKxgn0_0 cat MfYYHsKxgn0_1 cat MfaYiIkR0D8_10 dog Mfe3mmOd7co_0 skateboard MflUSzEyPQA_0 dog Mf1njOx66R4_0 knife Mf1njOx66R4_1 knife MgR0ON5CM-E_1 dog MgR0ON5CM-E_0 dog Mg7Ve43Durw_0 zebra Mg9oRrgGKv0_0 skateboard MhFgGvNvIPU_1 motorcycle MhOdsv74XK4_0 bicycle MhPIl5JGvTQ_2 dog MhdkxaMWwb4_0 dog MhfYe7VajGQ_1 train MijD0ZqMorA_3 bear MijD0ZqMorA_4 bear MixmJ2mkl18_5 motorcycle kQhvp8FqRRI_0 motorcycle kQ0WAbN3uvE_2 bicycle kQ0qYUhkgXE_0 zebra kQ0qYUhkgXE_2 zebra kQ27FYyayCg_0 umbrella kQ9C8T343Bg_0 umbrella kQ97WPM3Qw4_0 skateboard kROqNf1kadg_0 bicycle kRWaghM9Bng_4 knife kRYejzNzz-k_0 bird kRYejzNzz-k_2 bird kRYejzNzz-k_5 bird kRtAJBnrb0o_0 cat kSnUCbQ4k4c_1 giraffe kSxPGqWydhQ_0 car kSxPGqWydhQ_1 car kTBAPJCn4AI_1 car kTNOY900Hbk_0 cat kTVuc-2UjPI_0 umbrella kTbS3XR-Xhc_7 bear kTdT3aGZVmo_0 train 
kTm1R3GaJzg_1 umbrella kTyJyGREDR8_0 boat kUX28ytNCwc_0 car kUcErGH2rjs_0 dog kU8IsLpAlXg_0 motorcycle kU8IsLpAlXg_1 motorcycle kVCic6S6ITo_0 knife kVmUxntjOEk_1 skateboard kVxw5-K9zZk_0 motorcycle kVyJVrTWLwo_0 cat kVzNGKIHA44_5 giraffe kVzNGKIHA44_2 giraffe kVzNGKIHA44_3 giraffe kVzNGKIHA44_4 giraffe kWHw0OdDAes_0 boat kWHw0OdDAes_1 boat kWo2PlJB2Nc_0 motorcycle kWxJX4oVzMo_3 train kXKTNNclCns_0 dog kXOYPLKJDdI_0 knife kXOYPLKJDdI_2 knife kXVHu_jzgek_0 knife kXj4YpwnHVs_0 car kXliGVQWoAE_0 motorcycle kXwzICrP2CA_1 dog kX-rqtb_n5w_0 boat kYAGyQOUOAw_5 train kYAGyQOUOAw_6 train kYAGyQOUOAw_9 train kYRvBDpWk_0_0 skateboard kYd1dxkZ7Q8_0 dog kYh89aM71_c_0 bicycle kYie2clM8Jg_0 motorcycle kYjiRbFWFuE_0 umbrella kYwzLhWdjYc_0 bird kY1mYWiL24M_2 train kY1mYWiL24M_11 train kY1mYWiL24M_0 train kY1mYWiL24M_1 train kY1mYWiL24M_3 train kY1mYWiL24M_4 train kY1mYWiL24M_5 train kY9lrTOcuxY_1 knife kZNZbhh6P3g_0 cat kZrG7mMww7I_0 truck kZrgKUm3pUs_0 boat kZ1L8FBg_P4_0 cat kZ3A6bY6RHo_0 motorcycle kaKhLfdT3z4_0 truck kaNpALWiNSQ_0 car kadq7fGv_zg_1 motorcycle kao854-T3zw_0 bear kaxFMN_9CfM_0 bear kaxFMN_9CfM_1 bear kazbC0JbsUY_1 boat kazbC0JbsUY_0 boat ka1HMN9Mxho_1 car ka8YGdEujsQ_0 motorcycle kbEenS2dRTc_0 cat kbF3h-YQ7m8_0 skateboard kbuWFd9Vthc_1 umbrella kb2LQHXd2zk_0 car kb-A8wbnvQg_0 bicycle kcBIvi6fhUo_1 bus kcTwHA-N1cg_0 bird kcip1032v3E_1 skateboard kco1LYK4z_w_0 person kdIBzH30zKA_0 dog kdIBzH30zKA_1 dog kdP5V_afg7E_0 skateboard kdRLqCUbWts_0 bird kdUrK5I-cNo_0 car kdU-XJEwZsQ_1 bird kd3DLyL1JMw_0 bicycle keGrBBWcGE4_1 bus keGrBBWcGE4_0 bus kePvCa53REA_0 giraffe kePvCa53REA_1 giraffe kea2UOTXlhs_0 cat kea4eM8Blz8_0 dog ketFGT3U5D0_0 bicycle kexKkPOprms_0 cat ke3yWKL94kE_0 skateboard Mi4HJYsPBPk_0 skateboard MjGAi_5coGY_0 bicycle MjGAi_5coGY_7 bicycle MjGAi_5coGY_5 bicycle MjGAi_5coGY_6 bicycle MjxkMQcgRss_1 car MkF-jfvzRJU_0 bus MkGLvilh-P4_2 dog MkIK8kdqU2I_0 motorcycle MkQzgwai9zk_0 zebra MkYtT0L4_3A_0 truck MktDGOflp1w_0 truck MktDGOflp1w_1 truck Mk82qF_xfzI_1 
motorcycle Mk9tGnGNkkE_0 bird MlLHwysBUiY_0 knife MlVr20XSJMY_1 dog MmQIeOEPu9g_2 skateboard MmQIeOEPu9g_0 skateboard MmQIeOEPu9g_1 skateboard MnE1EjTWbTA_2 skateboard MnGGl7pusvI_0 motorcycle MnGGl7pusvI_1 motorcycle Mnd7aZxjoEg_0 bird Mnvqegl_fME_1 car Mnvqegl_fME_3 car Mnvqegl_fME_8 car MnyV8-43fRY_0 bicycle Mn2Nul_w66I_1 motorcycle Mn2Nul_w66I_3 motorcycle Mn2_fRbVluE_0 knife MoHDZuwBO4E_0 cat Mog-qUf6B1c_1 cat Mo6Q7lGmAw0_0 skateboard Mp42DoVxbWY_0 motorcycle Mp91b_edytM_1 dog Mp91b_edytM_0 dog MqAlMygAZto_0 cat MqPKFAIxZpE_0 dog MqlxERdGjdg_0 motorcycle MqvfJOEW4oE_0 cat MrsXy6DL4DA_0 truck MrssB6CtGrM_1 giraffe MrvbaDZm6gY_7 knife MrvbaDZm6gY_8 knife Mrwi7WoPJSs_0 cat MrxYHk0ghfM_0 boat Mr1A4et0ESg_0 bird MsFvL8N-3ds_0 umbrella MsQJkEOyREY_0 bicycle MsY_zz2OeKU_0 motorcycle Ms8x8pjN7Fw_1 bicycle Ms8x8pjN7Fw_0 bicycle MtIjkcXspsU_2 motorcycle MtfpgvzOlW8_0 person MtiQjguNpH0_2 boat MtiQjguNpH0_0 boat Mt_4bFjyYuU_0 cat MuLk_dOouJY_0 knife MuOG8PoK21o_0 bus MuVtFYK_nH0_0 bird MuYixry0epc_2 motorcycle MuYixry0epc_0 motorcycle MuYixry0epc_1 motorcycle Mu51W-lkSEc_0 car MvIYOnRinSo_0 bicycle MvxRpbl0BBk_0 bus Mv6v4w7VDFk_1 car Mv_9l8fWiP4_0 truck MwAM4o2GCuM_0 car MwHQb6ZryRA_0 skateboard MwIKOqSMRwk_0 cat MwLnGflxcqc_2 zebra MwNsM6f6fNY_3 bicycle MwNsM6f6fNY_5 bicycle MwN7iYEim6k_0 bird MwW14_GuwLg_1 bus MwdX3PbgC34_0 giraffe Mwjq136uMe0_0 car kfInF5cUU98_0 motorcycle kfInF5cUU98_1 motorcycle kfLnoXlGBvU_0 dog kfhspLhCU5Y_0 cat kgDOVDDZ9eQ_0 cat kgONObiF8Hg_0 cat kgT-NsRkv1c_0 car kgco3sZv7BY_0 cat kgi1KajW_ZU_0 truck kglv-2P5ow4_4 bus kgrFzgXO9Q8_0 skateboard kgsyAMgjuL4_0 bus kgxQ03-tSek_0 bear kg6RFppR4MM_0 knife khUURgtFYBY_1 bicycle khUURgtFYBY_0 bicycle khVST8w3Zzw_0 skateboard khlqzkfBCfc_0 cat khpJlBWPPr4_1 cat kimZApwsJEY_5 bicycle kimZApwsJEY_6 bicycle kimZApwsJEY_0 bicycle kimZApwsJEY_2 bicycle kimZApwsJEY_3 bicycle kimZApwsJEY_4 bicycle kizrM5CZzPk_0 truck kjBdTAkRijw_2 bus kjM0hJl-L44_0 skateboard kjtOW8OAIeY_0 motorcycle kkC5lqQb0t0_0 umbrella 
kkR7pnou7hc_0 knife kkeBMT1ixs4_0 boat kkkc9xwKGp8_1 skateboard kkvU3dvMkSI_0 truck kk4KuU5X6Lk_0 car klGHWdeD-qw_2 bear kldR5yJFeOo_1 bicycle kldR5yJFeOo_3 bicycle klgANznh5x0_1 bicycle kl2buVrYbX8_0 skateboard kl3_w8_h6ts_0 skateboard kl4RYG6OCIY_2 knife kmZFQEGncaI_2 bicycle kmZFQEGncaI_0 bicycle kmllekf2nKc_0 cat kmoaGUqL6bI_0 skateboard kmvCtYXRUhM_0 truck km7aR2fTJlA_2 knife km-3wnNLVYY_0 boat knDRZU9u-Lw_1 boat knVcB-GeINU_0 car knqi3OAHNO8_0 boat koOxoaMnXZc_0 skateboard koOxoaMnXZc_1 skateboard kphV7yVMBOQ_0 bicycle kphV7yVMBOQ_2 bicycle kqDbbFz-XQQ_0 bird kqDxyoQKFfE_0 cat kqVaHPJzEro_0 dog kq4tOnX3m2Y_3 bus kq4tOnX3m2Y_0 truck krSKV36ocSs_0 bear krvyahlS1z4_0 bus kryv5em-VHk_2 bear ksB15ebtJeM_0 umbrella ksCempldLAA_0 skateboard ksCempldLAA_1 skateboard ksCjOk8r4rU_0 person ksSVtTRXRyI_1 bicycle ksk5uCVKU7Y_0 skateboard ksxTUcFqlZw_0 knife ksx219-g47A_0 cat ktHzii2XMh4_0 boat ktPLKpH7-mk_5 dog ktcodoKjIvE_3 bicycle ktcodoKjIvE_4 bicycle ktcodoKjIvE_5 bicycle MwtWyQiagOk_0 bicycle MwvYg837DFU_0 motorcycle MxEjkI5fRh0_0 dog MxHBWltYQX0_0 boat MxKuZbSiZ4s_0 skateboard MxK1dXmYQU8_0 knife Mxr-1toRi3s_0 skateboard MyS7UVUc55M_0 car Mybir4gfQaU_3 bird MzB160hQlFE_9 giraffe MzB160hQlFE_2 giraffe MzB160hQlFE_4 giraffe MzB160hQlFE_5 giraffe MzB160hQlFE_6 giraffe MzB160hQlFE_7 giraffe Mz9ZTHPYJxk_0 dog M0Ga521uzoA_0 dog M0qQQArQdTU_0 bird M088XJeXBS0_0 cat M1UsEMPrCc4_0 knife M1cuEQppjNk_0 bus M1p1DBTuqmk_3 bird M1p1DBTuqmk_1 bird M1xxFVktlzw_1 bird M1zDeqozcU4_1 bus M2R_9l38IUQ_0 bus M2uSqd8ohUk_0 bus M3CUpLmpRBo_0 cat M3OhLKUgQho_0 cat M3P38sLk0pc_0 dog M3tK5YBjyKI_0 truck M3tK5YBjyKI_1 truck M3tK5YBjyKI_2 truck M4CENhQ5vWo_0 cat M4Hqq89bZiE_1 dog M40QOQPocV4_1 car M45MyaeogPU_0 car M5BEqJFfJYw_0 skateboard M5NRM7UQv5c_0 cat M5bLnqKDa1U_0 bear M5kj9SEKNAo_0 bus M6POMFHs-ec_0 bus M6bin6X9FSI_0 knife M6eRY9q89aQ_2 truck M6tXmkLy-2Y_1 bird M7465rUWBzY_1 bicycle M8Lhm-CgqH4_0 cat M8cFdveIy4g_0 cat M8drJLCDOL8_0 cat M8ea7gWeDQ0_0 bird M8f0VhN1ZnY_0 umbrella 
M8i-DGTEw9M_3 skateboard M8i-DGTEw9M_1 skateboard M8sMZ15CLIU_0 skateboard M9McwXGtZnI_0 cat M9QtHKxypyI_1 knife M9UrZSSK1MA_2 motorcycle M9eiVambl5s_1 dog kuRfhOqyXeY_0 umbrella kuzyHmE3SI0_1 knife ku68PhgE8bk_0 bird ku7gA5ZLk1Q_0 cat kvFSzJHIsVg_1 knife kwDNLBoEQq8_0 skateboard kwDX0_2B3A0_0 umbrella kwGGXvXtsjI_0 truck kwY370WQYUg_0 car kwbt-wHLPkY_1 car kwlcEg9G1bE_0 knife kwsp30ykR4U_0 boat kxeSYfuQl-I_0 bird kx1bCqhLcbY_0 bus kx5tIvM-9dE_0 knife kyAEyX8zMWQ_0 truck kyPXCwNh7Rg_0 cat kyW_f8sv5iw_1 giraffe kye1Q_k-_Gc_0 bicycle ky1FAcaT3UE_0 dog ky6uivneqIg_0 bird kzblQQcpTdk_0 skateboard kzblQQcpTdk_1 skateboard kzfxn1c7_xc_10 bicycle kzg7y0rERTY_0 bicycle kzi3zDJR9Bc_0 dog kzpJkBQxgE0_1 bicycle kzp3UEwOkJA_0 knife kzw5a8z9cXs_0 bird kz6HYpF3pLo_0 dog k0cUZwgJzB4_0 umbrella k0uDHQea9sg_1 dog k00mpKYHsuU_0 skateboard k1F_TFA3Bbk_0 bicycle k1LrJEfFKag_0 motorcycle k1NVg8uaPE4_1 skateboard k1Q5wms4euk_0 bird k1TOwPACsvY_2 giraffe k1TOwPACsvY_3 giraffe k1vz1ZSBSoo_0 bicycle k2O0XiVn5kw_0 skateboard k2QiX8c3t50_0 bird k2SEBRgras8_3 car k2Z0W54JwB4_0 skateboard k2bQG12smw0_0 cat k2imYphEfo0_0 car k2ocqQxARpQ_0 skateboard k2yx7C__3wY_1 cat k3HKP8CV3CY_0 bus k3LnBcn5zlU_0 boat k3QuANDFgVQ_2 boat k3QuANDFgVQ_3 boat k3QuANDFgVQ_5 boat k3fZgTTMj1g_0 giraffe k3fZgTTMj1g_1 giraffe k3im7HEvSCI_1 bear k4D-Ql4Fg7c_1 bird k4PWQfz5NGo_0 motorcycle k4U1AP6KV4E_1 skateboard k4c6D3ZsdL4_0 truck k5Pp6BYXono_3 bear k5R3cUyyyWo_0 car k5nvWBLlS2c_1 boat k5nvWBLlS2c_2 boat k5vlZTySXDk_0 knife k5yJqWnvZzg_1 bus k5yyV32-nOM_0 motorcycle k5yyV32-nOM_2 motorcycle k55nlQZwGz0_1 boat k57rVPEq54k_1 bear k57rVPEq54k_2 bear k6Bwd6af64Y_2 bear k6gc4du1FqU_0 truck k6l0hwjaeMA_0 motorcycle k6l0hwjaeMA_1 motorcycle k6l0hwjaeMA_2 motorcycle k60P5osD0rU_0 bus k64DU45ej5M_6 car k64DU45ej5M_0 car k64DU45ej5M_1 car k64DU45ej5M_2 car k64DU45ej5M_3 car k64DU45ej5M_5 car k640Wtpq-mU_3 umbrella k640Wtpq-mU_0 umbrella k640Wtpq-mU_1 umbrella k7TCyTff1aM_0 truck k7uTiiG-Ez0_0 bus M-8Zbj9mU9U_0 boat 
M_miIFgy1Ro_0 bear NAGKrEjU7Sk_3 bird NAGKrEjU7Sk_2 bird NAkFaQBgOvo_0 truck NA9hxGtSLCM_0 bird NA_DgxP18c4_2 motorcycle NBE97NAHACk_0 giraffe NBdhmPgSS2o_1 motorcycle NCNgKQCU8BM_1 bird NCP6Cna8jtY_0 skateboard NCQ5340WhY8_0 car NCSygygs2Dw_0 skateboard NCWp95If4uM_0 motorcycle NCazYWutlOc_0 boat NCoJmkRt2nE_0 bicycle NDUhlmH9Rz4_0 cat NDYT9jTE54Q_0 bus NDYT9jTE54Q_1 bus ND_GyhH6zgI_0 motorcycle NEQIR06VuP4_1 giraffe NEQOLn6QBuE_8 bird NESQ70PhJU0_1 boat NElB9jKqhLc_0 dog NFjb4XxSoHI_0 skateboard NFye-cUktCg_0 bicycle NFz_zzAU_Hc_2 skateboard NFz_zzAU_Hc_0 skateboard NFz_zzAU_Hc_1 skateboard NF_o01qBrtI_0 skateboard NF_o01qBrtI_1 skateboard NGCjiEfG4C8_0 skateboard NGM0enFRa7E_0 car NGO_7sJEeyk_0 bus NGRBYn2OatE_0 motorcycle NGU-5KGKEJ0_0 bear NGmJtkXyJpc_0 cat NGmKyRRNL_E_0 bird NGw5-auup1k_0 car NG7FgzWn8Gw_1 giraffe NG9SIDqXvic_0 knife NHlayOfSZJc_0 dog NHlsNDcNZqU_0 cat NHmxckr22ws_0 skateboard NIPnaoHgzdU_0 bird NIPnaoHgzdU_1 bird NIPnaoHgzdU_2 bird NIvYcbJIYdA_0 cat NI_YQKOQEvM_1 bird NJeNAw2RnNc_0 bus NJeNAw2RnNc_1 bus NJeNAw2RnNc_3 bus NJeNAw2RnNc_4 bus NJ0O48Pkn2k_0 bird NJ9DpLHaGl8_0 skateboard NKLemqoJ_hA_0 cat NK4942wyYgk_0 bus NLKK4VUbuuI_5 bear NLp8voZylqM_1 knife NLsGPrwnRug_1 bus NLsGPrwnRug_2 bus NL3CG8KGwis_3 giraffe NL5j52SH-yQ_0 bus NL9o4JgV25A_0 dog NMJB2K_UOLc_0 dog NMJLv-oYyNc_1 truck NMJLv-oYyNc_0 truck NMecCV-gtK8_1 dog NM7OVTITkaA_0 cat NNCjf9Qu2RI_0 bear NNHOtBx0FOY_0 motorcycle NNkLZRrMEv4_6 boat NNl4nD5_b_o_0 skateboard k8NHRbiB2Dc_0 dog k8OEoDpqSLk_0 truck k857sWPtmcs_0 cat k9BuU6A21DQ_0 skateboard k9HxprAZods_0 umbrella k9KmR4MNI7o_0 cat k9KtLV0IMgI_0 dog k9PCp-8PFZ0_0 dog k9PX9l8Fnlw_8 bus k9PX9l8Fnlw_0 bus k9PX9l8Fnlw_2 bus k9PX9l8Fnlw_4 bus k9PX9l8Fnlw_5 bus k9VDPqCbqj0_0 bear k9VVUD9wVxk_1 boat k9zLR7VKKpE_0 skateboard k9-PLHxxGHc_0 car k-DOe-pD_MY_0 dog k-Nl-39bZnw_1 skateboard k-SqR4BEw3s_4 motorcycle k-SqR4BEw3s_1 motorcycle k-izgq4Wj4E_0 dog k-izgq4Wj4E_1 dog k_X3oj841SQ_1 motorcycle k_e_YVhclfg_4 truck k_e_YVhclfg_3 truck 
k_iI2BJQpqo_0 cat k_jXopyxdo0_1 boat k_sLp7QKSu8_0 boat k_tkXRmI_O0_1 skateboard k_tkXRmI_O0_0 skateboard k_vnzrtDfAw_1 cat k_5e1d-vpBU_3 umbrella k_5e1d-vpBU_4 umbrella lAA5eXeYwpo_0 cat lAFonTk_uSA_1 bear lAI9mfwKMM8_1 dog lAQxdRz4PlQ_0 bear lA3btp7QIxg_0 bus lBH0KOGRswc_0 car lBXWSN3ciPY_0 motorcycle lBsOiAR5dAk_2 bird lBsOiAR5dAk_3 bird lBsOiAR5dAk_4 bird lBsOiAR5dAk_7 bird lBsOiAR5dAk_8 bird lByHH7yvxpA_0 boat lB7j8Z4gGtQ_0 car lB_bnqdnexA_5 bird lB_bnqdnexA_1 bird lB_bnqdnexA_4 bird lCYwepuY9qY_0 truck lCZry6FRpsk_0 bicycle lCf6uL_GkYw_2 bear lC0yidNH6B8_2 bear lC4BoFWvHs4_3 bear lDLYtKqlr5M_0 bus lDf9b9Kr-24_1 truck lDgzFjqokik_0 boat lDqk6pRbY3M_0 bus lDybC3N70so_0 car lD63JOjqTDg_5 bear lD63JOjqTDg_9 bear lD63JOjqTDg_10 bear lD63JOjqTDg_0 bear lEG4DGADyEU_0 bird lEIbERGmlJw_0 umbrella lEWOScSt-Ks_0 dog lEaMfPfi9wI_0 truck lEwJRP_FRW0_0 dog lFYONMOuW_o_0 truck lFqrTC4j9AU_0 cat lF3vWAJRnek_0 motorcycle lGPyv8wlqaw_1 knife lGaQV9YhOac_0 motorcycle lGrVM91Cav8_0 person lG5xlt4odEs_0 truck lHKKhuJtJ9A_0 knife lHXHAD73KC4_0 motorcycle lHX5VdjDPMg_0 knife lHuiaqmISAM_0 motorcycle lHyHQQF-8K0_0 car lIE0SbW_gCY_0 dog lIH_in2H5ds_0 knife lIrvgqkirS4_0 car lIrvgqkirS4_1 car lI6hnnAL_54_1 skateboard lI7VzYQQ8DY_1 bus lJBeZTzXuSk_0 umbrella lJJU-pzIbgs_0 boat lJKxeHgRugQ_0 bicycle lJa2bLMFljk_0 knife lJa2bLMFljk_1 knife lJa2bLMFljk_2 knife lKC5LtWPL6s_0 boat lKEgqjR4HeU_0 bicycle lKEgqjR4HeU_1 bicycle lKJZ4AYoO9g_0 car lKJZ4AYoO9g_1 car lKJZ4AYoO9g_3 car lKJZ4AYoO9g_4 car lKJZ4AYoO9g_5 car lKJZ4AYoO9g_6 car lKJZ4AYoO9g_7 car NOEix5l-1TE_1 bear NOVqPOoUWiM_2 bear NOmc38WuhVA_1 zebra NOmc38WuhVA_2 zebra NPX9qxaZXGQ_1 boat NPc_EhpqV9I_0 cat NPlhHkKnD-o_3 bird NPlhHkKnD-o_1 bird NPnIcXU4TO4_0 truck NPnJoNuZw64_0 bicycle NP2YBNp1eMo_0 bus NP8MrtR7UMQ_0 skateboard NQRWmK2DAwo_1 skateboard NQ7XVf2jPCk_1 bear NRBtrgg-ACI_0 umbrella NRGqiXyM4H0_0 bus NRRxMVw0Fv0_0 umbrella NRV62o4HAaI_0 dog NRkeO8cWvlY_0 skateboard NSEdAs2W7io_1 bus NSrCO0JVjrQ_0 bus NS6Z7neTE58_2 bear 
NS7vapDr5vE_0 dog NTJsuoSzIX0_8 boat NTi-7LowE5E_4 bicycle NTi-7LowE5E_0 bicycle NTurL251ndw_0 bird NTyAmrmpD-w_0 cat NUOXJlGoyJk_0 motorcycle NURGtF3McGo_0 knife NUU3df9bDmc_0 motorcycle NUhIeMVykto_0 truck NUkuVMR_rDA_0 truck NUkuVMR_rDA_1 truck NUo3_VxkQWs_0 truck NUo3_VxkQWs_1 truck NU5WfPjxGO4_1 cat NU60EZnPyy8_0 bird NVAF-TWNge8_0 boat NVeRtjaMVVM_0 car NVz1RXwlQQM_0 skateboard NWOVEKbfu_M_2 cat NWwoSS6oanE_0 bus NW6ZEfS5YY0_0 dog NXHWi70uXME_0 motorcycle NXU1Yxq08KQ_0 skateboard NXe33k8YYzQ_4 truck NXe6DkOAbbo_0 cat NX2FQE2RlgI_0 dog NX2FQE2RlgI_1 dog NYBxFsoPtLU_7 knife NYBxFsoPtLU_2 knife NYVtLPBMGDA_1 dog NYVtLPBMGDA_2 dog NYpkdx_Wzos_0 bicycle NYrd2o8DQhw_0 bird NYsYKDH1T0Y_0 bear NYs9voRwmTk_2 motorcycle NZGyAc3mNmM_1 skateboard NZOBtVvtpfo_0 bird NZoU9njpjBc_2 bird NZoU9njpjBc_1 bird NaOwM5jaBb0_0 bear NaTP9E6Ee6k_0 motorcycle NahvbbnqXN0_0 knife NaszpQMnSmM_0 skateboard NbXn5vr55Ik_0 motorcycle NbnAyKWQOgU_2 truck NbnAyKWQOgU_3 truck Nbz45at2suY_0 bird Nb1nL_IG2Tc_0 umbrella Nb4FhqzK_80_0 bird Nb9Ee0cdc90_4 knife Nb9Ee0cdc90_0 knife NcD7EzR9VKc_0 cat NcODwqAl8wA_0 bird NcODwqAl8wA_1 bird NcnPt-ksZkA_0 motorcycle Ncnr9xhL4RE_1 bird Ncnr9xhL4RE_5 bird Nco2IqVnrXc_0 cat lKiN4UeEuCQ_0 car lKrgSHU_lF4_0 motorcycle lKrgSHU_lF4_1 motorcycle lL9OwfLG-LQ_0 skateboard lMPus-gGijc_0 train lMw3GHYr5nI_3 bear lM2lr9vONXE_1 bird lNDNEdNtW4w_0 umbrella lNLvw0Ga8IY_1 skateboard lNLvw0Ga8IY_2 skateboard lNLvw0Ga8IY_0 skateboard lNShteFjBFI_0 bird lNh4Dhf0JC8_0 truck lNj5zp4Gbsw_1 bird lOGti3Hfk6A_2 bird lOglyCevyZo_0 motorcycle lOzlZJwo_U8_0 motorcycle lO0DJaFrguw_0 motorcycle lO0Nas9ogL0_0 bird lPG5xsRX0U0_0 bird lP3Jv00bEG8_0 bear lQf2-zTERI8_0 motorcycle lQ8AFjrjX64_0 umbrella lRSTcmXYwzM_2 knife lRyY7rtPGJ0_1 dog lRyY7rtPGJ0_0 dog lR-HPtCgbFY_0 car lSefRz_ad2I_0 person lS7IFw-rHNE_0 car lTNivynkdBQ_0 bear lTNivynkdBQ_2 bear lTW53YPXtYw_0 umbrella lTgxSRoCADM_1 boat lTgxSRoCADM_2 boat lTgxSRoCADM_3 boat lTgxSRoCADM_0 boat lTyeSMENfFI_0 dog lT1oYaEt3l0_0 skateboard 
lT1oYaEt3l0_2 skateboard lT1oYaEt3l0_1 skateboard lUEz6tmtuxs_0 dog lUQr1JtEFAM_0 cat lUSPy6WOhvw_1 boat lUk_G-9RjSE_0 bird lUq042i-r3E_1 dog lUq042i-r3E_2 dog lVCS7_AhLDg_0 cat lVKT0DahELk_0 bus lVKT0DahELk_2 bus lVOqUh5DjZE_0 bicycle lVWFKjMWyF8_0 truck lVWFKjMWyF8_1 truck lVWFKjMWyF8_2 truck lVoO_SiGxpw_0 cat lVohP88BOwU_1 giraffe lWDh4SPr76A_1 train lWGBmSVTvwo_2 skateboard lWLYqz3RhXs_0 truck lWkC8ABD6YI_0 knife lWnVG1WyzTQ_0 dog lW8axrSg7EY_0 dog lXJGVOcVinA_1 truck lXkkzYM416M_12 knife lXkkzYM416M_8 knife lXkkzYM416M_11 knife lXshoTSoReY_0 motorcycle lXshoTSoReY_1 motorcycle lXshoTSoReY_2 motorcycle lYC47pEoyKc_2 skateboard lYEiGk0pa9w_1 dog lYP4KB7dANc_0 truck lYcCLy33mJA_0 truck lYcCLy33mJA_1 truck lYrLCKi7wHw_0 knife lYrvoVOM7i8_1 truck lYrvoVOM7i8_2 truck lYzirpo9X4Q_2 knife lY38gkpHWQA_0 dog lZWg3rt2bp4_0 truck lZWg3rt2bp4_1 truck lZWg3rt2bp4_2 truck lZgIg28WsqA_1 dog Ncs0SIaAZjk_0 skateboard NdDPhB7JjOc_1 car NdFMcVN8fkc_0 skateboard NdFMcVN8fkc_1 skateboard Nd2smOOuPs4_0 truck Nd5Cyi1P2AQ_0 person Nd5Cyi1P2AQ_1 motorcycle NesRw9JE-bc_4 dog NesRw9JE-bc_0 dog NesRw9JE-bc_1 dog Ne_T9PyoaOA_0 truck Ne_T9PyoaOA_2 truck NfQ_F7iyFT4_0 bus Nfoq-vLwXMs_0 cat NfuM3ceM9Lg_0 bird Nf4iPszryRI_1 truck NgA6Mi5Qj6Y_1 car NgHJhpedfLw_0 cat NgfJ42fUH10_0 skateboard NglZtOBkn1M_0 boat Ngp2Yvug4N4_0 skateboard Ng7YPssESZs_1 umbrella NhDdHfwovA0_1 truck NhHYQ1QBPq4_0 cat NhJWY87UJGA_0 motorcycle NhKgTGZXrk4_0 motorcycle NiN42Yupn8k_0 motorcycle NiQLFJ_8gI0_1 bird NifFA8VfbMY_0 truck NjPnw9Ofph8_1 bicycle Njr2CQDoQ0w_2 boat Nj1tu2uzjf8_0 umbrella Nj4IqLuQBd0_0 car NkHiSqSViG4_3 truck NkSVC1QmlzA_2 boat NkXF30FQWUs_0 bicycle NkajkrLx-Pg_1 giraffe NkdGD4jRmVk_2 skateboard NkdGD4jRmVk_3 skateboard NkdGD4jRmVk_4 skateboard NkvfxcYCIfg_0 person Nkxm_Grldgg_0 boat NlKX0Q_a4qM_0 bicycle Nl2e8ERoEYk_1 skateboard Nl27zjpvGZk_0 cat NmCxdejUxjE_2 umbrella NmGnWjSHIGc_1 dog NmGnWjSHIGc_3 dog NmGnWjSHIGc_0 dog NmHo6hH22gY_0 cat NmRjRjuwWGU_0 umbrella Nmm4H7xWWeE_0 giraffe NmnOIU5yzmo_0 
truck Nm3Wkz8ClY8_4 bicycle Nm3Wkz8ClY8_0 bicycle Nm3Wkz8ClY8_3 bicycle NnQubFQHcUU_0 cat NnSwVsUnfj8_0 dog NnV7SskfNiQ_1 bicycle NnYCP4YouSI_0 skateboard NnYCP4YouSI_1 skateboard NncDYgsTFic_0 motorcycle Nn1fsXlRDQg_7 bird NoKz0p_h8xA_0 car NoRnxJ4D8OY_0 motorcycle NoglbvaRxAM_1 car NoglbvaRxAM_2 car NopFymjXZBE_0 car NosN0T3He9Y_2 knife NowQILLv6pM_1 motorcycle NoxncYznLDw_0 motorcycle NoxncYznLDw_2 motorcycle NoxncYznLDw_3 motorcycle NoxncYznLDw_5 motorcycle NpbXizTCNgs_0 motorcycle NpciaYlS9Bs_2 skateboard NpptiWtuy7U_1 bird Np0p_ITfRiE_0 boat Np0p_ITfRiE_2 boat NqA0sKGQZbc_0 bird NqD8w0_R9y8_1 motorcycle NqLEhuNiS-A_0 knife NqzZbJJl3E4_0 truck NqzZbJJl3E4_2 truck Nq-mC-BLk1c_0 bird NrGByfXIMJc_0 dog NrGHtOFFLxU_0 motorcycle NrJIz8M3oNM_0 boat laSVNAwUDQc_0 giraffe laiFgjfWMS8_1 bird lajujsJ1J4k_0 bird lajujsJ1J4k_1 bird lauIpA9lVMo_0 skateboard la0ygpbR6t4_0 dog la0ygpbR6t4_1 dog lbCW72FyaQ8_0 umbrella lbC8rsjkZ8Y_1 truck lbDdPmkMwnw_0 motorcycle lbSldeZXn6I_0 skateboard lbZo-rTovyc_0 skateboard lbod3X-5Z40_4 bus lbod3X-5Z40_5 bus lbzHPZpNNjg_0 dog lcSqXrVIbwo_0 motorcycle lcWTw6rAYfI_0 cat lcv8jXnPWQU_0 cat lc6jM9I3ffc_0 motorcycle lc8hZxMLAr4_0 truck ldjVc4u8LUc_1 motorcycle ldqpSPYa-3U_1 bicycle ld5g39_bixY_1 skateboard ld5g39_bixY_2 skateboard lew1kgMUujc_0 car lfPmXUBRa-k_1 bird lfVb7VtGUAI_0 person lfYoLXfvmyo_0 bus lf29DRtjGcY_1 truck lf29DRtjGcY_2 truck lf4Xwro4NOQ_5 bus lgLHq8p_CnA_0 truck lgVXhalKM3w_0 boat lgne-5wGRTg_4 bird lgwnVArDAa0_2 bear lg3udJdBBoI_0 dog lg_4H9FLVog_0 dog lhBsZjQzf8Q_0 motorcycle lhEN_T9FduQ_0 knife lhoMpa49rvU_0 umbrella lh1Brsyb0aE_0 bicycle lh21_LSx_G8_1 dog liDzsyAmMJQ_0 motorcycle liThgzeBkVY_0 cat lite73A-c3o_0 bicycle li8IvNy_DW4_1 bird ljrwXgV0j9o_0 motorcycle lj3DWkRI_HM_2 bear lj3mqLiqSRw_0 knife lj5bI1M_0ZA_0 skateboard lj-BTMsCDdY_0 dog lj-BTMsCDdY_1 dog lkOFpGLmX9s_0 cat lkYuyUsRfWE_1 dog lkg_nXf_W88_0 bicycle llBtQEKaglQ_2 bird llFPEcbP7m8_0 car llWG8M6Fsrg_1 skateboard llu7uI6yzns_0 motorcycle llu7uI6yzns_1 
motorcycle llu7uI6yzns_2 motorcycle lmCsOrgM7zE_0 cat lmVNyKFiuQw_3 knife lmVNyKFiuQw_2 knife lm-deiNDAW4_0 motorcycle lnFmVwj7oMg_1 cat lnk0OtCMbBc_0 cat ln5IAoaoPHc_0 dog NrX1AnOpS98_0 bus NroEppStyZI_0 bicycle NrvQhlD_Fuw_0 dog NrvQhlD_Fuw_1 dog NsCdsMqUNFc_0 bicycle NsaAbiSbaCc_0 cat NsdCvelNA0g_0 motorcycle NsgZVfgUWco_0 skateboard NsgZVfgUWco_1 skateboard Ns78CA77Hmk_0 bird NtHFEE2Ii0o_0 knife NtQSi_L3_e4_0 bear NttRY9GKNOE_1 car Nt38ikEgqJg_1 dog Nt-UKy4Uq0o_0 car NuOq_HSf26I_0 boat Nucr0ksCppE_0 dog NumUCmB1MLA_0 bus Nu6g6OfLbKU_0 zebra Nu6g6OfLbKU_1 zebra Nu-gGh3BQo0_0 skateboard NvDafPMMZtg_1 cat NvDafPMMZtg_0 cat NvFUKJ9Y500_0 bicycle NvTRLNn1Tk4_0 cat NwC3jHQ65I0_0 bear NwG3zY4-qHs_0 skateboard NwHv08KS8WU_0 truck NwHv08KS8WU_2 truck NwgEA2yRlYk_0 bird NwgEA2yRlYk_5 bird NwlCLmmFUzM_0 truck NwoCpDkRUOc_0 skateboard NwzkWW45Qx0_6 bird Nw1pLrkHm1E_1 cat Nw8ZySxnzIA_0 cat NxPgLux4spk_0 motorcycle Nxgst3FR84g_0 car NyOC1kV5fqc_2 knife NyOVnxlZw44_0 truck NyQlYlDdA1Y_2 skateboard Nyg0BliJTCI_2 umbrella Ny14oMm9C9k_6 skateboard NzIOn70DDCU_0 bicycle NzfwqHNApI8_0 bear Nzqr9pq3W0g_0 bus Nzwcia0dVls_0 bear Nz5AnTEPNKY_3 bird Nz_Dn60wY8c_0 dog N0p_wrAammI_1 bird N0wFxDTDhrA_0 truck N0yYt90fBGo_0 boat N049Vl1eC9E_0 truck N1C5Wk1HQEk_0 cat N1jUvtD_RyY_0 umbrella N1xm5YdzSfQ_0 bird N13r5ZKqAZI_1 boat N2GiHfyj2sY_0 knife N2Y3LmbOWhM_1 cat N2Y3LmbOWhM_2 cat N2e24fXBD58_0 boat N2u1zVHzrfc_0 cat N3D5PnaCpHs_1 knife N3D5PnaCpHs_2 knife N3Iy7f2RrrQ_0 motorcycle N3OIM_qi7dY_0 cat N3VKNNdiRhs_0 umbrella N3ZGT5VDX7A_0 dog N3vCQPsPb7k_0 cat N3x4Fw8PZ04_1 bird N4BazwxnEJU_1 umbrella N4T6B8WAeyw_1 bear N4bUNLwIt-I_0 bicycle N4gBOlxfYUI_0 giraffe N5T8bgYdTg8_0 bird N5cC5-506Yg_0 motorcycle N5uwMT9YWA8_2 umbrella N6FCEWFj0vc_0 truck N6XH-20xsPk_0 bus N6Xl8e3GRcY_0 bird N6gcbwR93B4_1 motorcycle N6rvYTX52x4_0 car loFhsa4OXsA_0 zebra loFhsa4OXsA_1 zebra loFhsa4OXsA_2 zebra loS5Iy7HDhY_2 car loyp0oi9idU_0 cat lpPnun9oDq4_1 boat lqEgRMyazN4_0 dog lqi9uYhr1lU_3 boat lqybkPUTuGk_4 bird 
lqybkPUTuGk_0 bird lqybkPUTuGk_1 bird lqybkPUTuGk_3 bird lrbJ-8myxJA_1 skateboard lrd8TXYq2Co_0 zebra lrgLAWtIFbQ_0 bird lrk-LSpxnaQ_0 bus lrsspehYW2Q_0 cat lrusc_A2xpY_1 skateboard lsQ4p_XwS3U_1 skateboard lsW8rve_6F0_0 bird lsslg2HK3as_1 bird ls7K9Ga_TDo_0 cat ls8cJ6QPPdI_0 truck ltfbVFmlGNs_0 motorcycle ltfbVFmlGNs_1 motorcycle lti3EMrk6hA_0 bird ltyDB0DzJ4o_0 bear luZpSqhxjzc_0 skateboard lujnNrfylcM_0 truck lu4gOMv2LmA_0 dog lvW9JvQnv_U_0 cat lvXow0J0_Z8_5 boat lvpmaJx7Ydo_0 motorcycle lvxwGSPs5eo_0 truck lvxwGSPs5eo_1 truck lv79L0E9KbU_0 cat lv8ApAxhQxg_9 dog lwIzp1ny_cc_0 bicycle lwqQ1SyQ6oc_0 bird lwu1229kxGE_0 umbrella lw-_X5H5dsA_1 skateboard lxfLak4qc0w_3 truck lxxazO-lUhg_0 skateboard lxz5eN6gYvE_0 skateboard lx4WDd9A1jM_0 cat lyBbm0su2N8_0 dog lyDsv_jEl3M_0 motorcycle lylbDiRYA18_0 skateboard lym5pBjKK44_1 boat lyx_DnTpBx4_0 bird lzAGCQoeAug_1 boat lzISnRATBZY_0 motorcycle lzrv6Lmaqhc_0 bicycle lz9wsaAdD3g_0 dog l0HBjPE-vp4_0 bicycle l0LztA4KLq8_1 umbrella l0TccajPnLs_0 cat l0YyZLT2r0Q_0 dog l0dbu61iEXU_0 cat l0kogcjKlvI_0 bird l01YbT30Uzw_0 car l1PoAFZPnAI_0 cat l1cfghmMFfA_0 motorcycle l1dkS9dCOZs_0 truck l1eSoNjG7g4_3 car l1smSqKCK4k_0 person l1wXtZDVtTw_0 bear l120CJB_tWI_0 car l2Cytaq3_MU_0 bird l2d3stMmMjs_0 cat l2pGQEcySt4_0 giraffe l23teWgsK_Q_1 skateboard l23teWgsK_Q_0 skateboard N7HX62OM1Jo_1 car N7WtVRWgYEs_0 bird N8RE_7TdVGo_0 skateboard N8wDSOXX8q4_0 cat N9TwNh9IZug_0 truck N9TwNh9IZug_2 truck N-bSoL4tlX0_0 cat N-ehGzRtoj8_0 bird N-4XvHMsGCk_0 person N-9RtI_ifsk_0 motorcycle N_MWs_Dxjio_0 knife OAJTjsjrFlQ_0 cat OATLx4-34zQ_0 dog OAtOdcwMjgs_0 skateboard OBDA-yKAC_k_0 umbrella OBDA-yKAC_k_2 umbrella OBLc4YWkCqU_0 motorcycle OBYJdeMHD3g_0 motorcycle OBlj7XKW4lc_1 boat OBlj7XKW4lc_0 boat OBti9g_xdjg_0 bus OBuDg5pF8EM_0 motorcycle OBvMQQZSs6Q_0 truck OCEGSfdedcM_1 dog OCYvV1-sQQQ_1 truck OCYvV1-sQQQ_0 truck OCijTz38zrU_0 truck OCpuPcuJN68_1 car OCp5hNHBPpU_6 knife OC3VHGBHbMY_1 dog OC3VHGBHbMY_2 dog ODXPmCSXZDc_1 truck ODXPmCSXZDc_2 truck 
ODbUQUd4jSU_0 skateboard ODdK6tzKWWs_2 bicycle ODdK6tzKWWs_3 bicycle ODlDtYOtoQs_0 truck ODo-zlQ_GB0_0 truck ODp6c6uSvaU_0 giraffe ODuka2U9fkA_0 bird OD4XXIos2Zo_0 dog OEJox-XKatw_0 knife OEJox-XKatw_1 knife OEMh8A9j_pg_3 bear OEQV-Uetx8M_0 truck OE0tYMQn8GU_1 bird OFA22Poj7lQ_0 bicycle OFA22Poj7lQ_1 bicycle OFbK3M6Z_QU_2 dog OFbK3M6Z_QU_1 dog OFdr0zUfrlE_0 bus OF2H-LBDSPk_2 bird OF2H-LBDSPk_1 bird OF6Up9vV9Qc_3 truck OGMTfwEYzHA_0 knife OGNQnbR2jAw_1 bear OGVemy4LnsA_0 truck OGbVuwjdEDU_0 motorcycle OGnQhL7HZyI_0 bus OGsEC0i33BY_0 knife OG7Gqq0yNXc_0 skateboard OHWx9W6ECl8_0 giraffe OJ0c10BvtRY_0 dog l3U_T7n5YD8_0 bicycle l3YBS5nRxUY_0 truck l3lkSnsgzx4_0 umbrella l3qhbFnoRvI_0 car l31h7cMiU1I_0 bear l4LQx_ua4m0_0 bus l4MLa-2lkQI_0 bus l4dzsbhTXr4_2 bird l4lv0qkvs10_6 bear l43lNQ5Vq_s_0 bird l4-nRuAZNyY_2 car l5FUU1e4Y60_2 bicycle l5ecq1OhBsk_1 skateboard l5ecq1OhBsk_0 skateboard l508a0nbyQI_6 bicycle l508a0nbyQI_13 bicycle l508a0nbyQI_14 bicycle l508a0nbyQI_18 bicycle l6NgJ2NHnt4_1 bear l6S8h_QnD7U_0 cat l63MzTHehFQ_0 cat l7p6AfqPX2Y_1 motorcycle l7p6AfqPX2Y_0 motorcycle l8-hpsjvPaw_1 truck l8-hpsjvPaw_2 truck l9PH4iTXdYs_0 skateboard l9ZtaPU3mB8_0 knife l9j2X0rGhIY_0 bird l9qm2_xBYHQ_0 cat l9urEyEnxnU_1 knife l96fQdjYlLs_0 knife l-MCmCPjH7k_0 bicycle l-QCC522u8A_0 car l-eNrq-WUQo_0 boat l-98mL8hxMY_0 bicycle l-98mL8hxMY_8 bicycle l_DmnPQxj7k_0 zebra l_scPJDEOuI_0 bird mAE8hqG3eSk_0 bus mAPlm5rMa-w_0 motorcycle mA5ZTSfwetI_0 truck mBEMpccxmBw_1 motorcycle mBEMpccxmBw_0 motorcycle mBTsr9NKqos_0 dog mBTsr9NKqos_1 dog mBTsr9NKqos_2 dog mBivNgtX2dc_1 skateboard mB2K7Cqy5sA_0 knife mB2K7Cqy5sA_1 knife mB2K7Cqy5sA_2 knife mCA3YMqp59Y_0 truck mCVUS1SHxdc_1 bicycle mCaHiS25d_c_0 bird mCipOiHzL24_0 car mCnfYEJ7_nM_1 boat mCplUoipq_M_0 umbrella mCshfLJNDZc_0 truck mC9gh-poTgc_1 bus mC_yfZI-Kfw_0 car mC_8_BVmM48_0 bus mDOnks0KH3c_0 bus mDO2Jg5oyPM_1 umbrella mDTcvH2cBAk_0 truck mDTxktaf2Z0_0 cat mDio2Blh76Y_3 knife mDio2Blh76Y_0 knife mDio2Blh76Y_2 knife mDoksuME2bk_0 knife 
mECu0xa8vxM_0 bird mEFIkGBIFT4_0 umbrella OKHhm13mZYw_0 bicycle OKJlHLunIJ4_5 truck OKL9IGXZDqg_0 cat OKOBYUJfsW0_3 bus OKVeF8WX7nM_1 dog OKXlOHWMVYI_0 bicycle OKXlOHWMVYI_2 bicycle OKniUxVle4E_0 dog OK1lt5Hbk8U_0 bird OK1lt5Hbk8U_1 bird OK72g05p_nY_0 bird OLWhwdr2s3U_0 motorcycle OLqz23zKUZ0_0 skateboard OMJA4N9BRjk_0 bus OMJA4N9BRjk_1 bus OMROj6nJzNU_0 umbrella OMscf19CmfE_0 cat OMszUYfxt-k_0 dog OM7YDn8Aj8U_0 cat ONQt1uMKjzM_0 cat ONQ7_XR_YoE_0 car ONvq-WMS04Q_0 bus ON25DCtbtZI_0 bird ON25DCtbtZI_1 bird OOOsedHMhFE_0 dog OPFx79LTPYQ_0 knife OPFx79LTPYQ_1 knife OPNrGuEJKfQ_0 cat OPRxB1VUSzc_0 bus OPZI6LUwe80_0 truck OPny4vHo5EQ_1 motorcycle OQWmlKTZbJA_1 boat OQh45xm5OzM_0 car OQlHcCttP0Y_0 motorcycle OQ5Q0IvSVJw_0 skateboard OROW-2FDArE_0 knife ORjDIPVlrpY_3 boat ORyOEpNkmQU_0 bicycle ORyOEpNkmQU_1 bicycle OR1UJ2WJswk_0 umbrella OR8th1OG-XE_0 umbrella OSia7sePfOs_0 dog OS2Ga4W91oU_0 boat OTGZvd8HEBs_6 umbrella OTGZvd8HEBs_1 umbrella OTGZvd8HEBs_5 umbrella OTK2nAcxHMw_0 truck OTSLZbr15Rk_0 truck OTXkN6YTPBY_2 bear OTvtQllL8ho_0 giraffe OT1tUDnxHUY_1 bird OT1tUDnxHUY_0 bird OUDo6Wi3Mx0_0 bicycle OUaP4Qe7K_k_0 skateboard OU9OQRs4Ff4_3 truck OU9OQRs4Ff4_0 truck OVBUoFuLqko_3 boat OVBUoFuLqko_4 boat OVBUoFuLqko_5 boat OVBUoFuLqko_0 boat OVBUoFuLqko_1 boat OV8AfAYiWos_3 truck OWe4Ah3rUkU_3 truck OWe4Ah3rUkU_4 truck OWwYp5TMtyo_0 dog OW09PhbCZ2c_0 cat OW9poTV3Pw0_0 motorcycle OXDBegRD_hY_4 bear OXleFWP00RU_0 skateboard OXn_z6r4tTM_0 bicycle OX46gFmob50_0 boat OYAOM3GxoFs_0 umbrella mEhLlaG7ivE_0 car mEyJVUti9TA_0 bird mFCrAjplP-s_1 truck mFQSD32phtQ_0 motorcycle mFoVk3mdfVs_0 boat mFpufihJP34_0 truck mF3uYMbMsrA_1 truck mGAgv6gfUIA_1 giraffe mGP0JfjwxXU_0 car mGP0JfjwxXU_1 car mGwC1aGK8EQ_1 motorcycle mGwC1aGK8EQ_0 motorcycle mG6Uz7wciew_0 truck mHNOyEXbwsg_4 bear mHORHQS-7WE_0 motorcycle mHPMxlukQ30_0 motorcycle mHfy3z8lzZY_0 bus mHicqYMm5B8_0 bird mHicqYMm5B8_1 bird mHtWCmdt2ck_0 cat mHwCC0jnHbI_0 bear mHwgF2IQCd8_0 motorcycle mIn-Tkvx0xg_0 truck mIx7ZeZ2Vv8_1 truck 
mJ0xD-4leB8_0 cat mKRUuWYJC2k_0 motorcycle mKRUuWYJC2k_1 motorcycle mKRUuWYJC2k_2 motorcycle mKRUuWYJC2k_3 motorcycle mKWmMLNNRAQ_0 zebra mKgld1efJss_0 bus mKu97ivRVSM_0 knife mKu97ivRVSM_1 knife mLDjtK6d-W0_0 knife mLGU-BL1agI_1 cat mLG8EyllDhA_0 car mLIp-YLvQaA_0 zebra mLgNPTUe_XI_0 cat mLmtVR-AGCk_0 bear mLpoizHo-v4_0 dog mMG1DT2mUAo_0 skateboard mMUflfP_ZMY_0 cat mMXGos8VYQI_1 dog mMt-gdadsY4_1 dog mNFkEphgV18_1 bicycle mNdM6zfb6FA_0 cat mNeHO27e_i4_0 bus mNeHO27e_i4_1 bus mOMvL5XuAZs_0 truck mOVza6TV55E_0 bicycle mOcxsTLCyfM_0 umbrella mOjLK3sW2lA_0 skateboard mO3CzDojFYs_0 dog mO8cYs6iJlE_0 cat mPCBb4ndGx0_3 car mPCBb4ndGx0_2 car mPPaPa0iD_c_0 dog mPV3eyH3uiY_0 bicycle mPW-nXWaC4U_0 cat mP223OT32Rc_0 knife mP223OT32Rc_1 knife mP553XrHpVs_0 motorcycle mQD1eeRC1Q4_0 knife mQf2FppJTEM_0 bird mRFdLfB4a1s_3 bear mRI6bXmeH0U_0 knife mRMc_QxifPU_0 truck mRMc_QxifPU_1 truck mROsO1LIGpo_0 truck mRYB4i5ld-k_0 dog mRkf0ciWPgI_9 bird mRl54j1LWx8_0 person mRyO8jtjseY_1 car mRyO8jtjseY_2 car mR0m08J8B08_4 boat mR0m08J8B08_0 boat mR0m08J8B08_1 boat mR0m08J8B08_2 boat mSTIz-CdXqU_0 truck mSf7pQlzXuw_0 giraffe mSgbTXZAzDk_1 umbrella mSvLPzkZzps_0 knife mSxrYqw4oqg_1 umbrella mSztwZ01Pck_0 bus OYIPropF-hA_2 knife OYTwB7sOFYE_0 bird OYa8DOvcJkU_0 cat OYf6rSUrwxc_0 dog OYnjEcx19SM_0 cat OZBLMb8bGX8_0 zebra OZcS8vrufig_0 dog OZeialzVvBQ_0 bird OZqsh8FFeFo_0 truck OZqsh8FFeFo_3 truck OZqsh8FFeFo_4 truck OZstdGSfBBw_0 bird OZ2Xf6zzI5Q_1 skateboard OZ2Xf6zzI5Q_0 skateboard OaR_KKoBRYA_0 boat Oai5vIFRADY_0 truck Oaxb1TjNF5A_0 boat ObG3TG10dF0_0 dog ObLBCGg01UY_5 skateboard ObLBCGg01UY_1 skateboard ObLBCGg01UY_2 skateboard ObLBCGg01UY_3 skateboard ObLBCGg01UY_4 skateboard ObMci_3wRII_0 boat Obmxs3FqVc0_0 truck Obol9FzC6qw_0 boat OburzWcRnbc_0 skateboard Ob5o_Ufzxvo_0 umbrella Ob6-UrKFrTY_5 boat OcFGISpeAn0_0 skateboard OcQBa7E9-AI_1 car OcZG24cCgsU_2 boat OchOHb4q-iE_0 bicycle OcmRyP_n53E_0 truck OcuYOC6GylA_0 car Oc1tfJzLD3o_1 bus Oc1tfJzLD3o_2 bus Oc1tfJzLD3o_0 bus OdGHHAUYow4_0 
boat Odl4k8y8GfI_1 skateboard Odo1ZvyEbqs_3 bear Odo1ZvyEbqs_6 bear Odo1ZvyEbqs_7 bear OeJet0TZ0Ns_0 cat OecO1BnSygU_0 umbrella OecO1BnSygU_1 umbrella OepCeq6zNOc_0 umbrella OevlneuqSNg_0 skateboard Oe3qCUtDCoI_0 bear OfD7c6vcSKc_0 motorcycle OfFZrl_Ltoo_0 dog OfQ3Y3DEgNI_0 skateboard OfZ9wyeuMaU_0 skateboard Ofcr6xsiMGY_1 knife OfmW_n1WB-0_0 bird OfpLj-uw2VM_0 skateboard Ofv2SMoyg_8_0 boat OgG3xES-A9s_0 bicycle OgG3xES-A9s_1 bicycle OgtTZgAAtrk_0 bear Og77fxfsfzI_0 skateboard Og83XjWPr30_0 bird Og_sRGRP2fw_0 motorcycle OhQqfPIVR_o_0 truck Ohh5X9j8-P4_0 skateboard OhvnlA9rzUA_0 umbrella Oh4vuNdjqGg_1 boat Oh4vuNdjqGg_3 boat Oh79QNRx0m0_0 bus OiHa7vhbW0g_0 umbrella OiT0hP6IU_0_1 car OidiasYmhhk_0 dog Oiuo__vi77s_0 motorcycle Oi3BJVuj3f8_0 bus Ojst9j_7TPs_0 motorcycle OjxLYDs9O2w_1 skateboard Okd1qAIUuZo_0 skateboard Okd1qAIUuZo_2 skateboard OlO9xdVfniA_0 bus OlPObAsvFRE_0 bear OlQykWy5_d0_0 skateboard OlVZS0O7Xcc_0 bus OlVZS0O7Xcc_1 bus OlVofey46c8_2 giraffe OlVofey46c8_0 giraffe Oldv3-_fn3E_0 motorcycle OlufwgkC9nA_0 cat Ol3C5MWakic_0 bus Ol63TPS0wjE_0 skateboard OmTHe4jPR30_0 umbrella mTnSFF649v4_0 motorcycle mTtOhVJYmco_0 bicycle mTuXb1mo6ms_1 motorcycle mTwbZIC2mjs_0 umbrella mUllN4tCjhg_0 car mVWf8BrbbQc_0 skateboard mVZVZPz-0uk_0 knife mVztYl0hyR0_1 bird mWOuUa5VTIU_4 bird mWSWZi7ef2Q_0 bus mWULzZ-r0BE_10 bear mWULzZ-r0BE_0 bear mWULzZ-r0BE_1 bear mWULzZ-r0BE_3 bear mWULzZ-r0BE_6 bear mWULzZ-r0BE_7 bear mWULzZ-r0BE_9 bear mW85x5O3sQM_1 bus mXYQlH9le8Y_0 dog mXt-xLcVJTM_0 knife mXuPzw4I-wQ_0 dog mXu238CeGfQ_0 motorcycle mXu238CeGfQ_1 motorcycle mX3SlrHHN8A_2 knife mX3SlrHHN8A_3 knife mYFsdZ6ZiHg_0 skateboard mYYLIkI65fA_0 cat mYgcUWeYKeE_0 cat mYhujznmuic_0 motorcycle mYtEL2P4G64_2 truck mYtEL2P4G64_0 truck mY6M_QMVm6A_0 dog mY6M_QMVm6A_1 dog mY6M_QMVm6A_2 dog mY6M_QMVm6A_3 dog mZEPBKLKQLU_0 skateboard mZStBRJGz0o_0 bird mZWugKrC8fs_0 truck mZ0LxtaLk9s_0 bicycle mZ0LxtaLk9s_1 bicycle mZ1ae3QtMqY_1 skateboard mZ6SXifL_5I_0 cat maANeKOpibc_0 bus maATqEbCdmA_0 
boat maOsv3Gen0Q_0 motorcycle magDXuphf6E_0 truck mavzqjj21eQ_0 motorcycle mbFrW58khSM_0 motorcycle mbtyAyprPhQ_0 motorcycle mbuozxoOynA_0 bus mb9G4GF56RA_1 umbrella mb9G4GF56RA_2 umbrella mb-nes45JeE_1 bird mb-nes45JeE_0 bird mcCOvhuC86Q_0 cat mcxoHsKM444_0 cat mc0A1NsuIBI_0 bird mdDoBuc7jag_0 boat mdZbK8mOA5Y_0 motorcycle mdxbRZzm2Fo_4 truck mdzJDnEx5AI_2 boat md8Xi01GJ0Q_3 bird meRJPfPZTpw_3 bird mebu5O8auic_1 bird mehKWfZTJQE_0 bird mfPFvq57cxM_0 skateboard mf4LyMZ6wyY_0 skateboard mgEZVZrBkrg_0 bicycle mgEkK74q1Lo_0 motorcycle mgTCPe8eM00_1 umbrella mgVB0o0U17w_1 skateboard mgVB0o0U17w_0 skateboard mhSSgOcQwd8_0 cat mhqhGszzAR8_0 cat miC3NPxHofU_1 bird miQyhDocW3I_1 dog Om1q-9YbJu0_0 bus Om-NvWZY9XM_0 bear OnKqSIvDmuM_0 skateboard OnemsYazBrQ_0 truck OnemsYazBrQ_5 truck OnemsYazBrQ_1 truck OnemsYazBrQ_2 truck OnemsYazBrQ_3 truck On3Yd3AHFp0_0 dog On3b0cn9QYE_0 bear On-GcAXLGZ0_0 motorcycle On_5UKUJi7U_0 car OohVLB8HrmU_0 bear Oo2Ux9rWYGo_0 skateboard Oo8VLA_C0ho_5 bicycle OpAPsb8a7ck_1 bicycle OpD07kt9gdg_0 motorcycle OplcFe9OOMA_0 boat OplcFe9OOMA_1 boat OpqmXBQU87o_0 truck Op3764NveuQ_1 bicycle OqKwAAWtANM_0 cat OqPOCcEAHqk_0 skateboard OqbhEJlCp48_0 skateboard Oqc407hvhn8_0 skateboard Oqjbl3c9LYU_0 bear Oqo2P7az_Jw_2 motorcycle OrhDfcZqq1E_0 cat OrhipZ8lZHo_2 bird OrhipZ8lZHo_3 bird OrhipZ8lZHo_1 bird OrmkaB0vrG8_0 dog Or-E2m2p4X8_0 motorcycle Or--toMjK3I_3 boat OsvYa6TnsFI_2 car OtIV8clF1-o_0 bird OtIV8clF1-o_2 bird OtKIh5W3Uro_0 bird Otw43WNrlsM_1 bicycle Otw43WNrlsM_2 bicycle OtyYn5vEHbM_2 skateboard OuBzZzA9Q7o_0 bicycle OuJMq2UqA-s_0 dog OuVznEsiyyA_1 motorcycle Oui9ZgfJiJE_0 skateboard Ou1yCmmAuSY_0 cat OvEJdKYqvF4_2 dog OvEJdKYqvF4_5 dog OvZguhO8UVQ_1 bird OvlqAWflXBs_1 bus OwQktS0dM3k_1 truck OwUWoVRKf7E_2 zebra OxADHlAb7dM_0 boat OxInmNOeLHY_0 bicycle OxInmNOeLHY_1 bicycle OxInmNOeLHY_2 bicycle OxInmNOeLHY_3 bicycle OxInmNOeLHY_5 bicycle OxInmNOeLHY_6 bicycle OxInmNOeLHY_7 bicycle OxZdEZCJtcw_1 motorcycle Oxkx4bWzOMo_0 skateboard Oxp9w62kg0Y_2 
knife Oxp9w62kg0Y_3 knife Ox1idrJvs2E_0 cat Ox_8K3szIs0_0 cat OyGSbm149i8_0 boat OzBFCX0vpiU_0 motorcycle OzCvvptC7o8_1 bicycle OzHYG5kpMbw_0 car OzItTAjpb9U_1 knife OzwlwZq46z8_3 bus Oz89_rVdBV0_0 knife Oz89_rVdBV0_1 knife O0JwQIk5pZY_5 knife O0Xl3AF_T0s_0 dog O0dforbCqKM_0 cat O0lfImzhCM4_0 dog O0p5eAP2AyA_0 boat O0rSIIipDT0_0 truck O02oVGyCZDI_0 motorcycle O0_GC-1pCYk_0 bear mj155rqWO3k_0 motorcycle mj5oMHI4Ch0_1 skateboard mkPWdHTd5X8_0 bear mkVYY1EvetE_1 bicycle mkZA72VL1oI_0 zebra mlAzMb61fYU_0 cat mlJYoZVHztc_0 bear mlT2XD9k5Ro_2 bird mlophh4mK4A_2 bicycle mluR2OjQTmU_0 car mlwpiHjyzIA_1 motorcycle ml4BVi7cCV4_0 bird mmnFugXdqlQ_1 truck mmnFugXdqlQ_0 truck mmojCWiaNYI_0 cat mm_Udf1FG0s_0 cat mnB2hBuySsI_1 bear mnB2hBuySsI_2 bear mn_cuBRZu8M_0 cat moZR-AtZJnI_0 cat mobg7uEQTmo_0 umbrella mogyHm8Jiok_0 bird moh4TWSe9Fc_0 umbrella motFo9G-GLs_0 skateboard moyxRLHHeiI_0 bus mo5ZpMFELUQ_0 motorcycle mpO9dBwTeW4_0 bicycle mpYAM0x6L5M_0 motorcycle mphFmT6TzLM_1 knife mphFmT6TzLM_2 knife mphFmT6TzLM_3 knife mp25XfIJhQY_0 cat mp8USuQKinc_0 bird mqUyhzbCpig_0 motorcycle mqjilBZByTI_0 skateboard mq5DqmYGVM4_0 person mrJAakc7Fj8_0 bus mrOsDCuEdRQ_1 dog mrY8gIFiUhE_0 car mrhfyNpFMq4_1 truck mryDGEujJno_0 motorcycle msNXnb1a02o_0 knife msbOXFTsSVU_0 giraffe mszokIKsdUk_0 bus ms0_k1aLULU_0 truck mtITgRv95Sw_0 dog mtU7bHAsI8Y_0 cat mtZHgLGJiu4_0 skateboard mtmzPf2AZuI_0 skateboard mtnURpE0wyE_0 bicycle mtnURpE0wyE_1 bicycle mtnURpE0wyE_2 bicycle mtpTPJtG8F4_0 motorcycle mt_LZ5UsG_w_5 knife mt_LZ5UsG_w_1 knife muKQy-1p4fg_0 truck muWIt0X4pKQ_0 dog muZ7xPF8odU_2 bicycle mueRS6nKTdA_0 bicycle mujGcuAzOdo_1 bear mujGcuAzOdo_4 bear mulQIomc988_1 bicycle mulQIomc988_3 bicycle muoqLEyrhhI_0 dog mursOuNatdc_0 boat mu65YolQZds_0 knife mvEcWlHP6u4_0 bicycle mvYBfdZkCe8_0 dog mvb5jVJeuGE_0 person mvhEFfQeFCY_0 bear mv2FHxOHSR0_1 truck mwAPVTEbZGM_0 skateboard mwAPVTEbZGM_1 skateboard mwBKrjOpxkY_0 skateboard mwIroQ9RbXA_0 bird mwrxbdZraRk_0 car mw5fQZ8EB5I_0 knife O1KrpGSvXAY_0 knife 
O19Mlhhzqgc_2 bear O2ZR7HPYZCo_0 cat O2u5126JYpY_2 motorcycle O3DA7qzf2s8_1 bus O3DA7qzf2s8_0 bus O3y2taxKvCA_2 boat O4CfuT5BDcc_0 skateboard O4VQQaJ07zY_0 cat O5b3XcEGZ4M_0 car O54XRvo6VU0_2 motorcycle O54XRvo6VU0_0 motorcycle O59A3lMogSo_0 giraffe O59A3lMogSo_1 giraffe O6BXRuq_YcE_0 dog O6BXRuq_YcE_2 dog O6EtCByhFZI_0 truck O6Jf2yxCTuI_0 cat O6Uln7GkqDA_0 skateboard O6b3a--pX3E_0 bird O6kqsEuKhis_0 bird O69gCmR0LvA_2 motorcycle O69gCmR0LvA_3 motorcycle O7ReHsig5IQ_1 knife O7Wrpfzb8_g_0 bear O7lvzdzmX5k_2 bicycle O8BNclEPo5w_2 dog O8BNclEPo5w_1 dog O8f0Dhn1as0_0 umbrella O8sB46kfM28_7 umbrella O8sB46kfM28_6 umbrella O8sB46kfM28_13 umbrella O9Duu2Un8AE_0 skateboard O9Duu2Un8AE_1 skateboard O9Duu2Un8AE_2 skateboard O9EqKcj_CPs_0 umbrella O9iWg3ZqLcU_2 bear O-NZJ4-eoQ8_0 bus O-ZUr1bQzp4_6 umbrella O-kJ078YJq4_7 truck O-2S79hisI8_0 zebra O-4CV4-x7Tk_0 dog O_D7M00pmjQ_0 motorcycle O_PCiV3NICw_0 dog O_bAX_ruSNQ_0 skateboard O_fZm7Mblgg_0 knife O_mRo8YLc50_0 umbrella O_3VssPsSVQ_5 bicycle PABLxf3U8qc_2 bicycle PABLxf3U8qc_4 bicycle PABLxf3U8qc_1 bicycle PASMcbnOtUM_1 bird PAZBEMKPQEw_4 boat PAbB9I6MC_o_2 boat PBD1IW-vA6Y_0 dog PBD1IW-vA6Y_1 dog PBQjiKBWtao_1 bicycle PBQjiKBWtao_3 bicycle PBqIT1T_Tl4_2 umbrella PByJb40LNJ4_28 bicycle PByJb40LNJ4_30 bicycle PByJb40LNJ4_3 bicycle PByJb40LNJ4_13 bicycle PByJb40LNJ4_18 bicycle PByJb40LNJ4_22 bicycle PB8sWVNFkDw_1 motorcycle mxA8JbJ0Do8_0 car mxFga0703Mc_0 bird mxMCBmJ5owQ_2 truck mxXH5aZCSJ8_0 truck mxYl5Y1KAiY_0 dog mxZgNkjbyxk_1 knife mxeuMHAWMxo_6 knife mxeuMHAWMxo_7 knife mxeuMHAWMxo_9 knife mxsTfEQlVgM_0 motorcycle mxvG6gSVYuo_0 bicycle mxwmtm7rKF8_0 car mxxiqhZzhEE_0 motorcycle mxyHDUSMhLs_0 cat mx2i3CYeEEE_0 bear myRelcztkqo_1 knife myWzn06fmDI_0 dog myY1Ijlbknw_1 bicycle myY1Ijlbknw_4 bicycle myY1Ijlbknw_5 bicycle myY1Ijlbknw_2 bicycle mymtiyldysk_0 truck mzGmbowEFfA_1 knife mzMgXA_v8q4_0 motorcycle mzYPSSUS--w_2 boat mzYPSSUS--w_0 boat mzdD_0CKekQ_0 motorcycle mzfrEqAhHeY_0 bus mzm_D3J8zqQ_0 umbrella mzyu28WsuFs_0 
motorcycle m0MVwwL_0MM_0 bicycle m0gukhoxW0Q_0 skateboard m0gukhoxW0Q_1 skateboard m0gukhoxW0Q_2 skateboard m08CnM1FBR0_0 cat m0_tPmnque0_0 bicycle m0_tPmnque0_1 bicycle m1Qhj9jYohk_0 bus m1pFyDGuVzk_1 skateboard m1pFyDGuVzk_2 skateboard m2StZDAc1yw_0 bird m2uQowbhYDc_1 bear m3AM4AQLDo0_0 zebra m3AM4AQLDo0_1 zebra m3RCOnTUyMY_0 boat m3RCOnTUyMY_1 boat m3SOT8NCOEY_0 bicycle m3cgfDs0_G8_2 dog m3fctWcU4as_0 motorcycle m3sztS1QC3s_0 cat m3uDjNrfbD8_1 bear m35CwgXROHw_0 car m4qZSrgBZkc_0 bird m4qZSrgBZkc_1 bird m6NemUzZQFc_1 motorcycle m6NemUzZQFc_0 motorcycle m6S6MEQgo2E_2 motorcycle m6S6MEQgo2E_4 motorcycle m6hQABEUkQQ_4 boat m6z3sbKYwcc_3 bus m6z3sbKYwcc_4 bus m669S-54lMc_0 motorcycle m669S-54lMc_1 motorcycle m7djLwb_a5k_0 car m7k5fJXTZPI_5 bird m7xUarlXKEw_0 umbrella m7xUarlXKEw_4 umbrella m7xUarlXKEw_1 umbrella m7xUarlXKEw_2 umbrella m8B-pb1I7nc_0 cat m8YA8dXocmg_2 boat m8t6gPBCxr8_0 truck m9HGLakPqSo_1 bear m-NEL2Jq0nQ_2 car m-dKTMwfPqo_0 truck m_JHW_eCKY0_0 umbrella m_dOsn1chuA_1 bus m_dOsn1chuA_2 bus PCC9sJ4Gdxw_0 car PCeoeGBYrJU_0 dog PCqa_yHJ32g_2 bicycle PC2plr6JdQg_0 umbrella PC_wbEzLNLQ_0 bicycle PC_wbEzLNLQ_1 bicycle PDU92To89cE_1 bird PDlKUKo06lI_0 knife PDvSiH5Pf_0_0 bus PEC7E1t79A8_0 car PEJFRzyvIBc_0 bird PEJvGdLGOjU_0 zebra PEY59JrOz5I_1 bird PEY59JrOz5I_0 bird PEfpmwboH3w_0 bus PEtsR4S5Zzg_0 bicycle PE_zE5T1ayo_0 cat PFJiRWGaPaw_0 car PFJiRWGaPaw_1 car PFa_RCiQVjA_0 skateboard PFjuIzuDmJs_1 knife PF8HAptOIC8_1 car PGEM0ys1sGE_0 knife PGMimFwsl54_0 cat PGP0PEOv3zw_2 bear PGP0PEOv3zw_0 bear PGipyYSRHso_0 bicycle PGn623RKWNA_1 car PG8bMx6DuSo_0 knife PHeQ1xoUBgg_1 boat PHmnvFIAtHo_0 bus PHxuey2u6UE_0 skateboard PIDvuyKFIJ8_0 cat PIT2XsuODRE_0 bird PIa767e6xuQ_0 cat PIkhnCxrF9g_0 cat PInIdEVTPn0_7 truck PI5ROW9ewOg_0 cat PJoSJpMWo0Y_3 skateboard PKTJIVIuSFw_0 truck PKZXF6Hj0kw_0 bird PKZXF6Hj0kw_2 bird PKZXF6Hj0kw_1 bird PKtfgOMwx4A_0 dog PK-4bXZDtlA_1 skateboard PLO2xY76oh4_0 motorcycle PLVEvFhXHAE_0 truck PLVEvFhXHAE_1 truck PLd8HlO4HYo_1 cat 
PLd8HlO4HYo_0 cat PLwQ0AHwZgg_1 skateboard PLwQ0AHwZgg_2 skateboard PL2FcMREy_0_0 bicycle PMRnsvlMF4A_0 skateboard PMUqAknVm2Q_0 motorcycle PMXmKup8jy4_0 boat PMkiPjm9XdY_1 motorcycle PM028PEyjv0_4 bear PNpDnymoq8w_0 truck PN6PB668zV4_0 truck PN86cQumWDU_0 motorcycle PN_b6R9HxwQ_2 cat POQalChDjmU_0 skateboard POW6F8MZMTQ_1 bird PO-OnjGHjDk_0 bus PPI6aG2QFaM_0 bird PPdV273cZC8_0 skateboard PPhYyYHNaQ4_2 boat PPhYyYHNaQ4_3 boat PP5_L_EZsmE_0 bird PQI2zG7I8jI_1 bus PQjM0fGHXds_0 bird nARlDpJ1mzQ_1 dog nAmX6FEKmTg_0 truck nAsHFcuT16U_0 skateboard nAsHFcuT16U_1 skateboard nBLWjCuzp2g_0 dog nBPhMvA4QIs_0 dog nBXKLM2hLN0_1 car nBtF1BDR8wE_0 motorcycle nCKBmlhUPYg_0 cat nCPhfqQsjIQ_0 motorcycle nCPhfqQsjIQ_1 motorcycle nCe_XQHu77g_0 truck nCgjbB7wxoE_0 bus nDsb271W8XU_1 car nEFtdboPB2w_1 bear nEIawnnD8V8_0 truck nELgP3wAnm8_0 dog nEM7mY_k1_4_0 boat nEVFHD_9xCw_1 bird nEtqWL5nz_U_0 bus nEtqWL5nz_U_1 bus nEyJKW3bMCc_0 dog nE6lY5G16lE_1 bicycle nE6lY5G16lE_2 bicycle nE6lY5G16lE_0 bicycle nFQvQPqMjpk_0 car nFZrdv6K4pg_0 motorcycle nFa5TGw-b5Y_0 bicycle nF28ACSGHM8_0 boat nF444n6UUJE_0 bear nF444n6UUJE_4 bear nGQ3Hq6P5tM_0 car nGnDoylbNm8_1 bear nHAF0LI8CPk_0 truck nHAF0LI8CPk_1 truck nHAF0LI8CPk_2 truck nHApjxTb0fI_0 umbrella nHAt_MmKZtA_0 dog nHRioXgb-Fo_0 bird nHbHOfTnrtg_0 dog nHbHOfTnrtg_2 dog nHe8j-osZck_0 dog nH9AXssn9vw_0 umbrella nIIQLgiJpz4_0 motorcycle nIqnT8pJFz0_0 knife nJF2wWsJCd8_0 cat nJuhir_bIpw_0 cat nJ6iwd_XQso_0 umbrella nJ6uR6SE01w_0 bicycle nKM_iCO6bKs_0 bus nKS1tzA_Hrk_0 skateboard nKUBzJ38GgY_1 boat nK-2zxkNCuA_0 cat nLED5Us6rMo_0 motorcycle nLL3PMe48dQ_0 boat nLXX8_SfZs0_0 cat nLn2LN33uxg_0 cat nLx78Uv2dmc_3 skateboard nMbLyO3605c_0 knife nMo_-oHL7bU_0 knife nMtxrG4hH5M_0 skateboard nMyhi847s6A_0 knife nNNF1j89RS0_0 bear nNScwJL6ym0_0 motorcycle nNeaR2o9KMY_0 boat nNwEBFJZT8U_0 bird nOe7o_AaOUs_3 skateboard nOfyHwhf35s_0 bus nPBFLS60OYk_5 truck nPhpYRGfHlw_0 bear nP5wigEk-3A_3 knife PQsHE_w_Q5I_1 knife PQuYVLwcT7k_0 skateboard PQ4gPP2l3RY_0 bus 
PQ9ZEkeKIzs_0 skateboard PRIJbfolHpE_0 umbrella PRIw6kIS_oM_0 motorcycle PRg6CE_exgE_2 dog PRoAGpjxUIQ_1 dog PSdh0lzfg3M_0 bus PSrvUaBxbgU_0 motorcycle PS_CABKe3Yk_0 motorcycle PTKnZd28Sac_2 dog PTORa3OCyoU_1 truck PTxm2ZRQbNg_0 skateboard PTxm2ZRQbNg_5 skateboard PTxm2ZRQbNg_1 skateboard PTxm2ZRQbNg_2 skateboard PT2XxI2FufM_0 bus PT3felQmrwU_1 bear PT6KXLLxhes_0 bird PUFo51ngpe8_0 bus PUeS5CCMoa4_1 zebra PUgpXWoI6nw_2 bird PUiSf8EuinE_2 bear PU3x1IpbndQ_0 knife PU5v_AtaKKw_9 bird PU5v_AtaKKw_2 bird PU5v_AtaKKw_3 bird PU5v_AtaKKw_4 bird PU5v_AtaKKw_5 bird PU5v_AtaKKw_7 bird PU-lRdkaqdg_0 cat PVV-saboi8Q_0 truck PVXtjPyNMms_0 dog PV6mXKbH058_0 skateboard PWIWGwJZENs_0 dog PWQGxn3c5iQ_2 knife PWQGxn3c5iQ_0 knife PWs7zuWiKZo_0 bus PW7XGdRhgKI_0 cat PW97rAj3_84_0 truck PXb9PHJghpA_0 cat PYH5FxLfm3M_0 bus PYOwGQUBJXY_1 boat PYWfE8WhDKk_1 knife PYohJALR7DA_1 motorcycle PYohJALR7DA_2 motorcycle PYsiftgJNrs_0 motorcycle PZEun35Hcoo_1 dog PZNXXWorkrY_0 motorcycle PZSGccVPUm8_1 bird PZjQiLyqHkw_0 truck PZoM9dv8P3A_1 bear PZuGSUZ1N2w_0 skateboard PZz86aIvTWU_0 skateboard PZ3PfRXk2rQ_0 cat PZ9YkHds_00_0 dog PaVPMVUQwtM_7 boat PaVPMVUQwtM_2 boat PatPjxyHqvY_0 boat PbPu-cnEMqo_0 cat PbUb1IktyM0_0 motorcycle PbdnWP3AnKQ_1 person PbhIhdwp7nI_5 knife PceERP83N7g_1 dog PceERP83N7g_2 dog PdER58jIvPg_0 cat PdRRvS5p7TM_4 bicycle PdRRvS5p7TM_0 bicycle PdRRvS5p7TM_1 bicycle PdgOy1B6ByE_0 person PdgOy1B6ByE_1 motorcycle PdkRSALRJOE_1 truck Pdne4jISJMk_0 bird PeXODrjPJpU_0 motorcycle PecvaJstdYE_0 knife Pejvg4LHBXw_1 skateboard Peur7tMeMNc_11 bicycle Peur7tMeMNc_12 bicycle Peur7tMeMNc_20 bicycle Peur7tMeMNc_21 bicycle Peur7tMeMNc_5 bicycle Pew5sug67ao_0 dog PfKS2L_bxBc_0 cat PfOYq_uyVF8_1 bird nQPFPYvmWtU_0 skateboard nQd33JTaurM_0 bird nQd33JTaurM_2 bird nQmH_VIOI4o_0 cat nRG70FCdevw_0 bus nRP28gcIe5Y_0 bird nRP8SwdbUGw_1 bear nRr5gMvJ77k_0 skateboard nSgcLfwMJu4_0 dog nSvaQz0i9i8_1 skateboard nSvaQz0i9i8_0 skateboard nSz_BdDSYsk_1 bear nS_SY6iDJ2U_3 bear nTjbCPXR408_1 truck nTjbCPXR408_2 
truck nTjbCPXR408_3 truck nTjbCPXR408_4 truck nTz3LA23B4U_0 skateboard nUBgjOAcKBw_0 truck nUDvay-MfVs_0 truck nUVSuT7wfDs_0 motorcycle nUdbTm-FW0I_0 bus nVAOU6r15Ww_3 knife nVTMM3F16j0_1 boat nVi9QbrUrjE_0 motorcycle nWvR8fiLxGw_0 truck nXD-zvpjC50_0 car nXG_fwbJQ-E_0 car nXjIIWFPSd4_0 cat nXlSVy8CmMk_0 truck nXpq0p9VBXc_0 boat nXqE-XROi78_0 bear nXqQPuJmTZo_0 cat nYYFquwhxeI_0 cat nYqRuOF_Uao_2 car nYqRuOF_Uao_0 car nYqRuOF_Uao_1 car nYut3zBSbuM_0 bear nY0xtzTME34_1 cat nY2XarSrm7Y_0 boat nY2XarSrm7Y_1 boat nY3BS_3Mq6o_0 motorcycle nY3fRfvoh9w_4 bear nY3fRfvoh9w_0 bear nY_icz32gn8_0 cat nZHGbmVkhrE_0 cat nZn4xAbcGSk_0 cat naE1svJuCTw_0 truck naE1svJuCTw_1 truck naR-9rNf5fE_0 skateboard nalqTKM6890_0 umbrella nalqTKM6890_1 umbrella nalqTKM6890_3 umbrella nbCix4zvF_E_0 umbrella nbcH6NfapD0_0 boat ncuqh0iglYU_2 skateboard ncu8gbqMkMc_0 cat nc9aHs1_xzs_2 motorcycle ndBPYFAVIiM_0 bird ndJ2_mPZktw_2 bear ndJ2_mPZktw_1 bear ndMfXyYPfAM_0 bird ndNs3q8tY9U_0 bus ndO2b-r-Krs_0 motorcycle ndO2b-r-Krs_1 motorcycle ndj7VTH_PhE_0 bird Pfi9ZEQtgjY_0 knife PfnFeL4ArA8_0 skateboard PfpTZKfKeKY_2 truck PfpTZKfKeKY_0 truck PfpTZKfKeKY_1 truck PgBMaMqbYqA_0 motorcycle PgE6BAQmVQQ_0 umbrella PhFFfxYo2_o_1 dog PhJOcszed6A_1 car PhJ5rQ5VmeY_0 skateboard PhjPRYTcJwQ_1 car PhyQoxFlTMU_0 truck Ph8Vag9VxRU_0 zebra Ph8Vag9VxRU_4 zebra Ph8Vag9VxRU_1 zebra Ph8Vag9VxRU_2 zebra Ph8Vag9VxRU_3 zebra PiO6F4X8k_M_1 truck PiO6F4X8k_M_2 truck PiRy-T8d0gQ_1 skateboard PiRy-T8d0gQ_0 skateboard Pi_aEuQD5gA_5 umbrella Pi_aEuQD5gA_8 umbrella PjAWqdid4rw_1 umbrella PjBOLvrlicY_0 car PjBOLvrlicY_1 car PjBOLvrlicY_2 car PjTtsfl7KZ4_0 cat PjjO6IaSiuo_0 skateboard PjjV-pCjgqc_1 bird Pjk0d9eP2gI_0 dog Pjm-ptGWuWU_0 dog PjpGwiZ8mK8_0 bus PjuUsIXzSzQ_0 truck PjwfhUvbBNI_0 skateboard Pj9588RHCHM_1 car PkktNSL9IjE_0 bird PlKRGU_XIzs_3 boat PlfCXfMXcs0_0 skateboard PlfCXfMXcs0_1 skateboard PltDcKetGYw_0 knife Pl6ja9eNHzE_3 skateboard Pl6ja9eNHzE_4 skateboard Pl6ja9eNHzE_1 skateboard Pl6ja9eNHzE_2 skateboard 
Pml224S87BE_0 bird Pm_2At7P8Yo_0 bus Pnt2XmUpT8Q_1 bear Pnt2g-tHwK4_0 truck Pn1VFdKk5vQ_0 truck PoL9E8Yc2vo_0 car PoL9E8Yc2vo_1 car PoUPC9WCdiE_5 dog PoV7Wn66UTo_0 bird PolaH6r1Qds_4 truck PolaH6r1Qds_2 truck PpI7DZdWcfc_0 person PpZHxI0N3Wo_1 motorcycle PptqwylntWQ_1 boat Pp6vch1kMqE_0 cat PqJOWTjp0ww_0 cat PqKlF5nnOFs_0 motorcycle PqNDvGH2-iM_0 truck Pq7tfwAqhIM_0 motorcycle PrV4kyVAwWE_0 bear Prynn7mNQdQ_0 knife PsVhOsDIopI_0 umbrella PsfddppUmSk_0 skateboard PsgPXqr-N7A_0 skateboard PsvVwYAeKEc_1 boat PsytJKFxV8c_0 boat PszGWhekz-Y_0 umbrella Ps9ReRjYLVk_0 bird Ps9f-iFqX4M_0 skateboard PtL5k4ew4q0_0 car PtR7vRI9mn0_0 motorcycle PtVUPVUYld8_1 skateboard PtnFOxat4hE_0 bear Ptq7-B4P9Bw_0 skateboard Pt04IRhfVFk_0 boat Pt1vVuKH3fk_1 skateboard PuV7SV-FwOU_1 skateboard neA0T50G8TU_0 car neA0T50G8TU_1 car neA0T50G8TU_3 car newqX6GTbrA_0 car ne8K6jHnOT8_0 boat nfnKsQItZjE_0 skateboard nfxMe31pjec_4 truck ngAKsr62ACQ_0 knife ngOtFD7Fxd4_2 boat ngZtMG--t4I_2 bear nga4aEZQhJw_0 knife ngslQPG3kEI_1 bird nhI3C5y85gw_0 truck nhdMHfvazLY_0 umbrella nhoO0Evj7OQ_0 umbrella nhoO0Evj7OQ_2 umbrella nh56dQ3T3Mc_2 boat nh56dQ3T3Mc_3 boat niBK6HGH16U_0 cat ni3trEPOXck_0 bird njBEUyoUzlQ_0 bird njK1OLFCvv4_0 cat njMC5HAlnMU_1 umbrella njnGmGuXNdE_1 knife njn4TkIDn0k_0 truck nkJxMYiG9Ho_0 bus nkSvwnLvBmw_4 motorcycle nkSvwnLvBmw_0 motorcycle nkSvwnLvBmw_2 motorcycle nkVPvJ3Smrg_0 cat nkZ6NDOt4r4_0 cat nkv5eof4q_M_0 knife nlAePf94uwk_0 cat nlupdJzbyKs_1 bird nl83jp96h9s_2 knife nmmeE-Dfds8_0 bus nmwFYDopqBc_0 dog nmwFYDopqBc_1 dog nnIGNFEnlw8_0 car nnNkJ09YO9M_0 motorcycle nnUkcXbXbFM_0 umbrella nnhUxSjBHP8_0 umbrella noCrLkdGSXw_0 bus noGmFOxKIr0_0 person noIHydna8tw_3 truck nonoyrFpKVA_1 zebra nonoyrFpKVA_4 zebra nonoyrFpKVA_5 zebra nonoyrFpKVA_0 zebra nosbeVXMgAk_0 knife nqN2uJfit8o_1 car nqPkd_Quci0_0 truck nqWs5hqd8Ps_0 bus nqbsnsBZULc_0 truck nqnjh-NO9go_0 bus nq8oHNlU_BQ_0 truck nrJURcGigjE_0 motorcycle nrlcROgdPlI_1 cat PumbYcoJ5zE_0 truck Pu8rYMOC0Iw_0 dog Pu_KMtdCGZY_1 truck 
PvSzSsQ4YCY_0 cat PvuGk2XhJW8_0 bird Pv77ig8kBgE_0 cat PwBNm2_oKbQ_1 zebra PwE-w-S8nQc_0 cat PwRb6q11-rw_7 bear PwRb6q11-rw_0 bear PwRb6q11-rw_3 bear PwRb6q11-rw_4 bear PwRb6q11-rw_5 bear PwgxDMnN1SA_0 truck PwmBtcc64nM_0 zebra PwmBtcc64nM_1 zebra PxN14d54as8_0 truck PxOYpOxjFFc_0 cat Px02MS-Ywo0_0 knife PyevrWYsc8k_0 motorcycle Pyr-sHCH2wc_4 truck PyvyP3J13FI_0 knife PyvyP3J13FI_2 knife Py6rKt-beyk_0 knife Py-bAIGcQ1Y_1 boat PzZ-Jr7jMk8_0 bus P0S7eBa6_S4_0 dog P0e6zPkZO5s_1 knife P1_bfvyTku0_0 truck P2NRNopueuo_0 umbrella P2SgXG0mMWU_0 truck P2Wv0vXNCqQ_0 zebra P2kLj1DZq3I_1 bird P2kLj1DZq3I_0 bird P2ldC-_7nrs_1 boat P256TqMIJZk_0 dog P3MLJSbWlpg_1 motorcycle P3jB1tXpVMw_0 bird P3q6jIrZyo4_1 dog P4jpdzY2as8_0 dog P43doVXj3y0_0 cat P5DcP_VLnP4_0 bear P5Gd_8k2O5s_0 truck P5VAaJj-1Rc_0 dog P5kFeiFmPxw_0 person P5xsJqm2v6c_1 motorcycle P5xsJqm2v6c_2 motorcycle P5xsJqm2v6c_0 motorcycle P5yrLRVD86M_0 dog P6Qm9u9GIE4_0 motorcycle P72vKWjKtik_0 truck P741OzHLvig_0 dog P8BX8WSWRm8_0 bus P8K2yXmSMwY_0 bird P8MCMBcqM00_0 motorcycle P8MCMBcqM00_1 motorcycle P8h9iD7kPRQ_0 bear P80sglFzhRI_0 bear nsS9iSqNMew_1 bus ntO6br-N89w_0 cat ntVDuucoRIk_0 cat nuVxM9m1nb8_0 motorcycle nuVxM9m1nb8_2 motorcycle nvIi1SvX-sU_0 dog nvXKI_MhTTE_4 knife nvYTcYLFUvc_2 dog nvdIoQ5mj64_0 knife nvxwnGRXwZY_1 dog nxJkhdCqhc0_0 dog nxUe9yoeHvs_0 bear nxYGMvfgi8g_0 person nxj_aavOM50_0 boat nxmr9gg0ses_1 bear nx9Uisdggps_3 knife nx9Uisdggps_0 knife nyOaHbw3DLo_0 cat ny2pC-BfLT0_2 dog ny2pC-BfLT0_0 dog ny2pC-BfLT0_1 dog ny3nZLL4cQ0_3 motorcycle nzGPh9yFDTI_5 truck nzQqdKnkQ9I_0 zebra nzppX26-51c_0 boat nzytVTFaYvs_0 knife nzytVTFaYvs_1 knife nzytVTFaYvs_3 knife nz9DMQ9cPrw_0 cat nz_YTLNErSY_1 truck n0P8wVonqY4_0 motorcycle n0T51DP8868_0 bird n1VbuQk_3JY_0 bird n1ZrqU8VSBA_2 bus n2Xd8e_vz0w_0 cat n2Xrvmq2r2I_0 cat n2jvWkboChM_11 bus n2jvWkboChM_10 bus n2jvWkboChM_14 bus n3EKpxnV5U8_0 car n3bFZVLqNvI_0 umbrella n3iNRmzhO1U_0 motorcycle n3pRNFU0ovc_0 bear n3pRNFU0ovc_1 bear n38NmPI7Sss_0 boat n4cdQF8d8UI_0 
knife n4mWuEmbbEM_0 bird n5J7UxAi_70_5 car n5J7UxAi_70_1 car n5J7UxAi_70_3 car n5J7UxAi_70_4 car n5i5aZXPgok_1 bus n5ojrsEczYM_1 truck n5wZ3Zin9uQ_0 bus n5wZ3Zin9uQ_1 bus n6cpTMT-Ci0_1 car n6sMWDd_j1c_0 cat n6wMhru1Mx0_2 car n7HaOXaXWJw_2 truck n7NWTiq_W-c_0 boat P9sfOBt9FI8_1 bird P95Pyq4kglE_0 knife P95Pyq4kglE_1 knife P-EecPZ9zV4_0 motorcycle P-JbMZ89Hac_0 car P-SIr3rYBzg_0 umbrella P-lf6syyjAs_0 cat P-tXkGlSa_8_0 motorcycle P_A56tkbbmk_8 umbrella P_A56tkbbmk_1 umbrella P_A56tkbbmk_7 umbrella P_un1_qBDWo_0 umbrella QATQMMA9vo4_2 motorcycle QATjEG1LPL0_0 bear QA4LOoc1Crg_0 truck QA__knfzZZM_0 bird QBZUbx6SUyU_2 bear QBbAz7q7E9c_0 bus QCDUv9KNiWQ_2 dog QCKzW_uA3vY_0 motorcycle QCl4OGNJdos_1 bus QCqvd4xHZLs_0 cat QCzgTA2cABU_0 boat QDQgSF9ciHk_4 knife QD4ioxu8LAk_0 cat QEMoyw7o_f8_0 dog QEQfoQOU_F8_1 bird QFB5gDukoqg_0 bus QGDhzG35q8c_0 dog QGDhzG35q8c_1 dog QGDhzG35q8c_2 dog QGDhzG35q8c_3 dog QGFSTul5MDQ_0 knife QGcd6O1NAkY_1 bus QGcd6O1NAkY_2 bus QGv8jcDgmBY_0 motorcycle QG25-t2CqY0_0 bus QG5tLrHw5Hk_0 cat QHVkPy7f680_0 car QHVkPy7f680_2 car QHhXgNBSjV0_0 umbrella QH2Vo_5h-x8_0 car QIe7ky6mJO8_0 bear QIqf221MKYo_0 bird QItwshU9sAQ_0 car QI65w7sMLtA_0 cat QJIgRLU_fU8_0 motorcycle QJfS9bR2S4I_0 cat QJsyPZ31U-0_0 cat QKG7PXh0UoU_1 bus QKG7PXh0UoU_5 bus QKG7PXh0UoU_6 bus QK9WWQe1WQU_0 bus QLTztdEJ8Ts_0 motorcycle n7dIhGKEzWM_2 boat n7hFNcaW9rw_0 knife n77hlwjlW_Y_0 dog n8IsRKE9S6k_0 motorcycle n8kFOAqnMao_0 motorcycle n9RozRHi7iI_1 knife n9RozRHi7iI_3 knife n9xiuvCd5Lw_1 bear n-fT4fcLulk_0 bear n-fT4fcLulk_4 bear n-gEIxTHjBk_3 bear n_EpRXVan0M_0 cat n_J23TUQdl0_1 bear n_PRUX4zrLw_0 car n_bIC-prc2E_0 motorcycle oARh23g1-LA_0 cat oAhYK7brhk0_0 dog oAhYK7brhk0_2 dog oBDdj5mkGyc_1 knife oBraEPvaSi0_0 bird oBuzx2dwA_Q_2 knife oBzhDbxL57k_0 bird oCUkN7ySpf8_0 motorcycle oCZ3WCK5BZU_1 motorcycle oCf-LgXx6Dw_0 bird oDHO9J7vFwI_0 boat oDUJYHwNuS8_0 bus oDsRL8dvgLA_1 bus oDsRL8dvgLA_2 bus oF81nMQlA-4_2 umbrella oGMlnXjD9R0_0 bird oGuIyQiDsy0_2 boat oGuIyQiDsy0_0 boat 
oH-XJADp0FM_1 bear oH-XJADp0FM_2 bear oH-XJADp0FM_4 bear oH-XJADp0FM_5 bear oI5l1By4H7U_0 car oI_peuU5xk8_5 motorcycle oI_peuU5xk8_0 motorcycle oI_peuU5xk8_3 motorcycle oJD17uQnW_o_0 dog oJK_TUb7HoQ_3 knife oJLVcOe7CEU_0 motorcycle oJervxxOCvY_0 dog oKTgwWf3FKA_0 dog QLxMt8F3oYA_0 cat QL4uK4sZxIU_0 cat QL-hkYCV0BQ_0 motorcycle QMEIKO8LcEU_0 motorcycle QMGNMAZLRFY_1 knife QMGNMAZLRFY_0 knife QMHCb6-qyQE_4 bird QMHCb6-qyQE_0 bird QMHCb6-qyQE_3 bird QMJHMIdkS0w_0 boat QMVKAdAOrNY_0 dog QNUGl2q9luk_6 dog QNVeq1dY-gY_0 bus QNV_xE7TePM_0 umbrella QNV_xE7TePM_1 umbrella QNaFT-Ch0Oc_1 bird QNgnQe-MASw_0 bus QNgnQe-MASw_2 bus QNibPLG3_Q0_0 dog QNibPLG3_Q0_1 dog QNibPLG3_Q0_2 dog QNrg73bCl7M_0 bus QN5joVuigKw_0 dog QOCUHjNieAs_0 cat QOGKQmMhYE0_2 knife QOQU7N2vIdQ_0 dog QOcPhbRnGh4_0 bird QOm8zog21wI_0 bear QOp31EvHfRU_0 cat QOs2s2r3hpY_2 bird QOs2s2r3hpY_3 bird QO1T0Gc_cJk_0 bird QPwnbNFbZyY_0 motorcycle QQAQLPTkDwg_2 bird QQAQLPTkDwg_0 bird QQh4Cpr7tpM_0 bear QQ7EaN8ArmM_0 motorcycle QQ-MUe-ni48_2 motorcycle QRXtuZBCXtA_0 umbrella QRZ_xQK1gx8_0 bus QRZ_xQK1gx8_1 bus QR3BO_SYrpQ_0 bird QR5EuXvYbms_0 car QSK1oOt_5R4_0 knife QSld_dZQvpY_0 bear QTPAOir-oYM_1 knife QThuW0gGa20_0 dog QTlzTtcPjwk_3 car QT0-oUhQtbk_0 dog QT17xRXmBGA_0 umbrella QVCd5pTgbds_0 boat QVRM0OueKFY_0 dog QVXv0Z1FCdg_0 motorcycle QVXzwEenImE_0 bus QWBwnViynQA_0 motorcycle QWFR4XdQv2Y_0 umbrella QWPkooq95So_1 knife QWPkooq95So_2 knife QWSsyFwwdO8_0 dog QWl839SnUOs_0 dog QW1BlOtH1bo_0 cat QXAw2xD7Sgc_0 motorcycle QXB7sLTVqfM_0 bear QXIGeVZ6Uqk_0 bear QXVQ8S7aUB4_0 knife QXjfaOwHSFo_1 motorcycle QXwh-lAa3Pk_0 knife QXwh-lAa3Pk_4 knife QXwh-lAa3Pk_5 knife QY2pVib4cZE_0 motorcycle QZOPux7sysI_1 dog QZOPux7sysI_0 dog QZhaeUKdGYk_0 motorcycle QZpfX1aipco_1 car QZui5buTy7k_0 bus QZ3FD2qszF8_0 motorcycle QZ3MWq6qwJI_0 bus QaGjoVfIWLQ_0 motorcycle QaM6ny5gEFQ_0 cat oKY-KsLfJe4_0 bird oKY-KsLfJe4_1 bird oKbCNTwLJoI_0 dog oKe3Rcvn_TU_2 cat oK9TjDSQdSs_0 cat oK9erjaiRq4_0 bus oLRDfgRIJ-A_1 bus oLSjl-qN4M8_0 dog 
oLrou9S3K-0_1 motorcycle oM_FQGUvPIk_1 motorcycle oNFmLa8pU3A_0 knife oNLkf1j-v6Q_0 cat oNZOg6XoSrY_1 dog oNbWPkOIdxg_5 car oNbWPkOIdxg_4 car oNyfqJGJhrY_0 motorcycle oPhE3ECqxf0_0 bear oPlhh62giKI_0 car oPrG5_acHVU_2 bird oP0yHq-dlRY_0 motorcycle oQV827pXDXA_0 motorcycle oQXdls5ffZc_2 bear oQXdls5ffZc_0 bear oQXdls5ffZc_1 bear oQ7ARK51eHE_1 dog oQ7ARK51eHE_0 dog oR-7d677bYw_0 motorcycle oSPVZs6_Bd4_0 motorcycle oSVes8uNT5E_0 motorcycle oSao8txZd7A_0 motorcycle oSb17xrITtY_0 motorcycle oSqq5UHBveo_0 bear oSxoAvNHNB0_0 motorcycle oS60CV9BFs8_4 bear oTYr-qD5JOE_0 bird oTj1e8RI67A_0 boat oTlwKNdm3rE_0 dog oTuVBf1jiPM_3 bear oTuVBf1jiPM_0 bear oUHa0FV0wwM_1 dog oUVJrf3WBrs_1 bus oUVJrf3WBrs_3 bus oUuQYVAvtgs_0 bird oVUE-0XhhsQ_0 car oVUE-0XhhsQ_2 car oVUE-0XhhsQ_3 car oV1vhE0ypUE_0 cat oV6wthYHnKA_3 knife oWFO_yss01s_0 cat oWI2O83zUJk_1 car oWI2O83zUJk_0 car oWYSJgX0THI_1 dog oXMW3YjDAqQ_2 boat oXaieymppqU_0 cat oX4YRc-No7Q_0 dog oYY_svQfTs0_1 boat QahJqWjC1v0_0 motorcycle QakBz4K6hqw_0 umbrella QbHAXTRKk8w_0 knife QbHAXTRKk8w_1 knife QbNU92uEUSc_0 cat Qbk_YIfY5q4_7 knife QcLZ-b-0PxY_0 boat QcU2S6m_GJk_0 dog QcuHNJWb-AY_0 car Qc0kbcpophI_0 car Qc5ZW-ni9ZQ_0 boat QeRfpcI_TTQ_0 bear QebJi8pjWkk_0 car QeeG_4eNyg0_0 dog Qe1-M3oVaFs_1 knife QfOdxYnCAKc_0 bear QfOdxYnCAKc_2 bear QfaVCQOGlMM_0 motorcycle QfgJh_s9H0I_1 bird QfgJh_s9H0I_2 bird Qfr5Fc1k7Ic_0 knife QfwCa3YapRg_0 cat QgRbpAz8TuI_0 bear QgRbpAz8TuI_5 bear QgRbpAz8TuI_2 bear QgXjMUMIe4Q_0 cat QhbwOw5dHPg_0 cat Qhc3Bb_6Uq4_1 motorcycle Qhc3Bb_6Uq4_0 motorcycle QhnEXqWFBuw_0 bird Qhxv39Tkzbs_1 dog QiHJ2uYByjM_0 motorcycle QjV-g1D6Be0_0 motorcycle QjV-g1D6Be0_3 motorcycle QjV-g1D6Be0_1 motorcycle QjV-g1D6Be0_2 motorcycle QjdGUh1FtN4_1 bus QjqhhoIx6nQ_0 boat Qj4Mfd45GOE_3 bus Qj4Mfd45GOE_0 bus QkPH2LBso5c_0 umbrella QkPLEWaH1bo_0 cat QkkuZ_G7t48_0 boat QkwI5-_QspU_0 cat Qk6G7eAHlCs_0 dog QlcaO8pkzd4_0 bear QliTvc637Yk_2 boat QlieDL9xPyU_1 motorcycle QlxQKy1yzyI_3 motorcycle QmP4xj9S0mQ_0 motorcycle QmR3bvWDA1s_0 boat 
QngGa73C1G8_0 cat QnnV6lKKIgI_1 knife QnuD7a8BM30_0 dog Qn9CU5O4FHU_0 bus Qn9Z0LVIxbo_0 car QoTopiP9k2o_2 bus oZLdU13R4uU_0 motorcycle oZoTyJNjCJI_0 bus oZ6Py8Tx-sA_0 dog oZ9qkN9Q1X4_1 bird oaXGm1MdDoA_0 cat oajaYAOs_oI_1 knife oa_73oVbH38_0 bird oa_73oVbH38_1 bird obbzKGrHOP0_0 bird ob70dcN35yg_0 bird ocNVbpQhB5g_0 cat ocPgZeXuFqs_0 car ocj3mV2T-ls_1 bird oc4RRoFoUo0_0 boat odsCgfz0yM8_0 motorcycle oeIBPeBAEv8_0 dog oeVUkEvC3To_0 boat ofDmsqy24k0_0 car ofJOKOICGco_0 motorcycle ofvHImJKiAg_1 bear ofy3Sid451s_1 bear ogIewcLFxLo_0 dog ogLOXI-Kvcg_0 knife ogzWVQ5TC80_0 cat oh7uEf_YE40_1 dog oiItk_51540_5 motorcycle oiKC4SxYNJE_0 bus oiRnmB7WQjQ_0 bird oiu_53B5AAc_0 motorcycle ojFBoKltgfQ_0 bus ojFBoKltgfQ_1 bus ojFBoKltgfQ_2 bus ojQfL_XgMM0_2 boat ojz2xLrH-Ts_7 car okKrvzNb9IU_0 car okiIzmV8YLw_0 cat okiIzmV8YLw_1 cat okzrd8v1G-w_3 boat omGx_muz0SY_1 boat omngVtTFM1I_0 umbrella oms2XkgghV8_0 boat QoqeX-W0RFw_0 boat QoqeX-W0RFw_2 boat Qo0mxFOMVGc_0 dog QpAWeYA1pc8_0 car QpDm5g1dELc_0 bus QpD7CVh2Z_c_3 knife QqdW9IMDHgs_0 boat QqdW9IMDHgs_2 boat QqdW9IMDHgs_3 boat QqhZnuITXs8_2 bird QqhZnuITXs8_3 bird QqkblYN1YOg_0 bus QrEjYyinITM_0 car QsQFhUd04jI_0 motorcycle QsQFhUd04jI_1 motorcycle QsV9BTogrKc_0 knife Qt78_24lkeM_0 boat Qu8xNQ6Vd04_0 cat QvgmjwKuAeM_0 umbrella QvqNodq3NxA_3 bear QvsjDkJ_oho_0 cat QwALBOsUby0_1 knife QwYxgsacjx0_0 knife Qw9UvjSO9_Q_0 bird Qxx3WjrGmtE_2 bear Qyc0xSSPT1E_0 dog QzCvBtKWPjg_0 person QzPFEeJYDcE_0 umbrella Qz1R2sk37qg_3 bear Qz1R2sk37qg_5 bear Qz1R2sk37qg_6 bear Qz1R2sk37qg_7 bear Q0HX6Jfnnb8_0 bird Q0J1QbF_Vis_0 bird Q0KhMTnvbxM_0 bus Q01P6P7bm7E_0 motorcycle Q0-7SsSXMV0_0 knife Q0-7SsSXMV0_2 knife Q1RqyDERgxM_1 bird Q1VXWNHzPqI_1 cat Q1VXWNHzPqI_2 cat Q197NAaQodY_0 dog Q2Sop28spdM_0 knife Q2bha73kLKM_0 motorcycle Q2vBCDtNAGI_0 motorcycle Q2zRXVl7bLI_0 motorcycle Q3ZxsgPKTGY_2 bird Q3ZxsgPKTGY_3 bird onoO4tamBlA_0 knife onpRejbK_VE_0 umbrella ooJg7-nxmUw_0 motorcycle opOHceUyoXk_0 cat opb_qoqO05s_0 bird oqUbqkDsSzI_1 knife oqvnxRx-0J4_1 
bird oq4KPP5PYAo_1 motorcycle orQkUDPfTg8_0 boat orTFjuPHzxU_3 dog orcE_uPKO_c_0 bird ormZXNXni-U_0 dog osYgSn6yOG0_0 cat os3H6KzvGEg_1 knife otHFt4YAKeI_2 dog otvQKWvIXAE_0 bus ouFwG2YU59c_0 motorcycle ouNsmVT6GRU_0 car ouqFEe0ud_U_0 motorcycle ovHCJGK35r0_0 knife ovHCJGK35r0_1 knife ovQY7VA36gU_0 bird ovRBelXjQ-A_0 bird ovaFSf6jda4_1 boat ovnkb_MuAlg_0 bus ov9yaGUtSEw_0 bear ov9yaGUtSEw_1 bear owKiuZVov4U_2 dog owaIraEDvqI_0 umbrella owaIraEDvqI_1 umbrella owb-43QL8Dc_0 cat oxKhcqfQV7k_0 umbrella oxZ42ECABUo_0 motorcycle oxdCJK5GPS8_0 dog oxyS9oNIBaQ_2 boat oy52khlb79k_0 cat oy885M8rmDM_0 bus oy_Efqu_Zhk_0 knife o0CsAQaDp1k_0 boat o0VArHW9gpE_2 dog o0yyk1GchoE_2 knife o06poedEjtM_2 knife o1RqDbHx0IA_0 umbrella o12Lc5yZNco_1 bear o2E2ypLvzOo_1 car Q34_kBWh3QU_0 motorcycle Q4IH3ZOVKFQ_5 bus Q4TELEHdcjA_0 motorcycle Q4YD_lW8JFE_1 knife Q4afI-fku0A_0 knife Q4d0z-q-UXQ_0 bird Q4jZeoLzZXs_2 bird Q5DrYh7pcTg_0 cat Q5RabF9bK3o_0 car Q5cY3mt9NHI_1 car Q5cY3mt9NHI_3 car Q6Lg4c8W2XQ_0 bus Q7SXsNoT9cc_1 boat Q7TDTHQoPGc_0 bird Q7TZ3TlDNzI_0 bird Q7V8JjnLW_A_0 person Q7a4tWAU7-o_0 dog Q8gHTSzR6h0_0 cat Q807ZgwscUk_0 cat Q9LvGsq1Mas_2 bird Q9fbeFbARPY_0 bird Q9qA-2ofuFc_0 dog Q9qA-2ofuFc_1 dog Q-JQokKqXZM_0 motorcycle Q-STF8c8RSE_0 motorcycle Q-S6ypfxn4w_1 bus Q-VqbNMPAjE_0 dog Q_a7bRv2dM0_1 cat RAQAfTprH5s_0 cat RAc8MyscjAA_4 bear RAc8MyscjAA_0 bear RAc8MyscjAA_3 bear RAqMmf5FS_Y_0 dog RBNNklw-NjE_0 car RBNNklw-NjE_1 car RBdpxD5mMy8_0 cat RBssHo0ygdI_2 car RBssHo0ygdI_1 car RBvocl1t9qM_0 car RBvocl1t9qM_1 car RCzBVv_Vddo_0 dog RC444E40nLY_0 cat RC_ckl7o7sc_0 dog RDq9wvYEiSI_0 umbrella RD8OUO8u7oQ_0 person REBpFtJosSc_3 bear REBpFtJosSc_4 bear REBpFtJosSc_0 bear REbm5i5vhcQ_0 umbrella REbm5i5vhcQ_1 umbrella REiwqNPkmew_4 bear REiwqNPkmew_3 bear REjT99mHV_g_0 cat RFIE-agz3SA_0 dog RFUZkHtGWvg_2 bird RFUZkHtGWvg_1 bird RFZG72_XG3U_0 motorcycle RFcz2p3w1oc_0 bus RFhEq5WF9Io_0 motorcycle RFqSKdzXQFQ_0 bus o2z2zu4L1Ho_0 cat o3OdAgJnYlw_0 umbrella o3TpeQ7mhIQ_0 bear o4It_gqHKoM_0 
bus o4It_gqHKoM_4 bus o4It_gqHKoM_5 bus o4bpCoFINtY_0 bird o4yKF7ZQge8_0 cat o4yxnKhoWrQ_0 cat o49yvv0vmJQ_0 knife o5TWf69h978_0 motorcycle o5bJmNSZmGE_0 cat o6vw6_1pc_g_1 person o6x94jhuMEw_0 cat o7UXYGmFww0_0 knife o8BqJTsAjnI_0 boat o8BqJTsAjnI_2 boat o8Gr9wZzcA0_0 knife o83uI_tdkrE_2 car o9UpoUWgJWw_1 motorcycle o9YqiVSTBVs_0 motorcycle o9qB9kYt9Bc_0 motorcycle o9vRwcqz30w_2 bear o98cAmKOAtk_2 truck o_BpJHlv8bY_0 cat o_NYHfqWzBw_0 cat pAP3j2UmTAA_0 car pAuz372kMrs_0 boat pAvBjM_cSCk_0 umbrella pA_f-DZ2FdI_1 bus pBj4KFDTwGg_0 cat pCPwOGObTcs_0 umbrella pCXmnj6vY7o_1 knife pCa3Tf27TcY_3 bear pCdwcy8npiE_2 bear pCfA0E-TIXo_0 motorcycle pC9mu-CQ9fg_0 cat pDjjH1_G6Z0_1 motorcycle pDjjH1_G6Z0_0 person RGT-FumEK7I_0 car RGXgv5gqM8k_0 umbrella RGiE9-CME30_0 motorcycle RG6y27UUUMI_0 knife RHHOcUqVF80_0 knife RHSfZLRz95o_0 boat RHrnX__15lI_0 car RIBigSX5_90_1 bear RImslgwYbYk_2 boat RIwUvnURoqs_0 cat RI14PaJgb7E_0 umbrella RJ95URcz63g_1 motorcycle RJ95URcz63g_0 motorcycle RKZ4YVnDywQ_0 knife RKa1tJXFTAw_1 cat RK8ZJaF2QHQ_5 bear RK8ZJaF2QHQ_6 bear RLP9M0bfpWo_0 umbrella RMapunE2wEc_0 boat RNPKsQSr2o8_0 knife ROfxuPZWET8_2 bear ROkJ79Y9T7s_0 motorcycle RPJ0SJeC5ck_1 car RPJ0SJeC5ck_2 car RPWms_VL6wY_0 bus RPhdhEKBBAM_0 motorcycle RP81F6rIP4w_0 motorcycle RQ5liX_fOJw_0 umbrella RREV1E0Mbhs_1 knife RSXIvkOJQq0_1 knife RSq71vJH9yc_0 bus RStmsJCm7mo_1 car RSztnKS1IYI_0 car RTTysK1hBpg_0 boat RTvVXaA35DI_0 motorcycle RT0tTVP14XE_1 umbrella RT0tTVP14XE_4 umbrella RT0tTVP14XE_6 umbrella pFCVfOX_UJ0_0 umbrella pGJMt9Jmk_Y_0 car pGnZDXcCjSc_0 bus pHC850dBc-E_1 car pHf0EP0QU9Y_0 cat pHueI1IUqzg_0 car pIhqwiD8cks_0 bus pJXxn2DRWyI_0 bus pJYetmKuiE0_4 bear pJj28cMLcZc_0 knife pJl14EZ6-Mc_0 umbrella pKPRv5lL_DQ_1 motorcycle pKz_g-J2O-A_1 bus pK1umZxS4nE_0 knife pLEV-uFmv6I_0 cat pLI_HgRsRow_4 bus pLQDtquQaSE_0 bear pLp7vmowqNs_0 motorcycle pMHRlQ2NxeA_1 boat pMaT7qWMaV4_1 bear pMg2xwjkfVc_4 umbrella pNHKmiurxTg_0 knife pOCvwILBOCY_0 boat pOjuNMevoaM_0 car pOq6RrgrXWY_0 motorcycle 
pPyL4U8gYpM_0 cat pP22coNl6r4_0 bus pP5q-Bszfh0_0 motorcycle pQMkOOTP0Lk_0 cat pSJypg6az1w_0 bus pSjKd_x9ycU_1 boat pSz961UYSrY_0 motorcycle RVvfyYc8jws_0 umbrella RXAW31Vm7pU_0 motorcycle RXQ-E6_Y__c_1 car RZAlTTj0Z4o_0 motorcycle RZAlTTj0Z4o_1 motorcycle RZL2H_-y3vE_0 umbrella RZrAehHE8aA_2 knife RZrAehHE8aA_0 knife RZ0yQkyeSd8_0 boat RaZy_JiiJ3E_0 motorcycle Ra48MJPLmUw_2 motorcycle Ra48MJPLmUw_0 motorcycle Ra48MJPLmUw_1 motorcycle RbQTcoldE8M_0 bus RbRqkcC6l_A_0 knife Rb5tGSqtlFU_1 motorcycle RcSm0O0Ylc0_0 cat RdNjlTlNbEA_0 bus RdP6hW5p6ys_4 car RdUjywh70lM_1 cat RdlWUo9fYmA_0 motorcycle Rd4TvDZNwHs_0 umbrella RfNyu5aooJs_0 car RfrtTbza00c_0 boat RgBWTOo9hqo_0 cat RgC0rdZCy2c_0 motorcycle RgFR8z8IzAQ_0 cat RgUwlXzmX4Q_0 boat RhYw3jSi0xY_0 bus Rhqz5maRjNs_0 cat Rh0zI8vpRWk_2 knife Rh7Y69j41EY_0 bus RiCptCjnrqk_0 cat RiOw5wO0xTg_3 knife Rid6twPtgIo_0 cat pTGbMPGsbCU_0 car pTSbrP23T0s_0 motorcycle pVCT-jEaSPE_1 bear pV8hPodV-zY_0 motorcycle pXBltXzZZe0_0 car pXcoix_wq4E_0 cat pZC4kceO-0g_0 bus pZJDlV5VS3Y_0 motorcycle pZ7RohF8JgE_1 knife paF1hQf-YFk_0 boat palM4nIm6GU_0 motorcycle pba0HVNnmbc_1 motorcycle pcOsY0MSbh0_0 bus pcb_jPcg_U8_0 bus pcpHHo_gp-Q_0 cat pc2aHxzJDtQ_0 cat pdDVE4LsX54_4 car pdDVE4LsX54_0 car pdDVE4LsX54_1 car pdDVE4LsX54_2 car pdDVE4LsX54_3 car pdDVE4LsX54_5 car pd0IEWCwpUY_0 bear pd1BZjvbFNI_0 knife pgKdcFb2680_1 motorcycle pg4m5Fi0Mhc_1 car Riq87Q_unPU_0 cat RjDo0UDX9Ws_1 knife RjItZnZQBKk_0 car RjqDxu3wf5o_0 cat RkSzsg-k14I_0 boat RktoQu-Wk0M_0 cat RmFxIMl1tSU_0 bear Rmpv0oMhUCc_0 bus RnEWcQNxWGY_0 motorcycle RnPY8wgKxj4_1 cat RnQ-v8AJQbc_0 motorcycle RnjU70B_0cU_0 bear RpTRF_oB1-I_2 bear Rpn1EcI_ESo_0 knife Rp8euBdhkR0_0 motorcycle Rp8euBdhkR0_1 motorcycle Rqs856i0jbs_0 umbrella Rrj0e5VSIgY_0 car Rsw947loMaA_0 cat RtSEfWF3PdI_1 knife Rtng6SCToEM_0 car RufUHX-TjyM_0 bear RvHvTQC9Kr4_0 bear RwC5kkt5VDU_1 person RwC5kkt5VDU_5 motorcycle RwVgY7zgnYM_0 knife RwVgY7zgnYM_1 knife RwYiNSlAYcE_0 car RwpY0u7t3vE_0 umbrella Rwp_dTfFI28_4 boat 
Rwz5T35lNgY_0 cat Rw5dzv79c-M_1 motorcycle RxLwy_iZqKg_1 bear RxWhDOyHYNo_0 cat phJS1iN6HFo_0 umbrella phTyZcbKeQw_5 bus pihR4mhfwxM_0 motorcycle pim0lzR8i1g_0 cat pix5Cxt_fUM_3 knife pjgi60dJalw_0 car pjgi60dJalw_1 car pjhNnA0142Y_0 motorcycle pmszdloBDwA_0 bear pmszdloBDwA_2 bear pmszdloBDwA_5 bear pnMd28rPX7M_0 motorcycle pncTBxEM4WM_0 bus pnjPhdpuKGc_0 motorcycle pn0ZChK2ASs_0 bear ppAj6dnl62Y_0 knife ppAj6dnl62Y_1 knife ppJXGy7snUw_1 knife ppwjIgwParM_0 boat pq1swOh85gc_0 boat pq1swOh85gc_2 boat pq1swOh85gc_1 boat priwWNrQnkI_1 bear prwglbuvyZ8_1 knife prw0IWDYBUM_0 cat pr3LOwTWNnk_1 bus psOuOLCJNk8_0 cat psTqTt0np_I_11 bear psTqTt0np_I_3 bear psTqTt0np_I_6 bear psUASBNRwIE_0 car psUASBNRwIE_2 car psUASBNRwIE_4 car ptCx-L_n2Yg_2 bear ptNC5ou_rOQ_1 motorcycle puZUIBS4Ceg_0 cat puw9BfAKOHU_0 bus RxiBbfFH3is_0 knife RxiE2beIvjQ_0 bear RyWLXS1Vrco_0 knife Ry4q0UokRjo_0 motorcycle RzWczJnyzmg_0 cat RzWdM4_lg2c_4 bear Rzj5xv434WA_0 bear RzrQOptkjFM_0 motorcycle RzrQOptkjFM_1 motorcycle R0hj1kAnMgs_0 car R0w6j1wmwo0_2 knife R0w6j1wmwo0_3 knife R1Fkwaa8CxU_0 motorcycle R2FlyNrjZBQ_2 boat R2FlyNrjZBQ_1 boat R2Fps165H9g_2 knife R2XiIC1qbAM_0 bear R2YmjDNC8oo_0 bear R2duXYQhnFA_0 car R2sy6qbPc4c_0 car R23ZSmBA2Rg_0 knife R3zhr1iboG0_0 bus R4ktPNCb564_1 bus R4vLajpLSMk_0 cat R5CBlOfUL4w_0 person R5cIoEcqZ9E_1 knife R5r3AIx_BoU_1 knife R5r3AIx_BoU_2 knife R6PuHPDiwPs_1 car R6f_t-MqO_s_0 bus R6tsNuvoTus_0 car R6uZ5JpxQ88_0 cat R6wk6JHQSeI_0 knife R6wsV6cYN_w_1 bus R7w-mdDyhG8_2 knife R8TV702EIqs_0 knife R8j0mjQR4lI_4 boat R84Bj4PKOvE_0 bear R84Bj4PKOvE_1 bear R9LK4x3pO0Y_0 cat R9L1I9EEE0g_0 motorcycle R9zDzUslz9g_0 car R9607CioN3U_0 car R99fGQRB6rM_1 car R-UGxl6KGoo_1 bus R_LEKDTlVvs_5 boat R_NxqXdz3RA_0 car R_UPR78XIvA_0 knife SAFptHT-UpM_1 boat SAFptHT-UpM_2 boat pvrO7c2imos_4 car pwgqJO3yKHI_0 cat pwwdlKxLCqQ_1 knife pxBtDlmwesI_0 car pxIlEGkEw5U_0 cat pxwl3iVkx08_0 boat pyAuY2v2U0I_0 cat pyTXP2GZRuM_0 knife pyTXP2GZRuM_1 knife pyTXP2GZRuM_2 knife py0K3KEYfjA_2 umbrella 
py0K3KEYfjA_4 umbrella pzZvI_g1S8M_0 motorcycle p03u2BJIvyE_0 bear p03u2BJIvyE_1 bear p1p9QUFIi_8_0 bus p1_thBtA2-g_1 bear p2pRN03gXFk_0 cat p26eBX5AGCo_0 boat p3MF-uxvtWk_0 bear p32jOqTS5ec_0 cat p4MmW7gFlLI_0 motorcycle p4MmW7gFlLI_1 motorcycle p5NxEAfgmro_0 motorcycle p5bLvlU8ua0_0 motorcycle p5lUPYsz-HE_0 cat p5vt7l9pW-0_1 motorcycle p5vt7l9pW-0_0 person p5_O08ZNK_c_0 motorcycle p6GkhJZsCi8_0 cat p6Rtu645O08_1 motorcycle p6Rtu645O08_0 motorcycle p6dBx3tBRr4_5 bear p6dCoZRaQOA_0 boat p6dCoZRaQOA_1 boat p6dCoZRaQOA_2 boat p7OlEbiu5to_0 cat p7WwUD62qfY_0 motorcycle p7gjVQyX07A_0 cat p7pnYAaDqPI_0 umbrella p7sHze5SC0g_4 bear p8MEDllYMKg_0 cat p8RUtiaGu5U_0 cat p8ZUCNMnKpE_0 car p89fuT8e_zk_0 cat p8-8JqAgtv0_0 motorcycle p9XjLjpQX-8_0 cat p9by0qLqHOQ_0 knife SAkHT1Ozg1c_0 motorcycle SAkHT1Ozg1c_2 motorcycle SA1Tb1XbngU_0 cat SB1UBp1PVf4_2 bus SDKsL-L7GbI_0 knife SDbe9JVnITk_0 knife SDk3Y3jzalg_0 knife SEp92WMharw_0 bus SExW2mVb1Mc_2 car SExW2mVb1Mc_0 car SExW2mVb1Mc_1 car SE5Rg8Qpb8c_1 knife SFB2FGuZb6w_0 motorcycle SFMc-UCkcT8_0 cat SF8c7EeFPPk_0 motorcycle SHcJfBJBQe4_0 bear SHxyKRdKRc8_0 cat SHxyKRdKRc8_1 cat SH1noq6GrKw_0 knife SISqo1FBefA_0 bus SIbLAYX2J_A_0 bear SJAZnOnRtag_1 bear SJsxWsiEuTg_0 motorcycle SKNl4frouUY_1 knife SLEOr8bmm2w_0 motorcycle SLEOr8bmm2w_1 motorcycle SLzqvins4p8_0 bear SMYpv_Ea3w8_0 person SM6BtnyDz5w_0 cat SNZ0xGGmZvU_0 knife SNhnfqJHoI4_0 motorcycle SNl4Gq_2aVQ_0 bear SNrosAtwG2k_4 bus SOYkQc-toMU_0 bear SOYkQc-toMU_2 bear p-J0yyoF0lU_0 motorcycle p_C9Zwt3N5c_0 umbrella qAJSLnflSrQ_0 cat qA5rC8MxCoA_2 bear qCzILENpEWk_0 boat qCz4ft26CAw_2 knife qDobzjbo_aM_0 cat qEcNn2_TQC8_0 cat qEei5YCRiHA_0 car qEj3r8dtvKg_0 boat qE5fKHWTLMw_1 bear qFR-yuWiHVk_3 knife qFR-yuWiHVk_4 knife qFwugOO0pC0_0 knife qGjYX-iNrPE_0 boat qGohF2oMPS0_0 motorcycle qGxfRwBmBEc_0 motorcycle qGxfRwBmBEc_1 motorcycle qHKwI-35nNU_0 motorcycle qIIu-MIIYIE_0 boat qINDYDOlPLA_0 motorcycle qIPydTwqwmI_3 car qIPydTwqwmI_0 car qIPydTwqwmI_1 car qIPydTwqwmI_2 car 
qIkNPwKd6ag_0 knife qIkNPwKd6ag_1 knife qInP3tWVtWE_0 cat qJMxoAbx9YU_0 boat qKxQVpaLChg_0 bear qLfa8e4ffQY_0 bus qL6LVXg4Vt4_0 cat qMEMl1FFVIM_2 umbrella SPRByN4TiFg_1 boat SPsOjXxZymk_1 boat SQ_ChhUwWng_0 bus SRUB2kzDBTk_0 person SSFOqr1ARgI_1 umbrella SSaN8vntuYs_0 bear STTRwCtQ8_8_0 boat ST6aA292Pos_0 motorcycle SUMc-5fiNzQ_0 motorcycle SUnPNgAE_ho_2 boat SUyRs3xvc9c_0 cat SVBc-W37yW0_0 umbrella SVSMGxy8Z6I_0 cat SVXaBPnNWO0_0 knife SVXaBPnNWO0_2 knife SVt7vQ8LYZU_0 bear SV70cwNA6o8_0 knife SWJyq_mITbE_0 boat SXmy9BLHr84_0 bus SXvXN3waFWs_6 bear SYCg5NuWc60_0 motorcycle SaHw7yyoeJg_0 cat SaSgclGWGwE_1 motorcycle SaSgclGWGwE_3 motorcycle Sa1iRLR4d_c_0 bus Sa4L2rdyD10_0 knife SbWCXCuXBqY_1 bear Se3XbBA4N4o_3 knife Se3wtx4DzwE_5 bus Se3wtx4DzwE_1 bus Se3wtx4DzwE_2 bus qMlYXZy1Tow_0 bus qNfS9Y5zs-Y_0 car qOaABf_zb9U_1 boat qO7qHolBYj4_0 bus qO8D0E7MjOI_0 cat qPGkJRPae6A_0 bus qPMDgkgSTnA_2 motorcycle qPaox7otsVI_0 knife qPwAWEtJBqA_2 motorcycle qPwAWEtJBqA_0 motorcycle qPwAWEtJBqA_1 motorcycle qPyR7CpZ6l0_0 knife qP88t7GfZc8_0 knife qQaIW7IjCZo_3 motorcycle qQaIW7IjCZo_0 motorcycle qQaIW7IjCZo_1 motorcycle qQdtuBd-SgI_0 knife qQlsMjenbfE_2 knife qQ5tf8s7KrE_0 bus qRO6U_tg6SE_0 cat qR4kw8rf-FU_0 motorcycle qSQGG-K89mg_1 knife qSgOYqBt_8k_0 bus qSnoKy6T22k_0 motorcycle qTKtODdEZIg_0 cat qTut_O_LppA_0 bear qT00uOC9JpQ_0 car qUuTEKdKNNg_0 car qU7DT4ipQHw_0 cat qVSnhT0Luh8_0 cat qVyAlx4rMTo_2 bear qV7U9CRjZGI_0 cat qWN8i7sJyVg_4 umbrella qWcXQWy7yw8_1 bus qW-zRq8VTV0_0 boat qX8RcjE0tjs_0 motorcycle qX-YEHlu0Kg_2 knife qZWxhCk8AX0_0 knife qZf1fw737A8_1 car qZyxILyLOv0_0 knife SfZLu5uG7mc_0 car SgDdyLB3fFo_1 motorcycle SgHH9KN_nkY_2 motorcycle SgOvlqqKbEI_0 bear SgSsk-eeClA_0 cat ShHLzcBozxo_1 boat ShPl28Zw1kU_8 car ShPl28Zw1kU_3 car ShPl28Zw1kU_7 car ShPl28Zw1kU_9 car ShaLoFJZv-M_1 knife ShhC84AwZ04_0 bus Sh6uHJRUnP4_0 cat SiSP3Kko4VM_0 bus Si3psXQA46c_0 bus SjLNVLIdpbc_0 cat Sj0pcvct_3k_0 motorcycle SkLwUmczAMo_2 knife SkLwUmczAMo_1 knife SkVIH0IZI1I_0 motorcycle 
SlBZM22tlSU_0 knife SlIzgQZ63h4_1 knife SlWmnHWeqIE_0 boat SlYqzpZkWho_0 bear SmCvuBfyU5o_0 motorcycle SmCvuBfyU5o_1 motorcycle Sn8nb_cv5K4_0 motorcycle Sn8nb_cv5K4_1 motorcycle So5dCmgNRtU_0 bear So-dFj7N07Y_0 car SpGfQe7sWIQ_0 motorcycle SpuAy2Z1ejE_0 boat Spx8fHkY0Ac_0 bear SqUzKvBRVmQ_0 cat SqkoepvLN3c_0 motorcycle SqkoepvLN3c_1 motorcycle Sq-LvVdVwhc_4 bear SrBwCHcEe4g_0 cat SrPgW-L7Gps_0 bear SrTxMAryank_0 knife SsQb12lMU_w_1 car SsQb12lMU_w_2 car qZ0egYy10zs_0 cat qaKYHGIZ8tU_0 cat qantWNz3Z-k_0 bus qc1U41zjMfI_0 knife qeSfa-Xin3s_0 bear qfZHHSjai5Q_3 motorcycle qfZHHSjai5Q_5 motorcycle qfZHHSjai5Q_0 motorcycle qfZHHSjai5Q_4 motorcycle qfZHHSjai5Q_6 motorcycle qf4dZ323eu4_0 cat qf5FQP-vjpY_3 bus qgYBD0GBerg_0 knife qglTXvFe5vw_0 motorcycle qgr1pdkQkKM_1 knife qhTOaoL2B54_0 bus qhgQ0_y6Jr8_0 motorcycle qhyihSkbubs_1 bus qiW4cUVZCJA_0 motorcycle qjfkIHC3sNA_0 bus qj1y76m_WFg_1 car qklXdTo1CKQ_0 truck qlGmmBY7ITI_0 cat qlGmmBY7ITI_1 cat qlfCKWLj_xU_0 boat qlvwUVksAC4_0 cat qnaQOGGmyhI_1 motorcycle qo2tG-wOpLI_3 car qpBRU2SONe0_4 bear qpNPlLO7Wdo_6 bus SsWwZCQR8pA_1 bus Ss6lM7iutJ0_2 boat Ss-ENa079_Y_0 car Stg0xs4yv5A_3 bus Stg0xs4yv5A_1 bus Stg0xs4yv5A_2 bus StoHoHg6XHo_0 motorcycle SuoVrAXkHsM_1 boat Sv-Xsjm8Seo_0 boat Swfda4hcQzo_18 umbrella Swfda4hcQzo_0 umbrella Swfda4hcQzo_3 umbrella SwrxLGIVuNg_1 bus Sw01FqLPH0o_0 motorcycle SxxBAhDGWzU_1 car SybtH9db7tI_1 boat SybtH9db7tI_6 boat SybtH9db7tI_0 boat SybtH9db7tI_4 boat SybtH9db7tI_5 boat Syk5Jc9_tQA_1 boat SywBQoMh8Q8_1 car SzD0AW8MKxY_1 car Sz3ay4xexe0_0 motorcycle Sz3oWSS6V3s_0 bus S0AoM2Xz64Y_0 motorcycle S09dKnW798o_0 cat S12WKCebYHg_0 boat S2YoTKzOHW8_0 umbrella S3O_xjPQToU_0 knife S4lNN0zJE4A_0 cat S49Hdfpc-SI_1 boat S5VjgUVKjV0_0 cat S5Z4g_SORHc_3 knife S5Z4g_SORHc_4 knife S6crKzUWKYI_0 umbrella S6ksiMdECu8_0 umbrella qp11ZgRmeck_1 motorcycle qqd7FMwn5Ks_0 cat qqmk0BKAubw_0 boat qqo83uqRldw_0 motorcycle qqumKQ_igJQ_0 motorcycle qqumKQ_igJQ_1 motorcycle qrHPEAVq_yE_1 boat qrJljeVBE-k_0 boat 
qrJljeVBE-k_2 boat qrTOqXRwHqM_1 bear qrTm-7zA5FM_1 motorcycle qrU7MAMf42A_0 motorcycle qrfZoDvW7wI_2 bus qsFkwL9ikBE_6 umbrella qsFkwL9ikBE_0 umbrella qsbpGZepU_4_0 motorcycle qs4ACjrDQvo_0 cat qtEJPGYfmb0_0 motorcycle qtQNJD43Z30_0 knife qthVtX1KeJY_0 cat qtmXJD337Sg_0 cat quMSh4JZfSE_0 bear quSzbk4CkBE_0 car quZjkqmOTys_0 cat qvAPzGCqVG0_0 bus qvAPzGCqVG0_1 bus qvCVL7reF8g_2 bear qwBsDRYIhwg_0 cat qwI3fCK486I_0 cat qwZ_bpVY018_1 bear qwcgkEVHQS4_1 motorcycle qxwgvTIA0Oc_0 umbrella qykj452YYlU_0 boat qzjG5RMNfB0_0 cat q0tjDTtHr00_3 knife q1LbqldHuM0_0 knife q1QElQCedrc_0 umbrella q15Lr3-V3qI_2 motorcycle q2K3ctdaVGU_0 knife q2MasRNKQxI_0 bus q2NfowB59fs_0 motorcycle q3J7hUfBGGQ_0 cat q4EXWy685Wo_0 person q4EXWy685Wo_3 motorcycle q4EXWy685Wo_6 motorcycle q4EXWy685Wo_7 motorcycle S7SEfKdokC0_1 bus S7-k1XdAR7Q_0 cat S8BbQRnxfqY_0 cat S8WFgIrdEyI_0 car S9LooqaA-VA_0 cat S9wDiwQMla8_0 person S9wDiwQMla8_1 motorcycle S9wDiwQMla8_2 motorcycle S9xCWTCFhNc_0 motorcycle S-T-e07Bgys_0 motorcycle S_K_nwYUS2o_0 cat S_09gd9e0zE_0 boat S_5w6lmw0DI_0 knife TAzjOrAfzFM_0 cat TA1NbMN7gNo_0 motorcycle TBvuwl0phUE_0 motorcycle TBy---hD-FA_0 bear TB9qJG8A-H4_0 car TCS6svwO2AE_0 boat TCVj-PtxnsQ_0 bear TDSmQkKnGFU_1 car TENive2WCAw_0 cat TFUV5Dy2MvE_0 motorcycle TFu5bNUW02Q_0 bus TIZr3Y-PLQM_1 knife TIpoS2Jymv8_2 knife TJJgVPay9LE_0 bus q4zFevdC3-w_1 knife q5D67534lFM_0 motorcycle q5ESvcujAps_0 person q5wOimcVyaI_0 cat q6YyhMSTSjg_2 bus q6YyhMSTSjg_3 bus q65QzEDi_jo_1 motorcycle q8nG4OvfGhY_0 cat q8oKL5zvWZw_0 cat q9QycGD31Co_0 cat q9ZSVLXRUx8_1 cat q9p4QZdwQ0I_0 boat q-Sw3Dx1Xb0_0 knife q-lbxXK_UY8_0 bear q-nt9k61jqQ_2 boat q_NnyABqOFg_3 boat rAcvNOp95pA_0 car rApBsMx8ZjU_1 umbrella rAtKVQ_h94Q_1 car rBLqbf-KdaY_0 car rBjCxCwLz84_0 car rBl7T312SPQ_0 cat rBnSmzTRsqE_0 car rCAA1xoobto_0 car rCOxllaoO64_0 bear rCrQRhaJeAA_0 bus rDEW_AdTSH4_1 cat rDEdeXsgOdU_0 umbrella rEL7A7rKARs_3 knife rFF0purpqAU_2 knife rGgvqpRsaew_0 bus rGlpoWppAfU_0 car rG4cDTukyNw_0 car rG4ld81Rxt8_0 car 
rHHUlsaTde8_2 bus TKCXvzTT2ws_0 umbrella TMyv9XNlPGQ_0 bus TQWq_YDrKc0_2 knife TQm0C-2ersM_8 boat TQm0C-2ersM_10 boat TQm0C-2ersM_1 boat TQm0C-2ersM_5 boat TQm0C-2ersM_6 boat TREARdQ16GQ_0 car TREARdQ16GQ_1 car TSQwlIeADdw_0 bear TSQwlIeADdw_1 bear TSQwlIeADdw_2 bear TSpUcayboiM_0 car TS7UuEszy9E_0 car TTQQky-HcCs_0 knife TTdbV_lHq_s_0 cat TUrnPZr3eXs_0 bus TVjvTR7CrNE_0 knife TVvo40ERO9Y_0 cat TW6cU7OYa60_1 cat TXrnNVUe53o_0 boat TXsQGHJjWhI_2 knife TX2BAlXe5IA_0 boat TX2BAlXe5IA_2 boat rIUepAhKVnM_0 cat rIc3ZEnqjQA_0 umbrella rIezbmq7N9U_3 bear rI79TJwwnW4_3 knife rJGGo2bI150_0 bear rJGGo2bI150_1 bear rJGGo2bI150_2 bear rKiQjOPzf0s_0 cat rKs2bGgU29k_0 cat rLm1866Q28U_3 umbrella rLm1866Q28U_0 umbrella rLm1866Q28U_1 umbrella rNlm7i1BcaQ_0 cat rNw1jiERG4I_1 car rOtd7pdh-zY_0 cat rO0qo7r4TTc_0 cat rPCOxxRwiTM_0 bus rP6vb-cxVcI_0 bus rQBwAWkz3Ao_2 boat rQBwAWkz3Ao_0 boat rQBwAWkz3Ao_1 boat rRL4f466oNQ_0 umbrella rR9vwlyXtYs_0 bus rSNfdcbzEhE_1 boat rSNfdcbzEhE_2 boat rSNfdcbzEhE_3 boat rSNfdcbzEhE_6 boat rSNzuWEgSeg_0 cat rSWYvSf29vQ_1 cat rTM-3OYHQZA_0 bear rTM-3OYHQZA_9 bear rTreVVS3XVg_0 umbrella rUcsGq10bCk_0 umbrella rWLG9njOx1k_0 car TYuoW3gezZ4_1 car TZFETDh9bQo_1 bear TZFETDh9bQo_3 bear Tain2YW14ok_0 umbrella Tb943q0WnTY_0 car TcfdUbzZcIc_0 knife TcnKT-jCrxQ_1 bus TcnKT-jCrxQ_0 bus TcnKT-jCrxQ_4 bus TdmeXkKeGmE_0 knife Tdxsosl1CIk_0 umbrella TeF2gxyzjF8_4 knife TeM8oPJR8nM_2 bus TeM8oPJR8nM_4 bus TeM8oPJR8nM_7 bus TeSMF-Tw8b8_0 bus Tf8ZmK4GZYU_0 bus Tf9piH7b4Js_1 bus TihSkV4th6I_0 umbrella TimXSaV1u4M_2 bus Tjs55_3zB_o_0 knife TjvHNNlcym8_0 knife TjvHNNlcym8_4 knife Tj-U_ZtaHe0_0 boat TkmEiKe_Uto_0 boat TkuUMAPSGiU_1 car TnN1RBRfLnE_0 umbrella TnN1RBRfLnE_1 umbrella TnXDBpRvE_U_0 bear rWw_OZqgPk8_3 bus rYlL6avPERw_0 car rZDchhWp8lc_1 bus rZ7XejB4nyk_0 boat rawi3Ka9Oew_1 car rawi3Ka9Oew_0 car rbONk59p13Q_0 bear rbWOxoprQ2M_0 bear rbXmAC9QV2A_0 car rbjK97ECn_A_0 boat rcrE_BJU-n4_0 knife rcrE_BJU-n4_2 knife rfksy8z9X40_0 car rgWglS6-TTw_1 knife rhIa7DWBXUM_1 car 
rjVLfZDg-1g_0 boat rk9SO8fR7-0_1 bus rk9SO8fR7-0_4 bus rlBfiB0epGw_1 knife rlLJTjn9vkk_0 umbrella ToclpwxGMe8_0 bus TpKpXHgy7yw_2 knife TpKpXHgy7yw_5 knife TqPnQuSGm2Y_0 bus TqZZfXdm7D0_0 car Tqnj4qeawHg_0 boat TqsQOw3CqXo_0 bus TrXkieSIkII_0 boat TsfcgwFff0k_0 bear TsrQwMo3niY_1 bear Ts8Wofx6QYY_0 car TusmYht5g7o_0 bus TvbiwdoAnv8_0 boat TvvBAOBoHFU_1 umbrella TwEihF94LGQ_0 umbrella TwSkZlbuaEU_0 bus TxUm-m-jFQM_0 knife TyV9inNHHAE_0 bus Ty_FDwb_nLY_2 car T0Mp-gJmMlU_2 bear T0Mp-gJmMlU_3 bear T0tT7l2X1_g_0 bus T1Zywya-PcI_2 car T1Zywya-PcI_3 car T1Zywya-PcI_1 car roNPRQwafcU_2 bus roNPRQwafcU_5 bus roW8_xIYVAk_0 knife roXQ3vv08_A_0 bear rqA8P346qIQ_1 boat rqDqbsbIcc8_0 bus rq5jwk8hqYA_0 bus rq5jwk8hqYA_1 bus rriv5ZJYcJI_1 knife rsMmhzkVg_0_0 boat rta_HO-3L_A_3 bus rwH7x0MR_38_0 boat rwS5mEyV7Go_1 knife rwS5mEyV7Go_2 knife rwcVAIM0TvE_0 bus rwcVAIM0TvE_1 bus rwu0xKkvzvA_0 knife rxRxMZ6DIjw_2 umbrella rxSJHCdoi0c_0 bear rxm15TcjWqQ_0 knife ryBGF3WFvsY_0 bus ryBGF3WFvsY_1 bus ry0Pnb8VkxU_0 bus ry0Pnb8VkxU_1 bus ry0Pnb8VkxU_3 bus rzDa9eW_dpg_3 car rzDa9eW_dpg_5 car rzOhM6n6Amc_0 boat T21Uim3jGuo_1 bear T3wZwUQ_7q4_0 umbrella T5ZgfFcAd94_0 bus T6QiKZd4bH0_0 knife T7h2fJLtABk_0 knife T8C-sLfGg3A_0 boat T-5AESRu0pM_0 car UAptbKXXoJI_1 bear UBk45sVKl_o_0 umbrella UCnTA86V3o0_0 knife UDmjHWk8iRk_1 bear UE1kUiVy7LA_1 car UFPrfB6_TJY_0 bear UFQmHju3MrM_0 bear r1JK0UIvyoM_0 bus r1YNttJqXjI_1 bear r2GN4IDacgM_0 boat r2GN4IDacgM_1 boat r2GN4IDacgM_2 boat r2GN4IDacgM_3 boat r2sw-3mWNEQ_1 boat r4U8cMe6_Uo_0 umbrella r4cneWcmGJc_0 bear r4cneWcmGJc_1 bear r43KKtRQNxw_0 knife r5c09tdbF3U_0 knife r6HzXMpwuOg_0 boat r7V8M9vMX8I_0 boat r8oV5neCRZc_1 bear r-Wqqn-oS_0_0 bear r_squ5DWzV0_0 bus sAa0aLc0rvM_0 bus sAo-z30biYY_0 car sAqB_9DrpiU_0 boat sCGJB9oAeHo_0 car sCX1zbdQvbE_0 boat UHvwjd6eSDY_0 car UH6GKx07mu0_2 bear UIlo6WvfABM_0 boat UJ7xasCu9yw_0 knife UKdl8BrKy4g_0 knife ULTTzu_-eQI_2 bus ULgPda0ny1Q_0 boat ULxGPhbhuwI_0 umbrella UMQ6fAZTiLo_0 umbrella UNfKxOwP1V8_0 bear 
UNyq1SNbNPk_1 bear UP2WXifDFc0_0 bus UQdjo1v_Hv0_0 car UQrP0Wa7bfA_0 bus UQ90qkTMSes_0 umbrella URiNDCZBU7E_1 car URmMAndDPfQ_0 boat USYudaDNkeU_2 knife USYudaDNkeU_3 knife UTx1Fw7nQcQ_0 bus UVGq9IRroYo_0 boat sDSmkWE8qw4_0 knife sEnhkLttWlw_0 bus sFgXir9g_Os_0 car sF2EQhRNlQc_0 umbrella sGpQTqemybM_0 bear sGzXdAI4YSQ_0 bear sG_AruJlxiw_0 umbrella sJA7-N7htNo_0 bear sJL716urwpY_1 car sJL716urwpY_0 car sJTLB7bgb0k_1 knife sJsEpKneYMs_1 bus sMm8f8vBx7c_0 umbrella sOQWtx6GiR4_1 umbrella sOQWtx6GiR4_0 umbrella sOvnHbg6d_8_0 umbrella sPDY-ey2kNA_0 umbrella sPDY-ey2kNA_1 umbrella sQEBpH647Mw_0 umbrella sQJr7LooP_s_1 boat sQftML4HXiU_1 knife sQvi3OxMoKU_0 bear sQvi3OxMoKU_1 bear sQvi3OxMoKU_2 bear UWJIq_1uAnA_0 boat UXDmIQTthAE_0 knife UYRhIhbuh34_0 boat UanzlUcmoJY_1 bus Ubj2t-7KcJk_2 car Ub5O76sDojg_0 car UcBLQsI3Dzs_0 car UcKyiCjpXoA_3 bear UdFEBlYt9tM_0 umbrella UdaAkO2f_pU_0 bus UeQLdrnbe8E_1 bear UeQLdrnbe8E_3 bear UgHNBgeg9cY_3 knife Ugh33I0Qxi4_0 umbrella UgkXJsrPys0_0 umbrella UhgJaZWsdCQ_0 knife UhupGJ7k3Q0_0 knife UhvhrEMHY0E_0 boat UhwOdFtF8os_0 bus UiZ3tYMpOic_1 umbrella UjTdR_85bTo_0 umbrella sSPe9VqmSuU_2 bear sS-GtompdcQ_1 boat sUhpJsSmrzA_4 boat sU-mmzCCGmg_0 bus sVbrxAG6jtA_0 car sVkPUjUh0UQ_0 knife sV9ymK-zZ8A_4 bus sV9ymK-zZ8A_6 bus sWfQh6SsvG0_1 boat sW7n8r3vvl8_1 knife sXwrjhXbAwA_0 umbrella sYE45Xnof5I_3 bear sY1i3-cQv70_2 boat sY3G5eOlysI_0 bus sY_jGNxKdYw_0 knife sY_jGNxKdYw_2 knife saBAx3Xw2PE_0 bus sbR26E99_A8_0 bus sbmsWqsHD9M_0 bus sb1unJ1sby8_0 knife sb1unJ1sby8_4 knife scFiRRTU5jg_1 bear scJFbu3WboQ_1 car sc-BJ-WirDo_0 bus sdHNJK0mfWQ_3 bus sdd5ViCUDwY_1 bus sfVwMcMm77E_1 umbrella sfVwMcMm77E_2 umbrella UjxwNRWfxBo_2 bear UkBlnrNOssQ_1 bus UlLwBfXpz4A_1 bus UmAOVqCB6UM_0 bear UmBxMf5cHV4_0 knife UmewKWpE2qE_0 car UrRiUQPaxic_0 umbrella UrxeEW4FBq4_1 umbrella Utvo55GUNyg_1 bear UutgI7H2EPc_0 bus UutgI7H2EPc_2 bus UutgI7H2EPc_4 bus UutgI7H2EPc_5 bus UutgI7H2EPc_6 bus UvsMOU9XGYk_0 car Uvsup5BdpLM_0 car Uwlk3sF-l38_0 knife UxD-6ScNF1U_0 bus 
Ux3oyD0wLig_0 boat Ux_-m16Ntqs_0 bear sgDzqYTo0GI_0 car sgDzqYTo0GI_2 car sghMPNg9wB0_0 bus shgKQ2FcjfM_1 knife siNixoeB9Ew_0 car si8Uk6frpqI_3 knife sjBWnj8kKVs_1 bear sjESht-PXb0_2 bus sje-nlCBYAk_0 bear sk5gj6VnXds_0 boat slGCyLNlI3w_0 umbrella slgsRri0IUU_0 bus sli0aHrS-l4_0 knife soPkYPTLD-Q_1 boat soe3qmwZTEE_3 knife soe3qmwZTEE_4 knife splTIYA-rtY_3 knife srUGXKwzLf0_0 bear U0G9nt_JMp4_3 knife U1jXflUgiSo_2 knife U1p1HQ3ZsUo_2 car U1tGGfRyOzY_1 car U3BQYG5-Koc_0 bus U3pwXnANDgk_0 knife U3pwXnANDgk_6 knife U4nccTmpY0A_1 bus U7N--AsibJc_1 knife U7fW1r0kRYw_1 car U7-_NQlr8l0_1 bus U8EGQyjwfEQ_0 car U85wCYoCIZ4_0 knife U-B7Xkx_rF0_1 knife suQJeplwaco_1 bus svZPjH3EGcI_3 car swj8kdhr03w_0 bus swkyfcVE17I_1 umbrella syJ4LBRPwjs_1 knife syY8MaSUvJI_0 car syfJEZrVzqA_0 bus sy9XCn-ebrE_0 car szClXDUETvQ_0 umbrella szW2Gonojss_0 knife szXVjlTlt3w_0 bear sziUCgMKvrM_0 bus sznHM_K2obc_1 bus sz6Zoh7MfnA_0 bus s0ABooHpZjo_0 knife s09Dr7gZ5G8_0 boat s1t73kIOSQU_2 bus s2BVmX4vImY_0 knife s2gkrcGsOxU_1 bear s2nioy3J4RY_3 boat s2nioy3J4RY_1 boat s2nioy3J4RY_2 boat s2qgkHBVQxo_0 bear s2qgkHBVQxo_1 bear s3lwoM0rD2U_2 boat s3-sF0tSY8w_0 umbrella s6BicsP9eBk_0 knife VA3OWlsrD28_0 umbrella VBPWsv5FfbU_0 bus VBPWsv5FfbU_1 bus VBr3P_OGawE_0 knife VB6eUS7LSfM_1 boat VCCevTa32Ng_0 car VDz1RZU6x5c_0 bear VESEWamKy10_0 car VFv1UuT7klg_2 knife VGAYYimByOM_0 car VGwSM3IXcJ0_0 boat VG_OHq6R1AE_0 bear VHiMLGyNYgQ_0 car VIASAf569_k_0 car VIxj6BV3kgM_0 umbrella VJZpavOgVEo_0 umbrella VLaCK3u84vI_0 umbrella VMLuyFD54AQ_2 boat VMXrHUjXjyQ_0 boat VMXrHUjXjyQ_1 boat VMi5mAdZyZI_1 knife VMs0jemUzI0_0 knife VNuYRPiFrus_0 bear VN-BCqBlrhs_0 car s8vzssNUlOA_0 knife tAOx6NFDD9I_0 knife tAxbjy_edDI_0 umbrella tBOSPNFbuv8_0 umbrella tBQRfKeIYZc_2 bear tBgtSnOMOwM_0 bear tBh6HxQHmrs_0 knife tCZLl-MZJp8_0 car tDYPtg0At_Y_0 bear tE42n_1PW6w_0 bus tFfqpeBbvr0_0 umbrella tFjlTZqwoWI_0 bear tGycfa97LVU_1 bear tIX4eIYzfD8_0 knife tIX4eIYzfD8_1 knife tIs05U9pd04_3 knife tIs05U9pd04_1 knife tIs05U9pd04_4 knife 
tIs05U9pd04_5 knife tJXbZyaUOD4_0 car tJhfshKvRmE_1 bus tJhfshKvRmE_4 bus tJ01Y3R3Qmg_0 umbrella VOcplsa6Gq4_2 knife VOcplsa6Gq4_5 knife VPI_Nm3GHHc_5 bear VPI_Nm3GHHc_2 bear VP0u_E6FOsY_1 car VR_V9WaFYn0_0 umbrella VSj9dXwt7zI_0 bus VSxoLvaJN2Q_1 bus VUcCABjVSO0_0 car VU2lUX4NdkM_0 knife VU2lUX4NdkM_1 knife VVg7sbsw9vY_0 bus VWpm6_Uhis0_2 boat VX9TPrjMcOg_0 knife VX9TPrjMcOg_4 knife VZ5r0BHRf84_0 boat VaW7Go5pX-c_0 umbrella Va50KanUO94_0 umbrella VbA0B1JcpNY_2 knife VbeIRLOQ5pI_0 bear tKCjJuulqx4_2 bear tKCjJuulqx4_3 bear tKCjJuulqx4_4 bear tKN3Qo0oUoc_3 knife tNvGTzks1yw_0 car tNvGTzks1yw_1 car tO0igm1AwqU_0 bus tPae9uGqDog_2 bear tPzWEC_9_H4_3 knife tQpyrprwwc0_0 umbrella tR2sDFGND7g_0 bear tSEneDiCrqg_0 bear tTFTWquOTi8_0 bus tTjbx39rZMk_0 bus tT2pUZ0W33A_0 bear tUHf6Ynx_vI_0 knife tVJE-0uNX1s_0 boat tVTkAh80t5I_0 umbrella tVuL82POt-I_1 car tXMBGjGduCM_2 knife tXsMGHCKw7U_1 boat tXwfqREzEtI_0 boat tYGp2PFiAUE_0 knife tYas1z25M_4_2 knife tYcNeSisfpI_0 bear tYdhIaTDwiE_1 knife VdLohVQNC5Q_0 knife VdLohVQNC5Q_1 knife VdLohVQNC5Q_5 knife VdLohVQNC5Q_6 knife VeUIJlyGjkY_0 car Vekx17G8mkk_0 bear VfBqMWT6aRM_0 knife VfKgW5eSGsk_0 umbrella Vhmj1OGGQuc_1 bear Vhn-8bCU70s_0 bus Vh21adwevRU_0 bear ViXmx_D5BAY_0 knife ViXmx_D5BAY_3 knife VizxeIzWEFw_0 car VjF-G6FQooU_0 boat VjS5w2pc0tA_1 boat VjvpOU349zY_0 bear VkDn2-1H23o_0 umbrella VkDn2-1H23o_3 umbrella Vk43AD4O_hc_0 boat Vnrw6Fjmj8I_0 bus VnwwgTO4w_k_0 umbrella Vn4aKSlYXX4_3 bus VppPgMZqfEQ_0 boat Vp0kah4_m6w_0 boat Vp0kah4_m6w_2 boat VqHSuVVKfjs_0 bus Vqo2RiAzLnU_1 car Vrnm_kf7OCs_0 boat VsDgOcOWqXw_0 bear tYofvh4_3K4_0 bear tadYkEU5suY_1 knife tbIUesoKv9Q_1 bus tb_hKPkH2co_0 knife tcFQ5kE3PKM_0 car tcFQ5kE3PKM_1 car tcSHrlGTFJc_0 knife tc912gGdckQ_0 boat tdjDSO8NFx4_0 knife tdpAPPsHlDQ_1 bear teJyM5tywno_1 bus teQkZqDa1lw_0 knife teb83RDwop4_0 bear tgSfan8G7wo_0 car tgVXG7H_acI_0 umbrella ti3J-8aWPcw_0 bear tjldcvPuif8_0 bear tj4mnSXX2DM_0 car tm2bmSBR4uE_0 knife toiMoCxSyKY_2 boat tos1ELGZH0M_2 umbrella 
Vs3Mi3Ch_EQ_0 bear VtHzTaDh4WM_0 bear VtHzTaDh4WM_1 bear Vt8DAmG3nHs_0 car Vu4xkIEs6U8_0 boat VvXxRawsOCs_1 knife VvXxRawsOCs_4 knife VwYEgB5HOD0_1 bus VxdUG7Sinyw_0 car VyDNhpvCuc8_0 bus VyfIuIcelhc_0 umbrella Vz3wJsLA_gI_0 bus V0NnR8HLSbo_0 umbrella V0o8kxcOZRc_2 bear V1a9QcSegdw_2 umbrella V1dqjmHNyIY_0 boat V23vmoZYoVw_0 bear V4o7I9cLp-g_0 bus V6nKvvfzWpg_0 boat V64pvhB8sKU_0 car trAReSHvUdQ_0 car trAReSHvUdQ_5 car trAReSHvUdQ_6 car trAReSHvUdQ_1 car trAReSHvUdQ_2 car trAReSHvUdQ_3 car trAReSHvUdQ_4 car tsNhgDUKwHw_3 knife ttdTnGOIBmA_0 umbrella ttdTnGOIBmA_3 umbrella tvVLkJ0HTQQ_3 car tvew-P2UPL4_0 umbrella twiEfNprSoE_0 knife twiEfNprSoE_1 knife tw7jf9U2-kM_2 bus txpIIsM1T8U_0 bear tx2dZF1Ckxk_0 knife tx5tKODiGuo_0 knife tx5tKODiGuo_1 knife tyO37NBAS1Y_0 bus t1UtwxOBGvE_1 knife t1vrE0cEB80_0 bus t10FRgv9o5M_0 bear t10FRgv9o5M_4 bear t14PUW9SINk_0 knife t31z17N5skw_0 knife t31z17N5skw_1 knife t31z17N5skw_3 knife t31z17N5skw_4 knife t33TQH8-7tg_2 boat V9UCv2qhsxc_0 car V9ulnUIQGJU_0 bus V9ulnUIQGJU_6 bus V-KNIu_PsaQ_0 bus V-NvBHig1i0_0 bear V-tMggTxBu4_0 knife V_Bb7A55f-c_0 car V_dJ2KuqfOA_0 boat V_dJ2KuqfOA_1 boat V_t8pbEf8bA_1 boat WB7fT2tI7Pg_5 car WCSEuwFm7KU_1 car WCfc8YGLu1o_1 bear WCfc8YGLu1o_3 bear WDgLmrXq4vg_0 umbrella WHLIJlNh3TQ_1 knife WHQXE5tuTXk_0 car WHUaoqVF57g_0 car WIdj4ovuDWQ_0 bear WIdj4ovuDWQ_1 bear t4oaGCoTBZc_0 car t42tnyTtYWE_0 boat t7OKXKxjHls_6 bear t8X-x_7pv94_0 car t_-dK1Xhg90_0 knife uAjqm8B-aio_0 knife uB_Hurzj4s0_0 car uGEDuDcqqvU_0 boat WJ2A2XRRTw4_1 bus WJ_vIH7FJsQ_0 car WKDhXr_5mbI_0 knife WKKFM7oRSd0_0 bear WKS6aq75gk0_3 knife WKV4j8-G1Nc_0 knife WKfQfA_YQTY_3 knife WKubVTrND7s_1 knife WKzUT3zOIU8_0 knife WLxzHH6iJlk_4 boat WMSu-XOQe5w_4 bus WMSu-XOQe5w_0 bus WMgP1z0x0Io_0 bus WOVTnN-HcZ0_1 bus WOxTA78OlZU_0 knife WPqEyeVtih8_0 bus WPuItCUuEkY_1 knife WQAr1enuPKw_1 bear WQX6ptTAKHg_0 knife WSc0kYKLGTg_0 bus WStgEyiPBBE_0 car WSvHn5XJq0Q_0 knife WS0DayzAv80_1 boat WS0DayzAv80_2 boat WTXytzbF5lU_0 umbrella WT69VoU2Hps_0 car 
WVx9vOoutGo_0 bus WWKuCF2FuYk_0 car WWm9iMkKk-g_0 knife WW7ib8XAVz0_0 boat uHqj6xQGOYg_3 bus uHqj6xQGOYg_4 bus uHqj6xQGOYg_6 bus uHqj6xQGOYg_7 bus uIKZlXUoHOc_0 bear uJMFDY-BKiQ_1 bear uJMFDY-BKiQ_4 bear uKdOuLYJjrg_0 knife uK-zcpEE8nE_5 boat uLdXkXRsHok_0 umbrella uMK6b2TG8rc_0 bear uMV37U-DNUQ_0 car uMciOwjd0GU_0 car uMciOwjd0GU_1 car uMd1DmjxAZQ_1 car uMj3V0s7mUo_0 bus uM_jxm7bFp8_0 boat uNDkbmlEYeQ_0 bear uO7OtV3J1AY_0 bear uPE1o5dCYDc_0 bus uQhMkVrdghM_0 bear uRLAyu-3l0A_0 knife uStpLanz0fU_0 car uTAqzBGMDOc_0 bus WYwRW_t4jb8_0 car WZK5IqBtpGE_3 knife WZgxjIvc2nk_0 boat WaEyVBSggwQ_1 bear WaaW6bElWCM_0 car Wb20JaIrr8M_0 knife Wb20JaIrr8M_2 knife WcNlbTBZM64_0 umbrella WdIATjW74Pc_0 boat WdYFXDv4TEo_1 car WdgTHJurLx0_0 umbrella Wd0xTEH2d9k_0 boat WejCws8AoxE_1 knife WejCws8AoxE_2 knife WejCws8AoxE_3 knife We4_tuFKyGE_0 knife Wf6hHpxRW_Y_4 knife Wgx6hhiRLoA_0 potted plant WjiMUA6_CkY_0 boat Wlm2mLKCMlM_1 bus WlsN6HURFTc_0 bear WmFqo8n67Ok_0 bus uWi9-84kTFQ_1 bear uXHJHV0bwUk_2 bear uXe9WOlTFcs_0 bus uXe9WOlTFcs_1 bus uZgcOYmazsw_0 bus uaJ1g0xJ4QY_0 bus ual32V7-KJo_0 boat ua_5GosOa-c_1 bear ubFoUAh6d4g_1 knife ubOiomYqbNs_2 knife udSE-6UkgwM_5 umbrella ue1CIlwhPEs_0 umbrella ufFT2BWh3BQ_0 bear ugWs4v6DbUw_0 bear ugsJ5cOmFTg_1 boat uhXcL98XNCY_5 umbrella uhXcL98XNCY_1 umbrella WoxbRmDfLeI_0 umbrella WoxbRmDfLeI_1 umbrella WpCyx-QCMec_0 bus WplsTumdQf8_0 boat WqFFUvf-YJk_0 knife WqxU9aIFmNY_0 umbrella Wr5BjrtC4Ts_1 knife WsEiHZFGeFs_3 umbrella WsaP8FyRUCc_0 car Wses8y3NyJ4_1 bus Ws9V_B7mqJI_0 knife WuTHL7GtG-8_3 knife WvGzCV5ICZM_1 boat WvuZRZqhxk4_3 knife WvuZRZqhxk4_5 knife Wvv8cOXaAZI_0 bus Wv-Weuc4E1A_0 umbrella WwLtxfDC7ok_0 boat WxWXB9hf7n0_0 car W0kDpFkg6xU_0 boat W1z3EAv-eJw_0 bus ujnUCtI7gzI_0 bus uj4TRH5r_ww_6 bus uklsFjegS-w_0 bus ulzto7-Hl64_3 bus ul__w-oqHrw_0 bus umjU9X1kuYg_2 car umjU9X1kuYg_4 car umjU9X1kuYg_1 car uoGBYfJo5Xg_0 car uo1J9BUgQmk_0 boat urRNkZvzuHI_2 knife urmSoxyi9Vo_0 boat urmSoxyi9Vo_2 boat utmsGeHFdvI_0 boat uuBKDGoTmGY_1 car 
uu-UptVYr_A_3 car uvV7cblR4qc_5 umbrella uvZOzZjBKXY_0 bus uwL5LYln0EM_3 bus uwL5LYln0EM_4 bus uwL5LYln0EM_5 bus uwL5LYln0EM_6 bus uwx7UKo4jcg_1 boat uwx7UKo4jcg_0 boat uwzHiGF1YMM_0 boat W2z3SxorVnI_0 knife W2z3SxorVnI_1 knife W38vB3cw2fA_2 boat W4Is7CI2Sfo_1 umbrella W47ZA0onzb4_0 knife W5dSTfMCj-U_0 boat W5zIkmZyS18_0 bus W51Spbo8SQQ_0 knife W6YCv9ZVVOc_3 boat W6uCEMEi7_E_0 bus W7JkNuRYNr0_2 knife W7JkNuRYNr0_3 knife W7JkNuRYNr0_4 knife W7JkNuRYNr0_5 knife W7yqHDA_RMU_0 knife W8EKt6cG0E8_3 bus W8EKt6cG0E8_7 bus W8EKt6cG0E8_1 bus W8xqW-QD_B4_0 knife W87M2lQeWNk_0 bear W87M2lQeWNk_1 bear W-ZpC_K7Df8_0 car W-x__78AyrI_0 boat W_Wc7lFraRg_0 bus W_v5wpcibRM_0 boat W_2LqiQ_ico_1 knife XAa2L1v8iJM_1 umbrella XBAOFn8KXFo_0 bear XBn6P-IKuis_0 person XBssw3bqXL0_2 bear XCZv_AjZo08_0 knife XCu0Ea4zHuQ_2 bear XDtfr902CVM_0 bus XD1OYmmeKic_0 umbrella XD1OYmmeKic_2 umbrella uxFX6p61oPY_0 knife uxlDad59mFc_0 boat uyWVUOcgZHg_0 bear u1OhTXTmuWM_5 bear u1TvbkpmEbs_0 car u1vMDzyFxzI_0 bus u2BVfAFQ1zU_3 knife u2BVfAFQ1zU_2 knife u2EDuPJijZ8_4 boat u4K3jRl7Gag_0 car u4S9mlFpt0s_0 bear u4uwaq4uf54_3 car u4uwaq4uf54_0 car u6XGBXhCJ18_1 knife u7STs8FCy_g_0 bus u-1HZJXwFHo_0 umbrella XF8B5xjRCF0_0 car XF8B5xjRCF0_2 car XF_oHXRGd1o_0 boat XGRZLrZC9zY_0 boat XIlybSpq0mg_0 bus XJmn9i57K3g_0 bus XLvSaN_M6lE_0 car XL0B2niNRCw_2 bus XMlEA_yRojM_0 knife XMyio1ZckJc_0 bus XQBtgwUzEL0_0 car XQX5y5BQykU_0 bus XQ6u2yTbu_0_0 car XQ7UbbPjnDo_1 knife XRenv5AHI_8_0 boat XRpgkCuziGY_0 umbrella XSI7M8s2Tc0_0 bus XS4ow1Wcaro_0 car XTm-jN1RVHA_0 umbrella u_YKLGqrMKQ_1 knife u_gN-dXNRHI_0 knife vARZcTna8NU_0 boat vBEaeqdPsho_4 car vBEaeqdPsho_3 car vDT-DShjnjU_0 umbrella vEMHY2cT6kA_0 bear vEi5gkcTDGY_0 bus vE9zapt1WdI_3 car vFSRrtT5AL8_0 bus vGbt_XsSaVk_0 knife vGi-DjriLLs_0 umbrella vHAlsHYE3mo_3 car vHAlsHYE3mo_0 car vHXM9IJdVcM_0 umbrella vIQAK-4lMOc_0 umbrella vIgmRBC2ayQ_0 umbrella vJl9QkAbpc8_0 car vKxCl7DzJjI_0 knife vK8dgvZ5B6A_0 umbrella vLA-mHM7MAQ_0 knife vL-6uNdrCV4_2 knife vN54ADSnJmE_0 bus 
vOKH_DIjvAU_3 knife XUkTknKOdrs_4 knife XVa23hmwe-E_0 umbrella XVrNN52RTEs_2 car XVrNN52RTEs_3 car XV694aCXY8Q_0 boat XW6BQWpl3bI_1 boat XZl5Luzj6v0_6 bear XaSsc3noeLs_0 boat XbHWOyNM3Bw_0 bear XbHeGzyGejE_0 bear XbWrCVe09YA_0 boat XcLl0qSs9bU_1 knife XcifNE0anDo_0 knife XcifNE0anDo_1 knife Xc1jzGFyrnE_0 car Xc5LW1FIVE0_2 knife Xc5LW1FIVE0_3 knife Xdu-98BUgmA_0 knife Xd7VbtoAdb0_0 car XeOwt5KeVfA_2 car XeR1DgyOa9o_0 knife XekvrqFtazY_0 bus XeplLROyXyA_5 umbrella XgBTEQN_ZxA_2 bus XgBTEQN_ZxA_4 bus XgBTEQN_ZxA_7 bus XhSmPb3cA_A_1 knife XhSmPb3cA_A_3 knife XiEeY5R56EQ_0 knife vOy0N09kGEE_0 umbrella vO56uCHmSjg_0 umbrella vPVpX6GPY5Q_0 bus vPVpX6GPY5Q_1 bus vQ_8ry_dx68_3 boat vRhGvmXk2js_1 boat vRzpk-thwA0_0 bus vTvjeXsP7TM_1 car vTwSeYRU_WQ_0 car vTwSeYRU_WQ_2 car vUKk9LqKVpA_0 boat vUKk9LqKVpA_1 boat vUg2Sr7Jl-Y_0 umbrella vVKZzTBvsF4_1 bear vVNCUA8hss0_0 boat vVUbZCrCqEU_1 boat vV72xGim-is_5 knife vWMiT73g5-k_0 boat vWO0tyaGuaM_0 umbrella vWUAzQ_EEJ4_0 knife vW_aJr-PSvA_0 bus vW_o48lG_0I_0 bus vXX9FmlwVlk_1 bus vXX9FmlwVlk_6 bus vXX9FmlwVlk_0 bus vXX9FmlwVlk_2 bus vXX9FmlwVlk_4 bus vXaLFnwvrX4_0 bear vXvR0RiGzj4_1 car vYROjLzMqvY_1 bus vYROjLzMqvY_2 bus vYROjLzMqvY_3 bus vYwdLoOa0Rc_0 umbrella vYwdLoOa0Rc_1 umbrella vY1sAfu99Es_2 bear vZznldYVwGA_0 boat vbfWHUjHR2k_0 bus vcdEtOGEEcU_1 bear vcdEtOGEEcU_0 bear vcdEtOGEEcU_2 bear vch6R3EO9Ec_0 knife XjHJiHO6onE_5 bear XmVv2wQSvjs_1 car XoJahpK73EM_0 boat XoqPCnlpymI_2 knife XpDVw5mS058_0 boat Xp591jCTBOA_0 bear XqfkP1lAkyE_4 bus XqfkP1lAkyE_5 bus XqfkP1lAkyE_2 bus Xq-5DHWJ1pk_1 bear Xrh68BP53Gw_0 car XriRhjtrlLE_0 car Xu-ZZl_L38Q_2 boat Xv9eEVcD2P0_0 bus XwvKtur_QEk_0 knife XxHnDkI1NdQ_0 bus XxHnDkI1NdQ_1 bus vfzGrdk_Mxo_0 bear vhrRnvGSMMY_2 boat vhrRnvGSMMY_5 boat vhrRnvGSMMY_6 boat vhrRnvGSMMY_8 boat vh4BHzMwVT8_2 boat vh4BHzMwVT8_3 boat vi4ktD0dAD4_0 car vkfdn7gkQh8_1 umbrella vknUR0K4MqM_0 bus vlNLyHxz1TY_0 boat vlaeAly1nZc_0 boat vmr5UiZekic_1 bear vo0WWdM7UCs_0 bus vo6Uzhx2fcw_0 boat vpItyB8epmQ_4 boat 
vp8NiaEmk2M_0 bus vqeybXtIwxE_3 umbrella vrK5lDQJnmc_0 car Xy1w-6sjVS0_0 bus Xzj_w2QkjRg_0 umbrella X0iu2HmUYfY_0 umbrella X0nevXM5278_0 car X1drOgA68EU_0 bear X2zWe7ayseQ_1 bear X3ST-FA3VS0_4 bear X4YaqObAEns_1 bus X4kxk4G-BOs_0 bear X4kxk4G-BOs_1 bear X6Y6e6qsVOc_1 bear X6tuO-hL1cg_0 boat X6z7yGyP3UY_0 boat X7AJSe6kUz4_0 boat X7PChwjgRog_0 boat X7mkuAPcpg0_0 bus X8Wc00FiJn8_1 bear X8lHVX9uGm4_0 car X9dNz1MhFTM_0 car vtOaPYxGauU_0 boat vwp5f1sTcOM_2 boat vxEizaWVZ2E_0 car vx7S4ISNz90_0 bear vzKEVGD3E3w_0 boat vzKEVGD3E3w_1 boat vzmWbtFBxb0_0 bus v0DjGmLiuao_0 car v0P7DOSAooM_0 boat v0Uh3fazz7A_4 bear v4CWziKFAvg_0 boat v4CWziKFAvg_1 boat v4TWD1hSObU_0 umbrella v4TWZQM-t_M_0 boat v4wheqJ7qmw_0 car v4-PEShPpKo_1 car v4-PEShPpKo_0 car X_1xeuzdJII_3 bus YAI5kxAVlag_0 bus YAS9QgwaKuA_3 bear YAacEL8GB8Y_0 bus YCTBEauAnvs_0 boat YCT0ue2AdNE_0 umbrella YC0SWC1thDM_2 car YDxjfXnUsjA_0 bus YFb4IgdgsQI_1 boat YGm0A03QK-0_0 bus YJklsCjPlRE_0 car YJrYjEZ4Hfo_1 bear YLNAOu0nAaM_1 bus YMWEbvBeA2k_0 car YNOl5XssrmA_0 car v6RTPFSqVAo_0 bear v6d52nxP9CI_0 boat v6d52nxP9CI_6 boat v6d52nxP9CI_2 boat v7R5EfiWsMU_0 boat v7mxF1u1eJA_0 boat v74SVFcInoY_0 bus v77um2oiCmw_1 bear v8vdjpigkqA_3 bear v9EO_34zhPY_0 bus v9dJjyyqJ14_0 bear v-_nfHjdDrM_0 car wAJI2wAjCLA_0 car wAktmcUSj0Q_0 bear wAsEbrNlx-Q_0 car wBEyQdKDniA_0 bus wDOuWmULTDo_0 bus wDwRfk2Ka7A_2 umbrella wFuYr5TAoA4_0 car wFuYr5TAoA4_2 car wGqMuP3z6nY_2 bear wHdnCnPBax4_0 umbrella wHrdTEho0Do_2 bus wItLJ3GVPHo_0 umbrella wIzhSLqL-4M_0 boat YPR6uiSn_PI_0 bus YPR6uiSn_PI_2 bus YPWoY6sseHw_2 bus YP9HVTyFrM0_0 umbrella YQRaUcLNZjw_1 car YRmCe16K5EI_0 umbrella YRxTciapqLc_0 bear YSFyOBQNQzc_1 umbrella YSOeyn1SUIc_0 bear YSx79S6HsRE_0 boat YSx79S6HsRE_1 boat YVueKFH38pQ_0 umbrella YWAY2hVlXwU_1 boat YXC4y1_fd5M_1 boat YYjM_RIWUWk_0 bus YY-G2b46dbU_0 bus YalvFPYggIo_0 bus YbsAJsBizWo_0 car wJbu3nAVmh8_0 car wJ-qeIIyve0_1 bear wKlqztWBWCE_0 bus wLXsUww1z0Y_1 bus wLXsUww1z0Y_2 bus wMW3eYDAmiM_0 car wN6DTQLhQo0_0 boat wOAtMDJ1DIU_1 bus 
wOqLqQhPKNs_2 bus wPCVya7FjXI_0 bear wPcWihBU6Fc_0 boat wPjzhuBuZ_E_0 car wPrTnHfCQy0_0 bear wP83jrOriho_5 boat wP83jrOriho_1 boat wP83jrOriho_3 boat wQY4K0ZN5RY_0 bus wQY4K0ZN5RY_1 bus wQY4K0ZN5RY_3 bus wRJ_foSdk2g_0 umbrella wRs7_Un28R0_0 bus wSaf-OQyJzM_0 boat wSkaSUiYB60_0 boat wUG-UKf5xOM_2 bear wUtwwmbus0k_0 bear wVI9BeWuM68_0 bear wVX6wPj2U5M_0 bus YcrP36sQwVc_5 bear YepGVMeHePw_1 boat Ye3mi53K_Oo_2 boat YgouPUMM7w8_0 bus YhZT5GU-dEY_0 bear YiDVwrN1Djs_3 bus Yi8XHxZACGY_0 bus YlGg5v-AWZc_2 umbrella YlnMI5yk7FU_0 boat YmRfW-9QwH0_0 car YodCYpx5p8o_2 bear YogxE9OtHGE_0 car YogxE9OtHGE_2 car YozOMrrhBWk_0 umbrella YozOMrrhBWk_5 umbrella Yo8IaFdsDHQ_0 umbrella Yo8IaFdsDHQ_1 umbrella YpGGnhGqqkc_0 car Ypv2bwSbJbg_0 bus YpyrD-P9emk_1 bus Yq3H6FwjqwQ_2 bear wXg6MT7--Ms_1 bus wYO_Z3tO-P0_0 car wYO_Z3tO-P0_1 car wYO_Z3tO-P0_2 car waGAoKeMDbo_2 bus waZHoBhYNXM_2 car wan2A1Zp9pg_0 umbrella wa4LKNmoGCI_0 bus wbBafnofeHM_1 bus wcLRQ5lDklc_2 bus wcRJMRP7TtY_0 car wcUHhJA9ynY_0 umbrella wcUHhJA9ynY_1 umbrella wc6z479m8VU_0 knife wePYCAT9VWI_0 boat weUGYN9mO8M_0 car we9P1H3yM9s_0 umbrella wgn5GA4Kt_w_0 bus wioe2rgDFxQ_0 bus wi_60seXhMg_0 umbrella wkCC1-6dZZc_0 bear wkRF61CxvWQ_1 boat YsJGlSMV6fc_0 bear YsKpyV6dNVU_0 umbrella YsKpyV6dNVU_6 umbrella Yukb6C-FiPs_0 bus YyqN8OKq7-k_0 car Yy9Cj5ayVow_4 car Y2esC00COVs_0 umbrella wkhiKomfWwo_0 boat wku7FWw9zok_6 bear wmN3gF7czBE_0 boat woB4lneU8v4_2 boat woB4lneU8v4_5 boat woB4lneU8v4_3 boat wonqKYd_Hkc_0 boat wulomSbG8Ww_0 boat wwHyMOLjtHw_0 car Y8gjbHlOSpg_1 car wz-CYTAvpJA_0 car wz-CYTAvpJA_1 car w1xC4CowaVk_2 bear w2d7ZPHVRsQ_0 car w4QoeqK4vN4_0 boat w5KKrxi32ZU_0 boat w5RAGrRh6N0_0 boat w85PvG-O3JQ_3 bear w-RoxIo67S8_0 bear w_dzHMbP1wk_0 car xAdflusGMAM_2 bear xAdflusGMAM_1 bear xBQVhJr5tn4_0 car xBQVhJr5tn4_1 car xBW2dB1aHqE_1 bear xE-fIbBizEc_0 boat xIjuSe8NERE_0 boat xIr-46lqsbs_4 boat xI3wdcR9GOU_0 bear xJaqlEqJIsg_0 car xKUjAAXXark_1 car xKjnn1lJsUE_0 boat xLl8JlHPals_0 bear xL0aucx8LjA_0 car xM1N_JeMAns_0 car 
xNfYVO0HOWA_0 bear xNfYVO0HOWA_1 bear xNqzZtEMt6A_1 car xOQ_zqhFFoQ_0 car xOQ_zqhFFoQ_1 car xOQ_zqhFFoQ_2 car xPgexGqlrpM_0 boat xQ2ursLiV78_0 boat xVl7ISxNOBo_1 boat xWfIV6ykSZU_0 umbrella xYRbcgZcjTo_0 boat xZdiy-peZpE_0 bear xcC48didfYg_0 car xds7aav_WA0_0 umbrella xeEFpaZutxQ_2 car xeEFpaZutxQ_0 car xemv_TG3nHo_2 boat xf7e7HpnDAI_2 umbrella xhLH-f-e2Ds_0 bear xhLH-f-e2Ds_5 bear xhLH-f-e2Ds_1 bear xhLH-f-e2Ds_3 bear xhLH-f-e2Ds_4 bear xhYRRVSUjcI_0 bear xh6_xD0_FUY_0 umbrella xi1l0PNYmVU_0 car xi1l0PNYmVU_1 car xk-PCxxgLyQ_0 car xlSq_r-1VZI_0 car xlTBS98u4Xk_1 boat xl03KNG3qcY_2 bear xl03KNG3qcY_3 bear xmXEOSj-QR8_0 umbrella xm61skXJVHY_0 bear xm7yMjZR_HM_0 car xniXqwdU3rM_1 car xn_6GQGdyww_0 bear xoL1TWqV2UY_8 car xoL1TWqV2UY_3 car xoL1TWqV2UY_4 car xoL1TWqV2UY_6 car xo93ACxVFCE_0 car xu3hCCY1M98_0 car xvJ-vgSlRFQ_1 bear xyUFBTV5sfA_1 boat xyUFBTV5sfA_5 boat xzFwd6rktG8_1 bear x1PZyiPtcD0_2 bear x1PZyiPtcD0_0 bear x2MUZI0ckUs_0 boat x51qh-jbh2w_0 car x8bgasvRg_0_0 car x_PtUMz2m3g_0 umbrella x_yZa__92dU_0 bear yE9ySV90e2U_2 bear yFdbcjv2scY_0 bear yFwt2mHmJQw_2 umbrella yFyTQPoWKrg_0 car yGYLwBmuRVI_0 bear yGYLwBmuRVI_1 bear yGq_wX2hSms_0 car yHFbPuIOGec_0 boat yMVPEp44IcU_1 car yNYzTl3zuSA_0 car yOeQRz1L-6w_0 boat yPx8JYuB8jo_5 bear yTEPer0Bvnk_0 boat yTr7cqNxVw8_0 boat yVwePYmRfaA_2 boat yVwePYmRfaA_0 boat yV3gYczZGSU_0 boat yWKpg3C3HRA_0 umbrella yWQT0KUXmZs_0 car yXA2s-Ylkx4_0 umbrella yYt1-j5ltQg_0 bear yZOWsBbP8Dw_1 boat yafgzvvEBsk_0 car ygqn0Cw0cJg_0 boat ykAF4z2vPRI_1 car ynSIMn0mh5Q_0 car ynuXudWT-jg_1 boat yqDO3G8QSxs_2 boat ysudb_DYv1E_0 bear ytzy45KRs4k_0 umbrella yy-1Eaz2SGI_4 boat yy-1Eaz2SGI_5 boat yy-1Eaz2SGI_6 boat y26dbfVQaAI_0 car y3HDa7ZvWW4_0 umbrella y5rlUzgK0z4_0 umbrella y6l_Xj3A7dU_0 bear y6nMm6sNieE_0 bear y6oa4gTfIaw_0 boat y7_Teuq-Jd4_0 umbrella y-J-zu3KYKk_0 boat y-lv7_3azcQ_3 bear y-lv7_3azcQ_1 bear y-lv7_3azcQ_2 bear y_Kbef75lDk_0 umbrella y_OvZEh5PxQ_1 umbrella zA7rl-0pCw4_1 bear zBCRUfv1YVo_0 car zBomR9gjgg4_1 car zCnqglOaM40_0 
boat zC1J8hrm_FI_0 boat zGOI3Uds1-A_0 car zGvuvfZeouY_0 car zHwK-Ov5Dn8_1 bear zIGdWP0BOPc_0 car zIoLntgax_4_0 car zIrTQvy-DtU_0 umbrella zKN-t-wHfVw_0 car zOxKFs0x_-M_0 car zPUoexM4GJg_1 bear zS4G-dKS3dg_0 car zUYNrm52mG8_0 car zU9O4EpnP8g_0 boat zW4j5HFdFCE_1 bear zW9G9_luulU_6 boat zW9G9_luulU_8 boat zX70EOhK1IA_4 boat zX70EOhK1IA_0 boat zX70EOhK1IA_2 boat zX70EOhK1IA_3 boat zYNSRTs7wcI_0 boat zZMZCzV930Y_0 boat zaXvp0LSorI_0 umbrella zcIJlqUAlyQ_0 boat zcdpKM2gDkA_3 bear zdWOfDZyRWg_0 car zdp6LbsF3Fo_0 car zdp6LbsF3Fo_1 car zglydzoqdNw_1 car zhSMuVKY4jM_1 boat zhgbbZA2jZo_0 car zj0QGbLx2Ek_0 umbrella zkC1ygaZUL4_0 car zkFlovQ2F80_2 umbrella zkFlovQ2F80_4 umbrella zkFlovQ2F80_0 umbrella zkYqOEAbTTE_0 car zk5BFmxsRfQ_1 car zmXJ3VmO_yQ_0 bear zmXJ3VmO_yQ_1 bear zn_LOCSgnBI_0 car zobMJDgPWmM_0 boat zpW9Kjtbu7g_1 boat zp4-YNYr-l8_0 car zqDdt_wpfcM_0 bear zqyhnAN5qnA_0 car zq-AjPBQb3w_0 umbrella zsszkZnE24M_0 car zsszkZnE24M_1 car zwKNqBmI95k_0 umbrella zxfyvjQQ0QY_0 car zxuleRJc5Pw_1 boat zySbpWHTUUI_2 umbrella zzDlzbpuFUg_1 car zzOYV3PIwDo_1 car zzljeIZDjM8_0 car z1CT7NYPStE_0 boat z1CT7NYPStE_2 boat z1DFtYFOfsQ_0 boat z1GcDqMXI5U_0 bear z1WPNBklZbo_0 bear z3V1O449zY8_0 car z3V1O449zY8_1 car z3V1O449zY8_2 car z32BNdijIPo_0 car z4C0C5AtXd8_1 bear z4Nk6je-k5E_5 bear z4Nk6je-k5E_6 bear z4Nk6je-k5E_2 bear z4Nk6je-k5E_4 bear z4YdhKjeNQk_0 car z5PqRVPhGGo_0 bear z56C-TtwATI_0 car z6Bzk_B2FVo_1 umbrella z6gL7THeOz4_0 car z8GzZUKj04k_0 car z8QYapjsTBo_0 bear z8WzXJMRLkg_1 bear z9CJpzFuqHU_0 boat z-gqhqI7U10_0 umbrella z-n_qZEuRko_0 umbrella z_CWMOiNpzY_1 boat 0Ah0DHbJ6Uw_0 bear 0B-l9QmJK3I_0 car 0DHXMcNUn60_1 umbrella 0EEILwHA4Dg_0 umbrella 0FRiwnN3Wv8_0 bear 0FUPhsPv9vs_0 boat 0FUPhsPv9vs_1 boat 0GR555fb7uE_1 boat 0GR555fb7uE_3 boat 0Gal36CHm94_0 car 0Hf-spRN8iA_0 bear 0H81H-1s398_0 car 0JkwSF_s82I_0 umbrella 0JxUW6X6VTA_1 car 0JxUW6X6VTA_2 car 0LY3jcKxA2E_0 boat 0NN0x0UcFVI_0 car 0NgLxOGQPPM_1 car 0Nh6NERAbQM_0 umbrella 0NyneL4SB78_0 umbrella 0O2cDoxCAhA_0 car 
0PqvPOqRHik_0 bear 0ROl0QaHTgU_0 boat 0ThOYMXH3Mw_0 umbrella 0TyHCEslM-4_0 boat 0UGD0u7LEPY_0 car 0UVJn4oJR3I_0 car 0Vu78K6ZsOk_2 bear 0XETGtPrUR0_1 boat 0XrWsyRsBYs_1 bear 0YWXAZlIFZE_0 car 0YWXAZlIFZE_1 car 0YaZ8lrPQJc_0 boat 0YaZ8lrPQJc_2 boat 0YaZ8lrPQJc_5 boat 0ZJeQYZxfGQ_7 bear 0ZJeQYZxfGQ_6 bear 0agrBEPe_w4_2 bear 0bx9mbPU7zo_0 umbrella 0c5dV9e0rL0_1 car 0hafN9Sygek_1 bear 0jL3xw-Gfq8_2 boat 0kyg-HgBo7o_0 boat 0lXT8w6Nvz4_1 car 0loh5Nhb32w_0 bear 0lyjvzKFjn0_1 bear 0lyjvzKFjn0_2 bear 0mIwwe5irHk_0 car 0mSZED2I97w_0 car 0mSZED2I97w_2 car 0mSZED2I97w_1 car 0oHtf7nx8m0_0 car 0oHtf7nx8m0_1 car 0peaciSDgqg_0 boat 0rIli5nmkus_0 car 0sAim6AJwgY_0 car 0sAukk-qZs8_1 car 0sWjMW4aW_Y_0 bear 0sbXLfSaBvk_0 umbrella 0tapt-cyoSY_12 bear 0vC1j_r-gPc_1 boat 0vun54M7U5c_0 umbrella 0wXgXCqnblk_0 umbrella 0wzUHyuc5JE_0 boat 0zKI3bZagm4_2 boat 01aEu9jy-zA_0 car 02AiKGZAu3k_2 bear 02bMGGTZE_M_0 boat 04FPpXq4qHc_0 umbrella 04FPpXq4qHc_5 umbrella 04jEe0lfdos_0 car 04p58ydbAvM_0 car 05VoMpLo7Cc_2 boat 05rSMaVX3yA_1 boat 06kAyBeWx5c_1 umbrella 08Fj_YF5X8Q_2 bear 0-Jhv9dONP4_0 bear 0-zDto8pBU4_0 bear 0_ByJ0bAD70_1 bear 0_P-fui2MeI_0 boat 0_soacANAc8_0 umbrella 0_2dsK8nudw_0 boat 0_2dsK8nudw_1 boat 0_2dsK8nudw_2 boat 1EIBn1zqhJA_0 boat 1Fv0cFr9B_Y_0 bear 1Gd-hUsNAsQ_0 bear 1Gd-hUsNAsQ_5 bear 1HhUsmUQmRY_0 boat 1KnTTBiP4ig_0 umbrella 1LKTvGMlL60_0 bear 1MVBovgEi4s_0 bear 1OvseXyo27E_0 umbrella 1PYMTwN-dl4_0 boat 1REcM5EtrZg_0 boat 1REcM5EtrZg_1 boat 1SQF7Tb6pUA_2 bear 1T4c050qGWo_0 boat 1UGqDCwd0TU_2 bear 1VziogDsYAs_1 bear 1WOfnEUurGM_0 boat 1YelAl0OQQg_0 bear 1anH_WthXTc_0 umbrella 1anH_WthXTc_1 umbrella 1avrrmB_Q5s_3 bear 1cbY1pGpdhM_0 umbrella 1cy1p57Z49c_0 boat 1dmbrwAgFuc_0 bear 1fPDeE9SwYI_6 bear 1gbd0C2wJrI_2 bear 1huEYUsV2ng_0 boat 1iD7yA3Elk4_0 umbrella 1iLq0PGfeCs_1 boat 1irtTU-RM8g_0 boat 1lCEFERcEKg_1 boat 1lSGhF2K_lM_3 bear 1l-NcYZKF8w_0 umbrella 1miy1sfneCI_0 bear 1qIgbCRt2C4_0 bear 1qknV5a5WQA_5 bear 1rt4XRA4RHE_0 bear 1rt4XRA4RHE_3 bear 1v8UDwaLZOk_1 boat 1yym4MiYTrs_0 
boat 1yym4MiYTrs_1 boat 1zGry9uSuEs_0 boat 10oedSsXbw0_0 bear 14R96gxvKtU_1 boat 15ImffljXUs_1 umbrella 16BnXZheZE8_0 boat 18XvETJJDqA_0 bear 19ID_DbSclo_1 bear 19vhT11oPv4_0 umbrella 1__PWUxtAJI_0 boat 2Da3689mFHo_0 boat 2DimBSzdfPw_0 boat 2Fo-71zWO5Q_0 bear 2F9aM3isFOg_0 boat 2HDMk0mGW_w_0 umbrella 2IWPUKQEQc0_0 boat 2Irm_qCNQ_g_10 bear 2Irm_qCNQ_g_2 bear 2Irm_qCNQ_g_4 bear 2IyAOD0OkOg_0 bear 2I_k7e8QpWI_1 umbrella 2LWxx48-zmY_0 boat 2OYJuEnLK_w_0 umbrella 2O-9dVZBFm4_0 umbrella 2PL1rgU3jQ4_3 bear 2Pxvoh1PnpM_0 umbrella 2QOthN0H0jo_0 boat 2UBlre798kQ_0 boat 2U7mw3Z_nrI_1 bear 2ZeSJRQEIDg_0 umbrella 2huYkh1UAa8_0 boat 2j5p2kIFnF8_0 boat 2kAmyrOg2is_0 umbrella 2l4-4yNg4uM_0 bear 2l4-4yNg4uM_1 bear 2nWt5S5AcdM_0 bear 2oAbMVTBupI_2 boat 2olUVemt4wc_0 umbrella 2rbAoA6KuZ4_0 boat 2rzjzIvxob0_0 umbrella 2sDjXjM3vuk_4 bear 2sgrwTqPz-Q_1 umbrella 2vC56ILIWK0_1 bear 2w5-fxqKaR0_0 boat 2xzgP87zGDM_0 boat 20nMgEiCqVs_0 bear 223bkVsFvUg_0 umbrella 23-uEh5ygBE_0 boat 24kbYgf2_xM_0 boat 27Yd0qtplBs_0 boat 2_VfwSLic7o_0 boat 3EBKN0vh_8Y_0 umbrella 3EQ8WatEGfM_1 bear 3FBfwZ1vctY_0 boat 3GXWmiQHAA4_0 boat 3Hc48OCKEaQ_0 bear 3ICqGhWY-HU_0 bear 3IOrKwocmOM_0 bear 3KUAz0bb87g_0 umbrella 3KqDceVP3xg_4 boat 3MqGpNqj-fo_2 bear 3M5VwMaIzvc_0 bear 3PN8pPy1PLc_1 bear 3PN8pPy1PLc_4 bear 3PuByhkRjdA_0 bear 3P8-bKeMTDU_0 bear 3P8-bKeMTDU_1 bear 3QQYEFonITE_0 umbrella 3SJI7j-hBwU_0 umbrella 3SbQY-gSjTI_1 bear 3SofVK5wM1k_0 bear 3T5iqGlQLn8_0 bear 3T5iqGlQLn8_4 bear 3UJ24QWw0js_0 bear 3UUo8exclHk_0 umbrella 3VZuzA8i9tI_0 boat 3ZWFSRxFKp8_4 umbrella 3ZwOfZ6mdTE_0 umbrella 3cBiXmqHBLE_0 umbrella 3eH1SNLDT7U_1 boat 3fiWerkBy1s_0 boat 3fm54fM2fh0_1 boat 3kOuqiigfhM_0 umbrella 3khbnSUKCjw_0 umbrella 3khbnSUKCjw_3 umbrella 3khbnSUKCjw_5 umbrella 3khbnSUKCjw_1 umbrella 3leEAIEn6wg_1 bear 3oFuTv4g5QE_0 umbrella 3oFuTv4g5QE_2 umbrella 3ohEBnBnt7o_2 umbrella 3pli8lLuPF0_1 bear 3qGBc-85DMI_1 bear 3q0pJjI8W5o_0 bear 3v6DRHFQTz0_1 umbrella 3yct6bNJF9c_1 boat 3zhjI0Cn1AM_1 bear 3z0lIa162ps_0 bear 
31PMTcBL5-o_1 umbrella 31PMTcBL5-o_0 umbrella 32GDx70-6cQ_2 boat 351brnq0Ryk_1 boat 38Tbojzrw80_3 bear 3__l885Wkz4_0 bear 4A-5QKpDBFE_0 bear 4A-5QKpDBFE_1 bear 4BbVz6UbHFY_1 bear 4GTfq2m-SnY_0 bear 4K0agSc78Js_0 umbrella 4K0agSc78Js_1 umbrella 4MUu-MomyB0_1 bear 4N85gqVvlWU_1 boat 4OQGDsYtfSg_0 boat 4QdM0aAdf4g_3 bear 4Qf9iJ-IMDg_0 bear 4R5HjEAW6Y4_0 boat 4ViaowUogyA_1 bear 4ViaowUogyA_3 bear 4VxP7VQ-WtQ_0 bear 4XCmBo2k6Hc_1 boat 4h2kJG8rDAk_1 boat 4h8E8d4P5ms_0 umbrella 4iktvQjNLS8_6 boat 4lyoTIuPa9s_0 umbrella 4rxmIDjvHvo_0 umbrella 4td5npVxACw_0 boat 4td5npVxACw_2 boat 4td5npVxACw_3 boat 4td5npVxACw_1 boat 4u8RQi7_xUQ_1 boat 4zYtj8BG_ZA_0 boat 4z3XNRP4Qvk_0 boat 40Ogw6O8g2M_0 umbrella 42-2FjqvBRw_0 boat 44nxZjEYqLI_0 boat 45HOGdlAVq0_2 umbrella 45HOGdlAVq0_3 umbrella 45HOGdlAVq0_6 umbrella 46Sp7L3iKK4_1 boat 47mMBnGHuOE_7 boat 48IdCSlEHlM_0 umbrella 48pGfV-z-x0_0 boat 5AhKWEjMmUw_0 umbrella 5AzSuHB6_jc_0 umbrella 5Ce6X4i25i4_4 umbrella 5Ce6X4i25i4_0 umbrella 5EaEfiCIEcA_4 umbrella 5EaEfiCIEcA_3 umbrella 5FZykf07mxY_0 umbrella 5FZykf07mxY_1 umbrella 5FviZXBOPWk_0 umbrella 5H6nBOIIziQ_0 umbrella 5IdOF-nnOkU_6 boat 5I2hW9gRRwU_1 boat 5JubFWZKmZc_1 umbrella 5Kf5KxsLCmI_0 boat 5PxBf16_oMg_0 umbrella 5WUSwyO4k7A_0 umbrella 5XWfGTUYLbQ_6 umbrella 5Y3Lrgpl6s8_0 umbrella 5dL3vGF_-ug_0 boat 5e9luwmv6mU_0 umbrella 5g_ugz2HmKM_2 boat 5iYpaHYUElI_0 boat 5iYpaHYUElI_3 boat 5iYpaHYUElI_5 boat 5nMhK15X4R8_2 boat 5rT33oH7aV4_0 boat 5srF-BzF_go_0 umbrella 5suoa4TFYd4_0 umbrella 5vMpwDm27VM_0 boat 5vyqdnOWivc_3 umbrella 52m9SGVaiW8_0 boat 521jpaMoQ58_2 boat 537tF6-uRB4_0 umbrella 561s-m-0mqU_0 umbrella 561s-m-0mqU_2 umbrella 561s-m-0mqU_3 umbrella 582V5-HF4yg_0 boat 582V5-HF4yg_1 boat 597l2xVl9Tc_0 umbrella 6C42Di7bIpE_1 boat 6FG49plD8TQ_0 boat 6FQz5w7HaKg_0 boat 6JGioFiqwww_0 umbrella 6JLdACYt7D4_1 umbrella 6MVLpYA1t8E_1 boat 6MVLpYA1t8E_3 boat 6OEFFwKhAFw_0 boat 6PVjXDW7JlY_1 boat 6Sxb0d7xIys_0 boat 6Ug54vSsrio_0 umbrella 6WP3KFUYTrM_0 boat 6XrW8Yjd16I_0 umbrella 6c0RAJO-AGg_0 
umbrella 6inTfRLx_58_0 umbrella 6it-xMMovj4_2 umbrella 6khDUjxTmdo_0 boat 6mvP_NKlIHg_1 umbrella 6qpeBvh9pqs_0 boat 6rowMK5ERz8_2 umbrella 6sN56W9U7tY_2 boat 6tLtEuKyj1E_1 boat 6tQrO26kwOY_0 umbrella 6t0mbpnPPdg_0 umbrella 6t55VfdtMWE_4 boat 6t55VfdtMWE_7 boat 6t55VfdtMWE_8 boat 6t55VfdtMWE_0 boat 6uM7MFSH15g_0 umbrella 6uvJft-l1R0_3 boat 6yCsWwj87QI_0 boat 6zxrdodJut0_0 umbrella 61RreGvIPOk_1 boat 66WmMvvZOxI_0 umbrella 68C7HGRrJ8o_0 umbrella 68kx9VUVhzE_1 umbrella 6-Nh0bY1nUk_0 umbrella 7HD-o1yj47U_0 umbrella 7NXmDbHoJn0_3 umbrella 7NXmDbHoJn0_5 umbrella 7NXmDbHoJn0_6 umbrella 7RcyfoxqADA_0 umbrella 7WKzOMuf3Cg_1 umbrella 7a_nsGmUZNU_0 umbrella 7kSyhlnimb8_0 umbrella 7kaTL52xbiY_0 umbrella 7tlbytb63z4_0 umbrella 7uR1cEVdMDo_0 umbrella 7ydX3wCeOgk_0 umbrella 71k1TftUiYE_0 umbrella 76ljAryU9Bw_0 umbrella 78lA-eJGUn8_0 umbrella 7-ugeb_4vqE_0 umbrella 7_k6DM-PlXg_0 umbrella 8AZtNaOO_8A_1 umbrella 8FhIv4h9D3E_0 umbrella 8FhIv4h9D3E_1 umbrella 8H88MFohrUM_0 umbrella 8SuTrZ6xu2E_0 umbrella 8d_Vt2SWIvg_0 umbrella 8fsRltS2ul4_0 umbrella 8nReKSsSgGE_0 umbrella 8oOer9PS53g_3 umbrella 801xOkfqjkM_0 umbrella 84Ber6V3IrA_0 umbrella 84zKfCKtsDo_0 umbrella 9CGTYEUn-mo_2 umbrella 9JFicuESmEA_0 umbrella 9JiMiflDI68_0 umbrella 9J4O20b9qnY_0 umbrella 9S2mGfudahk_0 umbrella 9UVLb_-RbfA_0 umbrella 9bFrwgSSAkQ_2 umbrella 9bFrwgSSAkQ_4 umbrella 9bFrwgSSAkQ_0 umbrella 98OOq0Wh904_0 umbrella 99uO6qHrhsU_0 umbrella -PaNPkpeFdI_0 umbrella -PaNPkpeFdI_4 umbrella -Z3_Ixwl1YY_0 umbrella -bA7JdKB0LA_0 umbrella -d9Vg5j5vZU_1 umbrella -eJmt-GItyI_0 umbrella -k8FuC01N5E_0 umbrella -0y7A0GDVY8_3 umbrella -0y7A0GDVY8_5 umbrella -0y7A0GDVY8_7 umbrella -3TIfnTSM6c_1 umbrella -3TIfnTSM6c_2 umbrella -98I0B3kkqw_0 umbrella AAVVg5xx0p8_0 person ACB01WGxOSM_0 skateboard ACDc6tGnXXQ_0 elephant ADWNgv6trag_0 person ADznOfGgfj8_0 person AEEVGgiuS5c_0 person AEHbOzlbmOQ_0 dog AEJTsQNMkME_0 bus AFlkSTJ-mF0_0 dog AGRV17_1OS0_1 bus AHsZ4FTQ8Ew_0 truck AIViQtfacts_2 horse AJBtOVA1KSw_0 person AJbQP-rIwCY_0 person 
AJ9ODXcnhVo_0 person AJ9ODXcnhVo_1 person AKBq0oH8IOM_1 train AKBq0oH8IOM_3 train AL9dFpjFlLM_0 horse AM-TjLTvBSU_5 bear ANA-pgSAzGI_0 horse ANVnK2HmZno_1 airplane ANVnK2HmZno_7 airplane ANeOKwjvX7w_0 dog APP17gURiBU_0 bear APP17gURiBU_1 bear APTYyEYJfOY_0 bird AQD8YBCTSPs_0 umbrella ARaILMtc8fs_1 person ARsokXpl07Y_1 boat ARsokXpl07Y_2 boat ASPK-ZSB9Ts_0 person ASfv8cmreoA_0 person ASfwyHCtnIU_0 person AS5LvQT9rrQ_0 person ATy91FTiYvU_0 person AVF8lCKe6os_2 umbrella AWRcJpWTPwQ_0 person AWtY9Y2mPso_0 motorcycle AWwDsm1WnKE_1 knife AXjDlIFY7ww_0 boat AYAkMpj_MHA_2 bicycle AYAkMpj_MHA_5 bicycle AYAkMpj_MHA_6 bicycle Aax6L0Qqgio_0 bird AcYd7y_-V74_0 person AdY55Q3qVK0_2 elephant AgbIDWiOXQ8_0 person AgsYgmA19z4_0 person AhWU-QUzOOA_0 person AiqGEAjF6QI_0 train Aiu6EH4a8v8_0 train Aiu6EH4a8v8_1 train Aiu6EH4a8v8_6 train AixV6QSGqto_5 bird AixV6QSGqto_6 bird Ajj7WZLukdw_0 motorcycle AjpbAriY8rU_0 person Alab3dEYXM0_0 person AoAoH9yb6zY_11 bear AoAoH9yb6zY_6 bear Ao7Sa2afCb4_0 person ApDgLQUsEqc_0 bicycle ApakHefqWv0_2 airplane AqIG0zk2bpg_0 person AqTXLh7DtcM_0 person AqTXLh7DtcM_1 person AqdoD9jkBFc_0 horse Aqj7VnXQt4s_0 cow Aq4dBqb2SbQ_0 person ArgYRdhvlc0_0 skateboard AsPXe7qUyuI_0 person AuLrPQqrKV4_0 motorcycle AuY8vITQrsE_0 cow AvBm7iHiDdI_2 boat AvSgTHXgSXQ_0 cow AwVdVzh1Eh0_0 person AwvDMOeS7no_0 person Awzt30r0OLQ_1 bus Aw2t3AalW4s_4 elephant Ayh_2ithjCE_0 cow Ayh_2ithjCE_1 cow Ayh_2ithjCE_2 cow AylQiap7dj4_2 bear AylQiap7dj4_3 bear Ay9QToaaTGc_1 truck Ay_a2OkcdEk_0 person AzVvPUazPYk_0 motorcycle AzzlFx32dQs_1 boat A1RSx6j_ra0_9 elephant A1RSx6j_ra0_4 elephant A1RSx6j_ra0_6 elephant A27YZAfJmrc_0 knife A27YZAfJmrc_1 knife A3E72P24pf8_0 person A3cgW1rDOcI_0 person A32Fi06yKpU_0 horse A5U6AHe9_4A_0 train A5pUgLCQq9k_0 elephant A5pUgLCQq9k_2 elephant A5pUgLCQq9k_3 elephant A63BoLTUNAM_0 horse ZBzVnA8zj6Y_0 person ZB45YyN1WUM_0 bus ZFYGhJKiw5w_1 giraffe ZGfOCwbu-PY_0 person ZHTMfW1eaW0_0 cat ZHURcze8rOI_0 person ZIJUWQKzzsQ_0 person ZJgwacILoAw_0 person 
ZMgP2kxv5E8_1 person ZM3wX5zgKOA_0 person ZNXnJahaXIY_0 person ZOc4wfLX2Jo_0 cow ZOnuSLp6asQ_0 train ZPQNucbAjBM_0 cow ZQITHWk17a0_0 bicycle ZQxmb_nVoH4_1 cow ZRUXj8o10Po_0 person ZSnP5B6NiI8_0 train ZTqDuCZVTmM_1 airplane ZTqDuCZVTmM_5 airplane ZU3AYv2eU74_0 motorcycle ZU4XQbNaYQc_0 knife ZVZWEWzZg50_1 bird ZVjep3tDJjU_0 person ZWL6CshdsuY_1 cow ZWogXn8xs7E_0 motorcycle ZXU4Uua3l0E_0 car ZYOUZjfZMhk_0 cow ZYS0h2pAK6M_0 horse ZYm5iVw0YdE_0 truck ZY8pG-I5Ax8_1 bicycle ZZBBcTBPmis_0 person ZZpckGIvGTI_1 boat Zana4yKDGxY_3 skateboard Zana4yKDGxY_1 skateboard ZbnxzLt8FJk_1 dog ZbnxzLt8FJk_0 dog ZcXtrHkjobw_0 person ZelRUJyMMkw_0 person ZeqhN6ndscE_0 person Ze8cOn59rW4_0 person Ze8cOn59rW4_1 person Zj1TAkYHlQo_0 person Zj7GzCIi_9c_0 person ZlEiOICCDdc_0 person ZlH8Hd961FM_1 knife Zl30Oy50PfQ_0 person ZmXKvpkfHZA_0 train ZmdvunyqJB8_0 bus ZqTkqkEbXEk_0 cow ZrPn3BODZJM_1 person ZrPn3BODZJM_0 person ZuBD3A8Vecs_0 bird ZuEbZKmjxaA_0 train ZuEbZKmjxaA_1 train Zu7udgxuUkk_5 airplane Zu7udgxuUkk_6 airplane Zu7udgxuUkk_1 airplane Zu7udgxuUkk_2 airplane Zu7udgxuUkk_3 airplane ZvadVS1LnQU_0 bus ZvadVS1LnQU_1 bus ZvadVS1LnQU_2 bus ZwLvs9JUsFY_0 person Zw4-vF-vOMk_0 person ZxO4Gd5fhOg_1 train ZxO4Gd5fhOg_2 train ZxX6DBopv30_0 skateboard ZyEA24Ud3EM_0 person ZyM24-ekpz8_0 person ZzBvzlzuw4M_0 person Z03ZC9qmwDc_0 zebra Z1N0xBj_H3E_0 bird Z1ns6XidhT8_0 elephant Z2S6XnfE5vI_0 person Z2kb4LiQJUU_0 train Z2zB-gtDgOM_1 elephant Z22DSYtblFo_0 bicycle Z5rHikLjARg_0 person Z6XKceRI1bE_0 bus Z6XKceRI1bE_3 bus Z6XKceRI1bE_6 bus Z6XKceRI1bE_10 bus Z6qQE2_jsIM_0 skateboard Z68yTt3upjk_0 motorcycle Z8SxFPbnptI_0 person Z8pujku9bPw_0 person Z9vZk0io0fw_0 truck Z9vZk0io0fw_1 truck Z-R7-Ww03t8_0 knife Z_kKBbIzdXM_0 person Z_pwMCnOdk4_0 knife Z_pwMCnOdk4_3 knife Z_0227AsAvk_0 bus A_a1H0EO64s_0 person A_a1H0EO64s_1 person A_pc9ov1cT4_0 person A_weMKVolQM_3 bear BBC4Jmlky4Y_0 horse BBHBoewIXhw_1 umbrella BBHBoewIXhw_3 umbrella BBHBoewIXhw_4 umbrella BCKR989ZYyM_0 car BCKR989ZYyM_2 car BCpaJ-tEv-0_0 
car BFP7MT8RM8U_0 elephant BF7cTjrTSwY_0 cow BF8d91cJS3o_0 person BGcAVF0Zi_o_0 person BGzetX8Dz-M_0 cow BHurVVjld8Y_0 person BIUeggZa3SU_2 person BIUeggZa3SU_0 person BIUeggZa3SU_1 person BIfedkd3HEg_0 boat BJaAlMv6b_U_1 motorcycle BKKSiAed9CI_0 horse BKtAnbXVk1E_0 person BLCEb_seyUs_0 airplane BLCEb_seyUs_1 airplane BL8o-tdhlxs_2 train BL8o-tdhlxs_3 train BMhmY9_ltFc_0 person BO7KZKb9bkQ_0 cow BQRwIXopDJw_0 person BQRwIXopDJw_1 person BQswg--xiy8_1 horse BRd8dUMN0a4_0 knife BRmtavy2ZEo_0 person BR0NNg6gLLo_0 person BSo8wjoZ7zc_0 skateboard BTSUQrxC6l4_1 bus BUHULgt_7DA_2 elephant BU3iU3zJnDI_0 person BU8sEPifL08_0 person BVTVHHm7vkA_0 boat BWNTXqGixw8_0 bird BZUE0vDhMvk_1 knife Bb2fkGYxp2E_0 person BckXjb2o93U_0 person BdHNtn10UKE_1 horse BeXziIDAJDc_0 person BgHV_87CxNI_0 umbrella BgXr-bSqMIo_0 train BhO0SwB8Ee4_0 person Bh4m74dLZaM_0 person BlYWgnhwvkM_0 elephant BlYWgnhwvkM_2 elephant BmZNFBFj-ws_0 person Bm2yaWXwgjY_0 knife BpXhq5Awd3U_0 dog BrC6VbCzRGc_1 knife BrHslMc3UMQ_0 truck BscLJpi3AJc_0 person Bv8WeZ_zrJc_2 bear BzEC1EEC2ts_0 person BzXWK-LODVo_0 person BzbzymdK_TM_0 person Bz6Od4GfW6A_0 truck B0DRHTdmeK4_0 knife B31JkzyQDkg_0 bear B5GVudI81dM_0 dog B6nArbkcRek_0 motorcycle B6sR2aqScR4_1 bus B7IP-2uNuWs_0 skateboard B7yxjI6dz4s_0 motorcycle B8iZGZlQcsg_0 person B8opNd6uzmY_1 person B9GQwzI2Eqk_0 dog B92X9Xn1P2s_0 person B-CJ8miJKPs_2 cow B-n15EytPtQ_0 person B_WnXKd-oZk_0 person CADW3z8x4AU_0 skateboard CADyh6laNA0_0 motorcycle CA3wWkrNnRs_0 person CBSNFKeTnpA_0 bird CCyZAt2Js0U_0 car CE-LfFDfGKQ_0 person CE-LfFDfGKQ_1 person CFN40hxKxM8_1 airplane CFPhXPCobFg_0 person CGg2FXjvvOA_0 person CH3phgDW5Fc_0 person CINfsd8LiOU_3 horse CINfsd8LiOU_0 horse CINfsd8LiOU_2 horse CIqkbJoJhBI_0 train CKmnpW6gboU_1 boat CKmnpW6gboU_0 boat CLtQxCqTzcY_1 knife CMgYFnnxQUU_0 horse COcbSVCp4ig_0 bicycle COcbSVCp4ig_3 bicycle COcbSVCp4ig_4 bicycle COcbSVCp4ig_5 bicycle CRF7PcgB2yQ_2 bus CSnhpel7FTA_0 person CSriNtLepLs_1 skateboard CVmBocpXeTc_0 bus CWCfCeYh2bA_1 train 
CWvjAYt5eR4_0 bus CW9n8Gahfgg_0 cow CXT98GHNtRU_0 person CZ-Sh-SXaRQ_0 person Can5eao1S3Y_0 bus CbB-71R_n9M_1 motorcycle CbpAv8c2Vsg_2 car CbpAv8c2Vsg_3 car Cb3iufTFMEU_0 person Cc2vs8vuPmU_1 bird Cc8E7aTdEVM_0 person Cdain96L-q0_0 bus Cd7g3ZoA5tQ_0 bus CeN22koBQRM_0 person Ce2jOHHBDLk_0 motorcycle Ce7IPtXkNcs_0 person CfqkbrB0Yy8_0 person Cf2jOSj7eRg_2 train CjbhKc3Vjpo_0 person CkEVvGqgVkQ_1 knife Cl13SbLP0hE_2 horse Cl13SbLP0hE_3 horse Cl13SbLP0hE_0 horse Cl13SbLP0hE_1 horse Cl-lB_jS8Wg_1 bear CnMMdc6syXM_2 umbrella Coxzc_S3ID0_1 knife CpLMLRdeJJ0_0 train CpN-qOO6Qm4_2 airplane CpyK9j001RY_0 person CqNEwP8PwS4_0 bear CqNEwP8PwS4_1 bear CqYiAanNpo4_0 person Cqbu8vOsszI_0 cat Cr5p4NYIR44_0 person CttKQip6B2E_0 person CuGu45Z4lt8_0 knife CvszgVrLsgA_0 person CwYG2Hf6-NY_1 cow CwvR1fjMeSU_1 horse CyuollntwZ8_0 dog C1dCZ9W6WIM_0 person C2x3rdWMAyg_0 dog C3lwMd_rlG0_0 person C5MrhYouFTc_0 cow C5SKibJTnR4_0 cat C6dANICzCcg_0 person C6xJeHO8XSE_0 person C7NXymSnEFw_0 bird C8ExRKjU1vY_0 truck C8V2-wEjv5A_1 cow C8sUABBP0Jc_1 bicycle C8sUABBP0Jc_2 bicycle C80bmA0XrjM_0 person C886JwUWvxw_0 skateboard C-Tal1XUc8o_2 person C-zp91eJqtk_3 bird DApDao4fUqQ_3 horse DApDao4fUqQ_1 horse DApauH43Ivo_0 bicycle DBArY7gHuoY_0 cow DBsBTVJNxS8_0 dog DBsBTVJNxS8_1 dog aCNvyXSuG6w_0 person aCVmJCtuPeg_0 bird aCVmJCtuPeg_1 bird aDMk7CwLIxM_0 train aERiDkn_gkY_1 elephant aEwD6TC8S4w_1 bicycle aFEOvm-1KvA_0 horse aHM4Dj-2y8o_0 airplane aI0y0wY4LQw_1 person aI0y0wY4LQw_2 person aJAd-MiEsfk_1 person aJWETVChAE8_0 person aJoKSWtqs0g_0 truck aLYtaO_J2_U_0 person aLbjxTwAV7o_0 person aMDD0PenhaM_0 cow aMgj1BUBexw_0 person aNgAUBTbUUM_0 person aNmgrcJxdw8_0 motorcycle aN2a-rDAYDQ_0 dog aN2a-rDAYDQ_1 dog aOhumbyx05c_0 cat aQcTwMVs1Zk_0 skateboard aQcTwMVs1Zk_1 skateboard aQx68fklEXA_1 dog aSGod2MJ5ww_1 horse aSq5ZqH_K7E_0 truck aTAXvSNkuvc_0 bus aUFxg301s68_1 skateboard aUsTtvWAzAc_0 person aV8S5HLSI_o_0 person aWHaR4ExDpk_0 truck aWIZBHwtII8_0 motorcycle aWgH9T2sGkE_0 boat aWmC8Tbgy9A_0 train 
aXa5YE_AmKg_0 person aYAuay_bTaw_0 cat aYVEZrX4mE0_2 bear aZRYQJd-5CQ_0 train aZRYQJd-5CQ_4 train aZRYQJd-5CQ_3 train aZRYQJd-5CQ_6 train aaZxOcHxPec_0 person ab_RTkwBG_4_0 person acy4aJnh9SU_0 person ac68trlkEnw_1 horse adsmRxlAJo4_0 dog afE4YqgaPlw_0 skateboard afU2vHgUvaw_7 train afU2vHgUvaw_2 train afU2vHgUvaw_3 train afkiqhwTeRQ_0 person aiOHs3hApm0_0 skateboard aiOHs3hApm0_1 skateboard aij190b9wtM_4 bear akWe9oXeKzA_0 person ak1XT_Nl7VU_0 airplane ak4CfFF9Bpk_0 person albeyJBtKD8_0 person alp0ImrbacI_0 dog al12VKid_P8_0 person amyr6d2Ns6M_0 horse amyr6d2Ns6M_4 horse amyr6d2Ns6M_6 horse ao9LHpxNCqY_0 horse apLT3-LKJgE_1 truck apXNcHROKyY_0 horse aqp_quyEngw_0 airplane aspR9ca28CY_0 person as3DGRDezaA_0 person atElNgnFvlk_0 person at-Ex-CnRX4_0 airplane at-Ex-CnRX4_1 airplane au_kgqsZlMU_0 truck avRC7M3_kuA_0 bird awnORAEMUIg_0 person aytqFnOdBLA_0 person azLbVm88Dzc_3 airplane azLbVm88Dzc_2 airplane azXlb1cxVGQ_1 elephant a1qoB1eERn0_0 person a2-lZhKXx9E_0 truck a3In51YCqMg_0 dog a3T8T1R2wAc_0 bear a45XOJQaDQI_0 person a5dffDLeZsI_0 airplane a7hjIfPGJqI_0 cat a74_tj_B-YA_2 knife a74_tj_B-YA_1 knife a8v0k4Bz_QA_0 person a9jgDU5THOU_0 person a97S4U5ezQw_0 truck a97S4U5ezQw_1 truck a-M2_3j67qI_4 knife a-M2_3j67qI_5 knife a-M2_3j67qI_6 knife a-NeSgN26Zo_0 bicycle bAKQZ0F7LFw_0 person bA10PjxgV3w_1 elephant bBPKh_BPJ50_4 bear bBPKh_BPJ50_1 bear bBW4swLrEHE_0 person bB6tIraYEaI_0 skateboard bCDw1dn7M1Y_0 car bCDw1dn7M1Y_1 car bCWM39xLsYs_0 skateboard bDFkztSgMko_0 skateboard bD6xZhJfhMU_0 truck bFnzGS_doNQ_0 person bGFRHhc7zUI_1 person bGZtGWULlF0_0 skateboard bGZtGWULlF0_1 skateboard bIOpYFVLesY_0 person bJviDDrUSwA_0 motorcycle bKB6ESqkOic_1 truck bKRAinEnagU_1 motorcycle bKRAinEnagU_0 motorcycle bNXcPzWMXsw_0 car bN43crdYDJE_2 bus bOL9YHt5u-o_0 skateboard bOL9YHt5u-o_1 skateboard bOofbwD246U_0 person bPKew4jsGkE_0 truck bPRVRL4x5T0_0 truck bQkneVc9gaA_0 airplane bQ64JFsWSf0_0 bicycle bRWbXGRwlVY_0 person bS1Z1k6laqY_0 person bUqFsPoDKBE_0 train bVP58EONEm4_0 cow 
bW4nHswGFPo_0 motorcycle bW5IvSesbV0_0 elephant bXR-iz0NfrA_0 cat bZDsNeqNn9I_0 car bZDsNeqNn9I_2 car bZDsNeqNn9I_3 car bZDsNeqNn9I_5 car bZIU-ajwk6Q_0 bicycle bZIU-ajwk6Q_1 bicycle bZ6Tq0KWSsU_0 truck bZ6Tq0KWSsU_2 truck banaB07Fu9c_0 bear bcKUeyEaRPw_6 bicycle bdhq0SKEqe4_0 person bd3b9R30l-E_0 person beDuTpy1tg4_2 horse beDuTpy1tg4_0 horse beLkXAaP78Y_0 train be30TAE-gq4_0 person bfQSyBsTmE4_0 umbrella bgSSzKax51E_1 motorcycle bgSSzKax51E_0 motorcycle bhoUxK8FSqc_0 person bhuPA9toCGY_0 person biIFNnX2Nl4_0 skateboard biu2ssO3dRg_0 bus bjRPge2oFgU_0 knife bjV04dzuqhk_1 elephant bjdIG6B5zn0_0 person bjdIG6B5zn0_1 person blPLp16K1XY_2 bicycle bmJ_QDIRS2U_1 train bmJ_QDIRS2U_2 train bmJ_QDIRS2U_3 train bmLsrJHQQ14_4 knife bnBORorLvmk_0 person bnBORorLvmk_1 person bnVGsydNrg8_0 airplane bnVGsydNrg8_1 airplane bnZbj1dD0qs_0 umbrella bn0I2aJB5Ps_0 horse boMU1mjUSDw_0 skateboard bo8M-OTk4J0_0 person bpw3BCxYYU4_0 horse bqoDChNwIYY_0 umbrella brJqQ_iH2VE_0 person brMVhyEZLfo_0 person bs5AY2jipno_0 train btL-vruELoA_0 person btq7gMuqMuo_1 person btq7gMuqMuo_0 person bvEJDHpRNoI_0 elephant bvVfFv57gN4_0 bus bvVfFv57gN4_4 bus bwhPTEvGmIo_0 person bydgNyGwoys_0 person bziUK-7O0lY_0 dog b0Z6qKhuldo_0 skateboard b0sKQDUFTos_0 person b1s-jYD36GQ_0 person b4Wua_98Y9U_0 person b4d_9Yc0MwY_0 bicycle b4qC2fctnLU_0 horse b4zSrjPtOfs_0 bicycle b5CJtpeG1Lc_0 train b5CJtpeG1Lc_2 train b5CJtpeG1Lc_1 train b5mOcLykYeQ_0 cow b9VOmo_86Ds_1 person b_W4BWH1i_A_1 person b_W4BWH1i_A_0 person cBxo9bPINJc_0 skateboard cCEImigNo38_1 train cDHZtfsI_gM_0 train cDHZtfsI_gM_1 train cDmkhESohro_0 boat cEcTernKOqU_0 person cEcTernKOqU_1 person cGJLuwZIG5s_0 giraffe cGJLuwZIG5s_1 giraffe cGJLuwZIG5s_2 giraffe cGwjfCPO-7k_0 car cH0sXpOxvy0_2 bird cH9u1pCWp2U_0 person cH_SL9CR8y4_3 dog cIxdxFkZ7y8_0 dog cIxdxFkZ7y8_1 dog cJvh4GqZn-s_0 person cKQQVTnOzBk_0 horse cLULEYFoBPc_2 cow cMdjRuUhBIs_0 motorcycle cMdjRuUhBIs_1 motorcycle cMwa9cC304w_0 cow cMwa9cC304w_1 cow cNDYJRBsIOY_0 dog cPlqWSd2TUc_0 person 
cP-p4R-JZxY_1 bird cRBw9lx-EKA_1 bus cR2-4m174EM_0 bird cR-AWpc5zTs_0 person cTujx-TutbA_1 horse cUrajeQPzpQ_0 umbrella cUrf-ZwPzxI_0 person cUwPVOboe0k_0 person cVng1vleWNY_0 person cVrxfV0w29w_0 person cXZ7JY7YQmE_3 bird cYdqN1oPRdY_0 person cagT3K3Ep3s_0 skateboard cagT3K3Ep3s_1 skateboard ca8rEbHYMXg_0 cow ca-ko46j2fQ_6 airplane cbL66gVAa5Y_0 cow cctYyTO8OtU_0 person cc3mBIHi-GU_0 elephant cdNz1OLa1tU_0 car cf_U0G5W8BI_0 person cggX7PRYUh0_0 person cg_5uaJjLHk_0 person ch_23jXJ_vA_2 dog ciCfkv5831Y_0 airplane cih9W0SPGYA_0 bird ciwNB-l9a88_0 person cjHlHkhg0z0_0 person ckFwzL1Ot94_0 truck ckV9ay1lm7A_0 airplane clZo-o5v1EA_0 elephant clvCQPta7y0_2 bird clvCQPta7y0_0 bird clvCQPta7y0_1 bird cmTPsZ9x3PE_0 cat cmW0Y4KGI7g_0 giraffe cnhhgh_z5NU_0 cow cnqT4u0k3sM_0 umbrella cpK8K6JD_GM_0 airplane cpK8K6JD_GM_2 airplane cprvb4cW5x4_0 motorcycle cqd8PRxMakA_0 truck cqvjKRFEi8M_1 car crys7VEeUgU_0 person cskBHjsDXEs_0 cow cso6B_84BFA_0 horse ctm9x2MaZuk_0 cat cxu1qpzXobY_1 bird cxu1qpzXobY_12 bird cxu1qpzXobY_0 bird cxu1qpzXobY_2 bird cxu1qpzXobY_4 bird cxu1qpzXobY_5 bird cxu1qpzXobY_6 bird cxu1qpzXobY_7 bird cxu1qpzXobY_8 bird cxu1qpzXobY_9 bird cxu1qpzXobY_10 bird cxu1qpzXobY_11 bird czO8IPcAO1A_0 person c1FBptbYp3I_0 person c1FBptbYp3I_1 horse c2T3VDriTaY_0 knife c39xfJcSlxk_0 dog c4kbPHdCIE8_1 elephant c43mnrjx2MU_0 bus c5fPKbV5cAM_0 person c53j9l_w3Cg_3 dog c7gnf6G7Jpw_0 skateboard c7oqQy2Fvlw_0 truck c8JhzKh1i7s_0 person c8JhzKh1i7s_1 person c8gBv0b5g9w_1 elephant c8iU4McayiU_0 person c8iU4McayiU_1 horse c8u5Y95o7jE_0 skateboard c84BjBiic4s_0 motorcycle c93WuBjZeRk_0 person c-nMPinePds_0 cat c_aupqZy-14_0 airplane c_o91IPAB-c_0 umbrella dAHCPltzogA_0 bird dAP6fuArseQ_5 elephant dAtQR4dHPgE_0 person dA0WQ_RubaI_0 truck dBzXNQJRzls_0 cat dCJFMDQBPb4_0 boat dEIuy8LjAxc_0 car dElaQ10vYqg_1 motorcycle dHMFcv4UnmU_1 bus dIP3FoGUXDQ_0 person dJYqTnxujb0_0 person dJnLznNE29w_0 train dJnLznNE29w_1 train dJ9qJezt6do_0 car dJ9qJezt6do_1 car dKmrUcJ9rJY_0 person 
dKmrUcJ9rJY_1 person dK3_HiQMH4o_0 dog dMFsGGvkSVU_7 airplane dMFsGGvkSVU_0 airplane dMFsGGvkSVU_3 airplane dMFsGGvkSVU_5 airplane dMFsGGvkSVU_6 airplane dNByeKh4gnA_0 person dNJ0q9QKzmY_0 boat dNQYo7REyBU_0 person dOkb5WhLZGU_0 person dO0uu_fVUVI_0 car dO0uu_fVUVI_1 car dO4Jxsf987s_0 bus dO-OrWse3dA_0 car dPCSntP-29E_0 person dPCSntP-29E_1 person dP7je2qU_QA_0 dog dQIlnQxMIKo_0 train dQIlnQxMIKo_4 train dQIlnQxMIKo_5 train dSAlTJeDlfQ_0 person dTvJyUKKshw_1 person dTzaYePj1gY_1 cow dT5gXQAE-Qk_0 train dT5gXQAE-Qk_2 train dT5gXQAE-Qk_3 train dUpoYuxpKPM_0 person dVTCCi__Z4Y_1 person dVte44AGoEE_0 knife dW4RjdpTaJo_0 person dXYYgzjwm8w_0 person dXf-d5rkqdA_0 horse dZv4xXpV6js_0 boat daeBFAZFQhU_0 person dbXKW9_L9sE_0 bird dbwBzQuj1uA_0 person dc5oaWIkfwg_0 cat dc-iaCwezlU_0 train deO0aj59T8o_0 person dfU8DcWDX8U_0 horse dfU8DcWDX8U_4 horse dgcW3TkPLmk_0 boat dilCe3bivVk_0 bus di59PG3l25w_0 bicycle di59PG3l25w_1 bicycle djsh1r_W6ko_0 person djt1lzJn7ak_2 bird dlYwqfTRqoo_0 person dl-bg8WPGZs_0 person dmk3Cedj6g0_0 person dn006hdarCg_5 elephant dn006hdarCg_4 elephant dn006hdarCg_6 elephant dn006hdarCg_7 elephant dn006hdarCg_10 elephant dn7iBi1t7UI_0 cow dn83BrM71W4_1 boat doOsOyiHItw_0 person dpqVH2tgA3E_0 person dqlk6F07Cxw_0 motorcycle drohCN_vwC8_0 motorcycle ds7JGeImFXo_0 horse dtsLwaO2des_0 train dt5TzAZByk0_0 person duROYI-AZlk_0 person duROYI-AZlk_1 person dutryxrzRjE_0 umbrella dvDxOc2VWhc_0 person dvP5Dsp8EZA_2 dog dvTIkEA7rOc_0 person dvvoKcQ5OOQ_3 bear dvx9-0cVEYc_0 person dwQuyR9XFVM_0 skateboard dxcnKYynkEY_1 cow dxmxpyj3WVk_0 knife dxmxpyj3WVk_3 knife dyUVa3ZQVFg_0 horse dzitRPrX410_0 cow dzpcdtcQLfY_0 motorcycle DEnqBEwPykc_0 person DFCqlvY5OFY_1 bus DFXptvzN9V8_3 umbrella DFqSvoSh-qA_0 cat DHEtea1hPBc_0 person DHwUCu0rrvc_0 boat DJ_neeMWAuw_2 dog DLsYDXqthiY_0 skateboard DMBbH5HyOME_0 person DMn3ruRAObI_0 person DMyjVWCLbes_0 person DM6e1vEjYeM_0 bicycle DM6e1vEjYeM_6 bicycle DND0C3XD7mQ_0 horse DOQilAKERwk_0 umbrella DOmE1dA6CoQ_0 person DQJ4cPhVhFg_0 
airplane DT895n1nqqY_5 bicycle DT895n1nqqY_4 bicycle DUO7S4ma320_1 cow DUO7S4ma320_0 cow DU9GDCN25lI_0 person DV4bDUzPAIU_0 train DWxidp6TWlg_0 airplane DXhV8uXKo7w_0 cow DXxF81ZJ_Jo_0 cow DX1_rKFVugE_0 dog DYBLqnRCo7g_0 cat DZ2-5rYAUVk_0 train DasqUqgdRv0_0 dog DbNVb8C-Au8_0 person DbcdvAsVI48_0 person DcZSisTgSJs_0 airplane Dc9pWTcUNXY_5 bear DeVQ3mr19Sw_2 skateboard DeYmal3wAoE_2 dog DeYmal3wAoE_0 dog DfOuxNA9lro_1 giraffe DfXOTMc9IyM_1 dog DfbPDcLTZEo_0 airplane Df89T9IxDvc_0 person Df93ocrYlyY_0 person DgBuwqAbIkI_0 skateboard DgBuwqAbIkI_1 skateboard DhA0S7lPFVw_9 elephant DhA0S7lPFVw_0 elephant DhA0S7lPFVw_1 elephant DhA0S7lPFVw_2 elephant DhA0S7lPFVw_4 elephant DhA0S7lPFVw_5 elephant DhA0S7lPFVw_6 elephant DhA0S7lPFVw_7 elephant DhA0S7lPFVw_8 elephant DhEO4MuDBOc_0 dog DhJAQCycHJs_0 elephant DhU-e-L13WM_0 person DhU-e-L13WM_1 person DhU-e-L13WM_2 person DiLGyNCykDE_0 skateboard DjQx_qEnXko_0 airplane DkMltyvC5l4_0 person DmPTbBo32qI_0 bear DmzlB4KBLN4_0 bird Dm-XQKFA-BQ_0 truck Dni4lPw5oH0_0 person DnzZd_9JlAA_0 cat DoB18AvtSxQ_0 train DofzMEokur0_0 person DonLBf92rMc_0 dog Dpp4k_BzZY8_1 airplane DqcEAexhJ10_0 car Dr6LfvQ_qKo_0 car Ds_4eRyQDPo_2 boat DuLk58XzeyA_0 train Duv1XrdytdE_0 cow Du4jlCLKZds_0 person DvjMMfcCq3U_0 person DvuTkGshMjA_2 cow Dvx0WVMuXVw_3 boat Dw4--8weqIA_0 person Dx0LbiFgvPI_0 truck DyY1MPuGf5w_3 dog DzUJVl_Pej0_0 person DzV-LWU5GoY_0 person D0b7xYmwl-M_0 skateboard D0fhKhpAhJM_0 zebra D0jRA5TKT-o_0 person D1vTDW7YDTk_0 person D2hRnCm0JtM_0 person D2oV8BC0iq8_0 person D21mLV716vI_0 person D32GncZb51Y_3 truck D4Jcg1u1Z-o_0 person D5maMxzZBe0_0 person D5m40zCfU8E_0 person D6E0xgBBquU_0 person D68oMT6tpc4_0 person D7H1UQbgDOw_0 cow D9RGgV3fKds_0 bird D_a5TQmLY-Y_1 person EBJ5jExrVqY_0 cow EBLJ9v0QSrU_0 car EBUmagxsoV8_0 person EC8ftAGy2qA_2 skateboard EDBDHaRqToc_0 dog EEZKnzcn-v0_0 cat EEfiTwozdM0_0 cow EExHYyuWa-o_6 bird EExHYyuWa-o_2 bird EExHYyuWa-o_5 bird EFRywDKULxc_1 train EIl3WAxkNwc_0 train EJJXpIiBEuw_0 cow EJrj49l1N8k_0 
airplane ELPjTNVxWfM_0 person EL-2TiSSQJg_0 bear ENPh0zyq2wo_0 motorcycle EOAADsR4IpM_0 cow EP3xfG5_2i8_0 cow EQN5hODdb6o_0 skateboard EQ09ewMQn8Q_2 bird EQ09ewMQn8Q_0 bird EQ09ewMQn8Q_1 bird EQ9vXT_IFYQ_7 bird EQ9vXT_IFYQ_3 bird ESxRPsxVX-U_0 car ETxRky6I39w_0 person EVD8F2ZOBbI_0 elephant EVYb5simSY0_0 umbrella EWOehvvAvqU_0 person EXK2mcPIoBI_3 skateboard EXK2mcPIoBI_0 skateboard EXK2mcPIoBI_1 skateboard EXK2mcPIoBI_2 skateboard EXeKX_vOTvc_1 car Ed-cfsA3BsU_0 horse EeQOKiPASgY_0 person EfAYg1FMY-4_0 bear EfAYg1FMY-4_5 bear EfAYg1FMY-4_4 bear EfSd4ucOXKs_0 truck EfbKwoMA6Kk_3 horse EgpujPNldhs_0 train EhQXwVQsngU_0 boat Ej0A86Eu1p8_0 person ElHgkP_L8Eg_0 airplane ElTbW5itOAs_0 car ElTbW5itOAs_3 car ElTbW5itOAs_4 car ElTbW5itOAs_7 car EmvEUer4CVc_0 umbrella EnIkH0jrzaI_0 skateboard En6a3Ed7fvk_0 person Eo5s8ykuzbU_0 person EpBZ77zmngM_0 horse EpPw2JoHiTQ_0 person EqPK8xdf8hQ_0 person EqdBE21XAks_2 umbrella EqdBE21XAks_3 umbrella EqdBE21XAks_4 umbrella Eqz3xG4mWTs_0 person ErN8-oTPkq0_1 person Er-RnWQrUac_0 cat EsvPqOf-zEA_0 person EtIj5IUtn-g_0 airplane EtIj5IUtn-g_1 airplane EtIj5IUtn-g_2 airplane EtMlgBveP58_0 dog EtMlgBveP58_1 dog EtkDITl8mEM_0 person EwlCKB77dYo_4 elephant EwlCKB77dYo_2 elephant EwlCKB77dYo_3 elephant EwqkMKutzBE_1 knife Ew-67eGgZAI_1 motorcycle ExRpjMcFoBY_0 dog EzRrohN-4ss_0 skateboard EzZW0lM284U_0 skateboard E2DbbyoqLg0_0 person E2DxfZPPu5Y_0 horse E2DxfZPPu5Y_1 horse E2DxfZPPu5Y_2 horse E5erp1mhTzk_2 bear E7CsRpWElOo_0 horse E76rAl8oksk_0 dog E9ARkaJcz2M_0 person E9J03vUxTZQ_0 truck E9w2-Y4d3MM_2 truck E9w2-Y4d3MM_0 truck E-ea5keAG3Y_0 person E-jpkZw_MdU_0 motorcycle E_cxlc0vrMg_0 horse FBA18EyY2eI_2 boat FBQpWJPC5pQ_0 person FBQpWJPC5pQ_1 person FBo954IqOlo_1 bicycle FBo954IqOlo_5 bicycle FBo954IqOlo_0 bicycle FBo954IqOlo_2 bicycle FBo954IqOlo_3 bicycle FCICeCD4dKc_0 person FCypWBdHWb8_0 elephant FDKvBZH5LZE_0 horse FD89Oq7BclA_0 skateboard FETKMmV7P70_0 motorcycle FETKMmV7P70_1 motorcycle FEbVjS5-4ps_0 person FEsMY2y49d0_0 person 
FFuW_UWBVpU_0 train FHRrYqTZExQ_0 person FID77dKUAU8_0 cat FITKtv4tf7w_0 cow FIi2mEV5dfQ_0 skateboard FIi2mEV5dfQ_1 skateboard FIvujc5oqIY_0 train FJDKoEDLbNc_0 airplane FLsLXPchOx0_0 knife FMV_-mdKV8U_0 horse FNNrfAuIQmo_1 horse FNpd4DJ9LBA_0 horse FPrcQJh9INg_0 person FQMXzPIoL14_2 bird FQ-_p0lM-FM_1 elephant FRxSISi7wV4_0 bicycle FSFW4QxV8-0_1 truck FUlVrltDAOk_0 bird FWNxjmydNdU_0 person FYVNE1zYmyA_0 person FZrXRU5CxC8_0 boat FaG9RreeG6M_6 bicycle FaG9RreeG6M_2 bicycle FbF-nKQx0WI_0 person FcP50mFdaYM_0 train FdPApnQkBVQ_0 bird FdPApnQkBVQ_1 bird FdlDAmvsrR0_0 horse Fd1uYmMhzPE_0 horse FedOlGadIYU_0 bird Fgd7fHxPhBs_0 truck FhQLl40AANQ_0 bicycle FhvdS8wJkrI_5 bicycle FhvdS8wJkrI_1 bicycle FhvdS8wJkrI_2 bicycle FhvdS8wJkrI_3 bicycle FiCIZpT08B0_0 cow FiD6UZuDr1M_0 person FjFwrTEJK1U_0 person FjmcQfLBpvQ_0 person FkSfwpb1Gss_0 person Fkhru_XyPSU_4 bicycle Fkhru_XyPSU_1 bicycle FlOaA91Qa2M_0 cow Fm7Z44jVp_A_1 person Fm7Z44jVp_A_0 person FnIpAhpGTps_0 person Fn0IWwSVPlk_0 person Fotm2Ewrdr8_0 dog Fphk_JpP4JY_2 bus Fp2WKSG1qGw_0 person FrFv1rYtAws_0 train Fr298zXE9O8_0 umbrella FshCFVUSBXY_0 person FsiLiUl9I10_1 dog Fs0LVU4qKSs_0 skateboard FtEi5TPqRiA_0 dog FuWY9thbtxw_0 airplane Fu9EsTmh8z0_0 person FvCCkxW3sv8_0 person FvDNYPmcXjQ_0 bear FvDNYPmcXjQ_5 bear FvDNYPmcXjQ_1 bear FvDNYPmcXjQ_3 bear FvHW0PyfZ_Q_1 skateboard FvHW0PyfZ_Q_4 skateboard FvHW0PyfZ_Q_5 skateboard Fv542o8y6aE_0 person FyEliJtlQIY_0 person F0PPPvVTNnE_3 bear F3iJ9TqS-lE_1 bear F3iJ9TqS-lE_0 bear F39H1yTLerI_1 train F4xCJHUMGsE_1 elephant F47hXNWC3K8_0 cat F48wdm2YukQ_0 bicycle F48wdm2YukQ_5 bicycle F5Cc5wQJvhI_0 person F5Tm5BM0oaM_0 train F5unbOiULNM_0 motorcycle F5unbOiULNM_1 motorcycle F9B5cLZb3T4_4 bicycle F-OWsiGzRg0_0 person F_bZObIr47Y_0 bicycle F_bZObIr47Y_1 bicycle F_dg4Hi5ZU0_0 car F_xLwEhMPdY_0 person F_8rnxkAIgQ_0 person F_88eTR1pKU_0 train GAMoEnodBZ8_1 bicycle GAZx8145Hkk_1 person GAZx8145Hkk_0 person GCW28zxN9vk_0 person GDM2ctXPkmg_0 person GD5lsE86vOA_0 car GE2nS7Zbkrc_0 
airplane GE6JO6nrE2A_0 person GF9unI6hEMI_0 airplane GGULYyv3_eY_0 elephant GGULYyv3_eY_1 elephant GGVYYc0KNWc_0 truck GHTZcjImEqk_0 person GIJMEjX04dI_0 person GIM6FHDMp0A_0 person GJTjlO1FJpo_3 bear GJTjlO1FJpo_5 bear GKyxtLTjXUU_1 motorcycle GLG6II1JYko_0 bird GLpNrOwqNXc_0 person GLvmwdOjsHE_0 cow GOEqT5_bhls_1 elephant GOVFUFYsINQ_2 elephant GOfP3fxCTvw_0 person GPPKPFCI-Kc_0 person GPSXltbv0f4_0 motorcycle GP5anr-xMfw_0 person GRluMAZzu8c_0 airplane GSlWcX28sLk_0 person GUMAgiab8bg_0 person GUQmoD1aWhw_0 truck GUS7BLoHHPk_0 airplane GVNmuLeQ6pA_1 airplane GVNmuLeQ6pA_2 airplane GWBEjzdOLjI_0 giraffe GWBEjzdOLjI_1 giraffe GWBEjzdOLjI_4 giraffe GXMBH6OujvQ_0 person GYM460lVV-k_0 horse GYQO-VevHpI_0 person GYYxgR_VGFQ_0 dog GZSlxtl9bj4_0 horse GZSnngz0VX4_4 dog GZhWdIsibfs_2 bear GaierMnR4Xk_1 elephant Gbe74-OWIo4_0 person GbwJhzDrFtI_0 airplane GceLsS4AwH8_1 horse GcjSF4Uyl74_0 person GdoD65Qn6kE_0 cat GeOos0BFCSY_0 bus Gf_4plKc8tw_7 horse Gk8oy0G3dRU_0 person GlAH7-Rf8gc_1 truck Gm9yMiay9Is_2 skateboard Gm9yMiay9Is_3 skateboard GnTFmN4UNrI_0 motorcycle Gn6ltyIKgcs_0 person GoXxeDaopwo_1 person Gokzf7T4oVU_0 cat GpE5cmO_2kQ_0 skateboard GpE5cmO_2kQ_1 skateboard Gq7NQWGviWU_0 train GsLJXtf6RC0_0 person GuMiw_OwxlM_0 knife GubE6GTKTVc_0 person GubjV1tFrVA_1 umbrella GvRQ4QZHPGc_8 bicycle Gvjv4DJftts_1 cat Gv5P6ORl-1M_0 person GwAGS0xPZDQ_0 person GwY5WqLjTcM_1 cow GwY5WqLjTcM_0 cow G0C4XEsjKGU_1 bird G0i_9qeBwm8_0 airplane G0sAxRZi6m4_0 car G1doEZFbv70_0 airplane G1gPj-UK_gw_0 cow G107tKapVcQ_0 giraffe G16fmAfdp9A_1 zebra G16fmAfdp9A_2 zebra G2gyuboBt-E_0 elephant G2gyuboBt-E_1 elephant G3jqix8WiYE_0 person G5jg_wMMXmU_0 person G6iN1OKj_eE_0 elephant d0G8DzwenzU_0 person d2ugQO5Z8M8_0 airplane d3_3kfZ7rkc_0 boat d3_3kfZ7rkc_2 boat d4cTjVsUbIA_0 person d44bp_UDYOQ_0 cow d6vOtyrW2eQ_0 motorcycle d6vOtyrW2eQ_1 motorcycle d6vTXY--7zw_6 truck d6xRfIz84Og_1 cat d8GWgCsv0fo_0 person d8kSiPkTvek_1 bus d9IW6kCjfmA_0 knife d9IW6kCjfmA_1 knife d9YRdtwcTOo_0 motorcycle 
d-CkujEJl24_0 zebra d-6-T4gkBTk_1 cow d_eu3LZxECY_0 motorcycle d_eu3LZxECY_1 motorcycle eBIZSQg7pV8_0 airplane eBSijengaq4_0 person eBVE2h6i3Do_0 person eByIZzEh-DA_1 dog eByIZzEh-DA_2 dog eCzDpCe6xvc_0 horse eDUR6UTxYhk_0 person eFXZRDC38No_0 bird eGVUtZXFcmY_1 cat eJn0yGDjytc_0 cat eKcJ2alScW8_0 cow eL4uMBEG4gE_0 bus eMsvM8G2Z0s_0 truck eM0KTbh6EZE_0 person eN0JRkzxVPw_0 elephant eOeuY4ZbTt8_0 bird ePiG-qPeJ6c_1 elephant ePiG-qPeJ6c_3 elephant eQEBmp37ZMQ_0 person eQ6zyKVuU2s_0 person eROdacH1GEk_1 horse eRsf1_omRf4_2 elephant eRsf1_omRf4_5 elephant eRsf1_omRf4_6 elephant eRsf1_omRf4_9 elephant eRsf1_omRf4_12 elephant eRsf1_omRf4_13 elephant eRsf1_omRf4_14 elephant eRsf1_omRf4_15 elephant eTfXd1DQ6mc_0 dog eU_B2dXyBkI_0 elephant eVAEQdogSqk_1 person eVLFX7RZOJM_0 person eVnnuxmvpM8_0 person eVnnuxmvpM8_1 person eVnnuxmvpM8_2 person eWU6Kk9K6lI_0 airplane eWZHute7e6Q_0 person eXAJwsjltWs_1 airplane eXAJwsjltWs_7 airplane eXvofXrEuU8_0 person eZFqrD8MAKk_0 horse eZFqrD8MAKk_1 horse eZc2BPYt4rU_0 person eZ9Qy0zfLb8_1 dog eaoH4_TdTt8_0 person ea2xP5nm53M_2 knife ea_yr_40TRY_0 airplane ebc-oEY_eDM_0 cow ecksf6PLvhw_1 dog edx1TW6jRFg_0 person ee6Zcz8Pyfk_1 cow ee6Zcz8Pyfk_2 cow efczZtAK28w_1 dog egbQbEuLDlE_0 cat egfoTu4gtZo_0 bicycle egg1WCEyuTw_0 person egmCEe7OgiE_0 person ehxHGWKtaAg_0 person eh9YpbAcMZE_0 person ejRwmx3kUI8_0 person ej0xIcEXWiU_0 horse ekfKlK5w3Lg_0 person ekwoV0dpRwI_0 person ekwoV0dpRwI_1 person ek7bnCHGZq0_0 skateboard elB6RfDJA6M_1 dog eljiGrMEYiQ_0 person eljiGrMEYiQ_1 person emISA6YzHZ4_0 bus emISA6YzHZ4_2 bus eoIk6xjgQ-4_3 bicycle eomNxgG_ivE_1 umbrella eomNxgG_ivE_2 umbrella eomNxgG_ivE_3 umbrella er7oQRfciJ8_1 person euESct6MMNg_0 person euU-dtl6yyA_0 person evyGgkwoEpU_1 horse ex_t3nR28rg_0 bird ex_t3nR28rg_1 bird ex_t3nR28rg_2 bird ezrZuVfbOPs_0 person ezyFfdIkCCQ_0 cow ez5RcUDpMoI_0 bear ez5RcUDpMoI_4 bear e0cc8KmRgDE_0 person e0cc8KmRgDE_1 person e1VJlGQGYTA_0 umbrella e37RxtyP9nk_2 person e37RxtyP9nk_1 person e5Q4wIVJR40_0 person 
e5a3Z_wlpUU_0 person e6FwS_DOE-U_1 horse e6FwS_DOE-U_0 horse e6xVrcpMa9Y_0 cat e8Bc9zwTFnE_0 person e9G1bOd8GlA_0 car e9QeTOo4XBE_0 person fBYtizIh0wc_0 cow fCVsRanBID8_0 person fDWKYttA3fM_1 umbrella fEA-xCaKqfI_0 train fEWxV64teMY_0 dog fEpH1AFdSqs_0 person fFGF5gVW6UU_2 bicycle fFGF5gVW6UU_0 bicycle fFGF5gVW6UU_1 bicycle fFIVNddMFuc_0 person fFT1LpdsEhQ_1 cow fFmghP5NQVA_1 horse fFw23dFiBDs_0 person fGJKT5ttUQw_0 person fHFCYOUh3vU_0 truck fJJuwfeoaWI_0 cat fJnC2nKYQVQ_0 motorcycle fMl60_fkMfc_0 knife fMu0OmctSTI_1 airplane fNTptXtpsoo_0 cow fOyaDea7Al4_0 person fPA_KgXi5v8_0 bird fPA_KgXi5v8_2 bird fP7EpJzJt0A_0 horse fQRAi5pN1Fg_0 bicycle fQRAi5pN1Fg_1 bicycle fRB4jD1Uecw_0 person fRSu9-lyuaU_0 truck fRoEX_9tHtM_0 person fSB_aY8HhJI_0 person fSFjxB1XU2E_0 person fTd-8VbsXus_1 airplane fUNAhHKf_OA_0 cow fUva5AKNiPE_0 person fUva5AKNiPE_1 person fU8NxbaMKu0_0 bus fWD8TEXWtek_0 bear fYBeigFqN7Q_0 train fYBeigFqN7Q_1 train fYWFh5BSEyg_1 cow fYup3iPmtHc_0 person fbAOGfYPur0_0 person fcFwbcMNdUo_0 bird fcFwbcMNdUo_1 bird fdMa18fwj14_0 person fdQFJz9IOso_0 umbrella fd73v3-Qjqk_0 knife feMxoQY38A8_0 person feMxoQY38A8_1 person feNEI7bD5HI_0 bus feO8Ip4MOn4_0 cat ffQKiGKTDaA_0 bird ffr6_q8liAc_0 person ffr6_q8liAc_1 horse fhVVVY5XhDI_1 knife fhWE0XDoxjM_0 airplane fh9tibERtYI_0 person fiKs6mdtsmM_0 cow fiVKh-Q-iY0_0 motorcycle fkGWb9_HVsA_0 elephant fk85Ace_-LM_0 dog fmE9seWSDfs_0 umbrella fmosIu7__Wc_1 person fmrqs2YvNCQ_0 person fm4syrPib5M_0 person fnKNDlQq-JY_0 person foWPkPNDqyU_0 bird foWPkPNDqyU_1 bird fojim3ViD7Y_0 person fpI0N9Lv5V8_0 horse fpv4fALQXpQ_0 person fqWa-DUPAGw_0 person G8IUU0gjlEI_3 boat G88QbXTQ6LI_0 skateboard G9Sdd3czaTk_0 dog G-kF2D98oms_1 elephant G-2yXvawYec_0 person G-5iXA4ERtM_0 train G__uy4I0Kzw_0 person HAOmPeNNjNc_0 bus HBUeO1WOFFk_0 motorcycle HBbWtsju37w_0 boat HBw-J_3WlCY_0 cat HF8ZrMgnyo8_0 dog HJYmTdBHVvU_1 elephant HJYmTdBHVvU_2 elephant HJ08tJU-IIA_0 dog HKNkm0t39B4_0 cow HKRKZksEGro_0 person HMfFCe-og9A_1 bus HMt7kgP0MC0_0 
person HM8XKdebDvI_0 boat HNBF7AppAQQ_0 dog HNheLARZ64w_0 bicycle HNheLARZ64w_2 bicycle HN-3LaZVuCs_0 car HONOO3gmDec_1 person HP6UlpPulc8_0 bicycle HQ3nHqG24O0_1 cow HRF40e3Tbvw_0 bicycle HRF40e3Tbvw_2 bicycle HRRhkyr7U5E_2 train HRcVM9md3Xg_0 cow HTrUPWOXlvI_1 person HTrUPWOXlvI_0 person HULLjmpSRUI_0 cow HUssZ9c2Qvs_0 truck HW8Z7IdfuIg_0 person HYCFQjnuXBI_0 truck HY4XBjJWJYg_0 truck HY9NQ2zNtGc_0 cat HZVvEd_Tg_g_0 person HZngEEoQWDA_0 person HaMmo5SdpUo_0 person HaVnQ_P5HdQ_0 train HacYwonTy6w_1 skateboard HbWinZWeK2U_1 dog HbhmAMorGaw_0 person HeOWa0NNB0g_0 person Hg0fRYqZQ3U_0 person Hi384VDSwXw_1 bird Hjo95Vo38qU_0 person Hksncw-BlKU_0 giraffe HlWb7xQHFKI_0 dog HmH4hitBoc4_0 person HoSTe-9VUJA_0 cow HpdyNV4GqbM_0 person HpdyNV4GqbM_1 person HsGPGwN7vSk_0 person Hugie4Q6leo_0 bicycle HvKC4fLwUYw_1 person HvKC4fLwUYw_0 person HvOisoEmjKg_1 airplane HvU4Jz4Gd1k_0 cow Hv_d6KPoSgA_0 skateboard HwZUDp7yxxk_0 person HxPskaUPSXg_0 cow HyHQRrpWhpk_0 boat HylH7-rD0wA_0 bird HzEm2GlGzhc_1 truck HzTD_opfrqI_0 car H0QTCKxJmLY_1 train H1Oxjm0NqCg_0 person H2GwgpAKbzY_0 dog H3HrWs1HITE_0 cow H3S_DkPBWtw_0 elephant H3S_DkPBWtw_7 elephant H3S_DkPBWtw_1 elephant H3S_DkPBWtw_2 elephant H3S_DkPBWtw_3 elephant H3S_DkPBWtw_4 elephant H3S_DkPBWtw_5 elephant H3S_DkPBWtw_6 elephant H3XF5rAtuJA_2 person H3XF5rAtuJA_0 person H3a-C6RRYyo_0 person H5mmSHRHeOA_0 person H6TuJxifX64_0 train H6w4nf5H4U4_0 bird H6y9C6Ndy2A_0 bird H6y9C6Ndy2A_1 bird H7XZ5716KnI_0 person H7z05uOIPRM_1 train H92s5sHsotk_0 airplane H-4EZAh3ZiE_0 bus IA1FFP5WN-4_0 bear IA1FFP5WN-4_2 bear ICj693xC5DY_2 airplane ICj693xC5DY_0 airplane ICj693xC5DY_1 airplane ICxHfkE0XCo_0 person IDx8_34ETTQ_0 person IEyymbAxp24_0 dog IFS0QSfnbaM_4 knife IFS3ILjlHkY_2 truck IF_auR-0fxM_0 knife IGv9j-RQi0k_0 dog IG0UmL5bvEo_0 cat IHFF7DOpF4Q_0 motorcycle IHmYV5ymU08_0 cow IKEUMXjIyTQ_0 car ILZvGBKYYrE_4 bus ILZvGBKYYrE_0 bus ILZvGBKYYrE_1 bus ILZvGBKYYrE_3 bus IMTbwAOJNIc_1 train IMh4AHUZ2HQ_0 person IM4EBlgTTOg_0 bus INlrdk7hgl4_0 
knife IOQt3fFTSVc_0 horse IO7-lFsWvl0_0 bicycle IO7-lFsWvl0_2 bicycle IPEJs-vLCV4_0 truck IPEJs-vLCV4_1 truck IRpgjSP4pLI_0 person IUJGm3Iu0Bs_1 bicycle IUgsoj74aWQ_0 person IVlnjlVA5rc_1 bicycle IXP1ML1tdZQ_0 bus IXRxjnkOJeo_1 motorcycle IXenlPUsqrc_0 person IZvOv7tCr00_1 train IcRjjKSX5uc_1 person IcRjjKSX5uc_0 person Icnle27cmMM_0 bicycle IdVZJW1HC9E_0 airplane IdVkEz2IF7w_0 car Ieb9oZ9eB8I_0 dog IfWSlkR8DbU_0 horse If1zPOV0idg_0 horse If1zPOV0idg_1 horse Ih2gG0269H8_0 bus IjQXXK4uYVY_0 dog IlMHPX2VcGw_0 elephant IluTkrIqsVg_1 elephant IluTkrIqsVg_3 elephant IluTkrIqsVg_6 elephant Io7bj1jNpPU_0 car IpjQJZ42zyQ_0 elephant IpjQJZ42zyQ_1 elephant IpjQJZ42zyQ_2 elephant IpjQJZ42zyQ_3 elephant IpwI5VTWHLc_0 horse IpwI5VTWHLc_2 horse Iqy4PPX-Tlc_0 person IsHTpd2cnvI_0 train Ithz7KSWCxU_0 bus IudK7ch_IIg_1 airplane IvRDw_IA0_s_0 cow Iwve-3lTmMk_0 person IyLshk4jlyo_0 cat IygCvE4_amo_2 bird IygCvE4_amo_3 bird IyjFl1Hhk3Q_0 person Iz4XK2zNDUU_0 person I1wuUCQbXLc_0 umbrella I2DkTg8wPnI_0 person I2WoCDTXONA_0 person I2WoCDTXONA_1 person I2lh579NY2s_0 bird I45pfwCBczo_0 person I6ESaCg4z_8_0 person I6TvXxQTtZQ_1 horse I6TvXxQTtZQ_0 horse I6TvXxQTtZQ_2 horse I8OfOokt6YU_0 person I8XhyDacLtU_1 bird I8m0QjcQlSo_3 bicycle I8m0QjcQlSo_4 bicycle I9ivT_P5G18_0 person I_k5qXHxb0Y_2 knife I_k5qXHxb0Y_0 knife JBkwLPruJe0_0 person JBlDwXJFbQc_1 umbrella JDZiLsus2es_1 skateboard JDvfPX9cFDg_0 dog JEpTSJRO3co_0 person JG2tVzjxhao_0 bird fsAEg5w8xTg_0 person fsCwAYYI4js_0 person fsKTO8ksQ90_0 person ftMQOwvHDF8_1 car ftns38_MSTM_0 cow fvxc7ruCiYk_0 cow fvxc7ruCiYk_3 cow fv8aFklHmko_0 skateboard fwEvL-luHlw_0 airplane fwEvL-luHlw_1 airplane fwt8LzF8Mic_0 person fyZImQFj_Y8_0 cow fycK7kJWV1I_0 umbrella fzr3kw3BDDo_1 airplane fz6ONSUlvNY_0 person f0i5E4DOFc8_0 bus f2SctRCBZQc_0 car f3Z5d9I7rIw_0 knife f4fxmsxPzrg_2 elephant f5LEkr56Efg_0 person f5Uz-TuMQ0Y_0 horse f5ZpGBYuJ7o_0 boat f5kAHBPObsw_1 cow f6fZjMRJgoM_0 horse f63aow5BRAI_5 bus f65rTlprptk_0 horse f7yNS6ltUFk_0 person 
f8H7Ns8cw-c_1 train f8rXEKktSCg_0 elephant f_VqZJyJ4GM_0 motorcycle gAHcWn06srk_0 person gB0-eGpMj50_0 person gB2asNpe3zY_0 person gB7jSQgkcMM_1 horse gCDC8R7IB7k_0 person gCwe-o1nqBc_0 motorcycle gCwe-o1nqBc_1 motorcycle gC9z8IzG83s_2 bicycle gDEk1TWuZug_2 person gDG5Xr2p2y8_0 elephant gDHnBnqogX0_1 airplane gDHnBnqogX0_0 airplane gDbZj1O36VU_0 airplane gDihz5aZLyA_0 bus gDihz5aZLyA_2 bus gEkiX2yFQm0_0 cat gEnLlmMhxfE_0 person gGNmKI2M8i4_0 person gGd6hYCKdEs_0 bird gHMCfvdZzMM_1 person gHYzGPx8f_4_0 zebra gHYzGPx8f_4_1 zebra gIx12Q8A3p8_1 person gJwtAwSqEow_0 train gKAPbj9esXI_0 skateboard gLqb3YuVttM_0 umbrella gMRigFNGMeY_0 person gNfQargrILo_1 car gOFgWsujZaI_0 cat gOWc7VBEwMo_0 car gPEMf91dil8_1 horse gPSB23kv5Uc_0 person gPhL52Mj1_A_1 motorcycle gQ1qmNZzaTo_0 boat gRDFlfzM_iI_4 elephant gRDFlfzM_iI_6 elephant gRDFlfzM_iI_1 elephant gRDFlfzM_iI_3 elephant gRMJhsEuiAc_0 motorcycle gRMJhsEuiAc_1 motorcycle gRMJhsEuiAc_6 motorcycle gR29_U82QeE_1 horse gSJbrV0vy8M_0 person gSz16yrF9yA_0 person gT0yjYUmf90_0 cow gUGlSiBvfOs_1 motorcycle gU8s5nxyBDk_0 airplane gU8s5nxyBDk_1 airplane gV3CcNeVZcY_0 elephant gV3CcNeVZcY_1 elephant gWkTSRUqxoo_0 person gW6HdCsty0U_0 knife gYLohMps12s_0 elephant gYLohMps12s_3 elephant gYLohMps12s_4 elephant gYLohMps12s_1 elephant gYLohMps12s_2 elephant gaKGYmLxJVU_3 bicycle gagJEV--3Pw_0 person gdAVi92ZfSc_0 horse gdx96NpU6BY_6 train gd4UfPes3YI_0 cow geEXytMwfq0_0 person gePAI8wYSdw_0 person gfTVuceAzNs_0 elephant gg8YzsSulrQ_0 truck ghciPMerSc0_0 truck giWDg00GIDw_1 skateboard gig9B4ecK3w_0 person giy_SOmkBY8_0 umbrella gjnyg97XwnA_0 person gk-cycr3xjo_0 person gmVDmxVI7n0_0 elephant gpV4Qlx6YrA_6 bus gqLSqmK3m74_0 motorcycle gqZYY0m_TuM_0 motorcycle gsrvWcnpNP4_1 motorcycle gsrvWcnpNP4_0 motorcycle gtVr7urU8c8_0 person guDQk0hVgU0_0 bird guFTeFvjr9Y_0 bird gu3DTnVjNQM_0 knife gwXwH2Cs3BY_0 knife gxHGnBrpPZs_1 airplane gxHGnBrpPZs_2 airplane gxKuLTUNhp4_0 horse gx7PFNpHd_A_0 person gyaP7qiRxfY_0 cow g1OZWFLSspQ_0 motorcycle 
g1rQZNA6yyo_6 cow g1rQZNA6yyo_0 cow g1rQZNA6yyo_1 cow g1rQZNA6yyo_2 cow g1rQZNA6yyo_3 cow g1rQZNA6yyo_4 cow g1rQZNA6yyo_5 cow g3HXJNMlAsM_0 airplane g3oqxu4AhBw_0 person g3swsx-acTI_1 dog g3swsx-acTI_0 dog g3vbaqnLXn8_0 cow g4bayrAEhIU_0 umbrella g5rUJOptHXQ_0 horse g5ty_7So5Dw_0 cow g51pzrSssl4_0 person g8M5d--ghFM_0 person g8vKB3IU1JY_0 horse g8wHQVpij-I_0 person g9eN0FHn4-E_0 dog g-EAZ6gVcic_0 motorcycle g-pVcRyPQG8_0 cow g-yHAyCA2KI_1 horse g_C47ek7TmI_1 knife g_C47ek7TmI_4 knife g_C47ek7TmI_5 knife g_QHWoQgmFQ_0 person g_QHWoQgmFQ_1 person g_Tk-SESaYI_0 person hBHt6mnfUeo_0 bus hBMZHx3_cTs_0 train hC69bGTvLBo_0 skateboard hD3Bn03GXNQ_1 dog hFNAxcRpGBM_0 skateboard hFSygfNIY_Y_0 skateboard hFex_TS-aUo_0 person hGnscWmehTI_0 car hG9efPyerw4_1 horse hHdBCtElIQg_0 boat hHlqyr11RiI_0 person hIWM6v4zcSM_0 elephant hKoGkl1wyCU_0 person hON0t9Dzay4_0 motorcycle hP1ViN_WadY_0 cow hR-utsUhYSg_0 person hSAUbt6-Yjc_0 knife hSAUbt6-Yjc_1 knife hSeHymINF98_1 bus hTaEY4YCVqM_0 airplane hUjzfhyM30Q_0 airplane hUjzfhyM30Q_4 airplane hUxguQsLvcs_4 knife hUxguQsLvcs_5 knife hUyAVmRxAzM_0 person hU_dAA1A0X0_0 person hU_9cs_qw1w_0 person hVjyHhYH6Ss_1 airplane hVjyHhYH6Ss_2 airplane hVowH5-Ss4I_0 train hV4tEsm-F5s_0 airplane hZdxBk4cjmg_0 bus haiW7jpl3wY_0 person hcJBaxNIvE4_1 person hcJBaxNIvE4_0 person hcV4RZPeRbo_0 airplane hcuLD1cn9GA_0 person hdUc4uUYh0E_0 boat hfWfYFG2O94_0 person hgagtwzScGQ_0 person hhFOwnYOLl0_0 giraffe hhLyE41H8nE_0 motorcycle hhNlg3Ws9Dc_0 person hhyVc2wsXVk_0 horse hhyVc2wsXVk_1 horse hh432zDMgPo_0 train hiKbm0rqEb4_3 skateboard hiN_kULL84o_5 umbrella hiN_kULL84o_4 umbrella hkEV_E85Jzw_0 car hkSv_YxmN7w_0 person hlZDJrpJzPU_0 person hljwk2WbXGY_0 person hmSeUlyLLak_0 train hnZvUHrA3CY_0 person ho6sg-47RD0_0 airplane hqNhKf3a69Q_2 truck hqYyvTeOvas_0 bear hqaNlwG0DNU_1 person hqrmbVw_EwQ_0 cat JIuyqZCU5zY_0 cow JKiG_pk4lSE_0 person JKmvEldBeEQ_0 cow JKsodtdUW-o_0 boat JMLFZcONQAs_2 skateboard JMLFZcONQAs_5 skateboard JMMci7hryUQ_0 motorcycle 
JMMci7hryUQ_1 motorcycle JMMci7hryUQ_2 motorcycle JNUhCGqPlFg_0 bicycle JPHPd13gaL8_0 car JQrDalAaP4w_0 person JQrDalAaP4w_1 person JQz6IarIr4E_1 person JRAVv2LgiGo_0 skateboard JRUvqZtBMrM_1 knife JR0QfXOOmaA_0 person JSml3dguiUk_0 motorcycle JTFT_iJGFUE_0 person JUdUxjC2LRE_0 bus JWU6vdEt_OU_0 person JWgjcmMh62o_0 train JWgjcmMh62o_3 train JW0-hEA4v9A_0 person JXIh3fJ4Jv0_0 person JX8ODdMUi7g_0 bird JZC15tOV-eg_0 horse JZMOzYwcTA0_0 person JasH0KtinHY_0 airplane JasH0KtinHY_3 airplane Ja5jdE_8qio_0 person JbyTZ-esDXM_0 truck JbyTZ-esDXM_1 truck Jb93SMKg5-k_0 person JcVOyLTTvKA_0 person Jc18AfXzLZU_0 person Jc18AfXzLZU_1 person Jd7uOTcPvY8_1 car JeWRfjjRMQk_0 person JerVzlWZwac_0 bus Je-lnjK_8fk_0 person JfjkltN0lZc_2 horse JfobA6aKaas_0 dog JftQEHHdO5w_0 truck JgaE8KDwg7k_1 bird JgaE8KDwg7k_2 bird Jgc2PQ8Swbo_0 cow Jgkj9pj3-tc_1 horse JhdyYrqxn_g_0 motorcycle Jh7o2iR-lRg_0 person JijsSnHthXE_0 train Jio_xBodQxY_0 person JjQ8bdq_eXk_0 person JjtkwX4npyw_0 person JlG7Wzz4uU8_0 car JlG7Wzz4uU8_2 car JmkUuTj-Nks_0 umbrella JmtuhGXlqmY_1 airplane JnNJksYeB18_0 car JoKod4XDE6o_3 bird JoKod4XDE6o_0 bird JoKod4XDE6o_2 bird Jp6_g7oF2lQ_0 cow JqEprl56N4I_0 skateboard JrIoaRmcs6o_0 cow JrNq6Z5YSoc_0 person JrUHo8zVwpo_0 bus Jsjz8hiE_iU_0 person Jt7Ojtx0TMs_1 car Jt7Ojtx0TMs_3 car JwBYrXUHdZ8_1 horse JxTKws5Dx_8_0 cat JxjXZYfiem4_0 dog Jx9mLWFxpnc_0 dog JyYBZBogBvs_1 boat JyduNnkZOiY_0 person JyrP5u2MuSo_0 motorcycle Jzcc0pjgA5c_0 person JzjRC1xYwy8_0 dog J02u46SlewE_0 person J1GtEDNcsHQ_1 horse J2JOoOxaJdw_0 person J2bB5BgR-5Q_0 bus J2hdK_vuyyw_0 motorcycle J2ycUTr0lJQ_0 cat J4T_QA6J7kw_0 boat J4T_QA6J7kw_1 boat J4T_QA6J7kw_2 boat J40neYxbEYA_0 skateboard J5-Z9tNISPw_0 car J6klPNMhLKc_0 cow J7I-QXddTIk_0 person J7hnNI0jtws_0 person J8ITxacusCI_1 person J8ITxacusCI_0 person J9-8Qe3BWoI_0 bicycle KARqX_agLpU_0 knife KAgU6SrQTlQ_0 umbrella KAgU6SrQTlQ_1 umbrella KArVkjxSGpM_0 person KBCIbwknDew_1 bicycle KCeuwWEv3ZU_0 person KCi4f4Hp6oA_0 airplane KC5ECqMiTLU_0 skateboard 
KD84e88aqHU_0 person KD84e88aqHU_1 person KEpHRYH8r28_0 giraffe KGdIJzBVugY_0 truck KHqFOBeHCwU_0 boat KIOilXstQLY_0 person KIOilXstQLY_1 person KJ2kEj3C5HU_0 airplane KKWUDcCI6yU_0 cat KML2msVr5mE_2 elephant KMNAnjpGqv4_2 truck KNIVWRv3awA_0 truck KOmUta2sIgk_0 person KOsm1GUs46s_0 motorcycle KOza4PGcE0M_1 bear KPLDdfk8hIg_0 train KPLDdfk8hIg_1 train KP7RzxyTTAU_1 airplane KRKxqkfpetI_0 person KRNWPLnvZz4_0 person KR7Ah1hw5gA_0 person KS8S3STq2W4_0 bird KS8S3STq2W4_1 bird KTkhMglNlCE_0 person KTpwnsz498Q_4 horse KTpwnsz498Q_6 horse KWYD2iyUmgk_0 horse KXIJLUzQi5Q_0 person KXMlBQiVeEg_0 train KXPGShfFlU8_0 person KX9MjIikBU8_3 bicycle KYc-vKtN0DI_0 person KY4mXNDM8I0_6 elephant KZdOpoUJ3Nk_0 person Kcg7gY3WD7M_0 person Kcg7gY3WD7M_1 person KeJWqAV0EgA_4 umbrella KeJWqAV0EgA_6 umbrella KedkADy9tBc_2 knife KedkADy9tBc_4 knife KgDguip9mZM_1 horse KgDguip9mZM_2 horse Kg0XH4mez1A_0 cow Kho8jpdZzTs_0 skateboard Kjd7D98QULc_0 airplane KkdLE8EkzQ8_0 cat Kkw7ZPCEz5w_0 person Kk-2ajLfeh8_0 cat Kk_LtYOgQXA_0 boat KmLYFD7xykY_1 car Kmwqg1uRPRE_0 person KnQuff1ffzM_0 skateboard KoRqIzHBQks_0 train Koq5YYiN1tc_0 train KpHpGcL_jEc_4 bird KpHpGcL_jEc_3 bird KpfTioA2qKw_4 elephant KpfTioA2qKw_5 elephant KpfTioA2qKw_0 elephant KpfTioA2qKw_1 elephant KpfTioA2qKw_2 elephant KpfTioA2qKw_3 elephant KppX5i4QRZ0_0 umbrella KqsBJAhU_Dc_0 cat KrRVwTPG26w_3 dog KsE43Lli_3U_2 horse KsE43Lli_3U_3 horse KskL-dN784o_0 airplane KtfQRtfJQ8s_2 skateboard KxDh7a8_AmU_0 person Ky4ahEexJUc_0 airplane KzDLvBPcQew_2 knife KzMFSHS4xVs_0 bird KzOxVUsduDY_3 knife Kzt2eSUr1rY_0 dog K0IvSLIQbgQ_0 bird K0SktTNMXQU_0 motorcycle K2WsSTHs45g_1 elephant K2WsSTHs45g_3 elephant K2oIvJd-d-A_0 person K4IN8pNA--U_1 person K5C2Y3JvXCU_0 skateboard K7TOmJ6RB_8_0 skateboard K89ScUqJx5E_0 person K8_u8_NkoAk_1 train K9L-BYQcepo_0 bear K9pgB6KH-EY_0 cow K-laAofNBgs_0 horse K-xigT3f2VA_0 horse K-0pug6xNEI_3 train huFyV9NBOBY_0 person hua1XfGRDoc_0 horse hulGMGXPaBE_1 elephant hvXgMKsetW8_0 elephant hxBjbg6s174_0 person 
hyNwXcKelY0_1 train hyNwXcKelY0_0 train hzUpr73wZz0_0 airplane h0jkFTI3qmI_1 horse h1Hv9HnMe70_0 car h1zuISckIeI_0 bus h10iwpJO4pQ_0 train h2vHhQ7_MT4_0 skateboard h3Fo82UBMRY_0 dog h3IHNdoTXT0_0 person h3PBWibdVUc_0 train h3RgUc0oY-c_1 knife h3RgUc0oY-c_2 knife h3t75PNg778_0 person h3uSlke3koc_0 motorcycle h4qpt2FEbC0_1 elephant h5JnAInpuSo_0 motorcycle h5JnAInpuSo_1 motorcycle h7_4qHh7Vas_1 truck h8TnGCoSVeQ_0 airplane h8fKxUGKz8k_0 motorcycle h8fKxUGKz8k_1 motorcycle h-pm7wD31Ss_3 train h-pm7wD31Ss_0 train h-pm7wD31Ss_1 train h-pm7wD31Ss_2 train h_VG9OpleKc_0 motorcycle h_VG9OpleKc_1 motorcycle iAZV9nCf3RE_0 motorcycle iA7evYzMygE_2 knife iDBpYSvahjE_0 person iDHjOnhAKA8_1 skateboard iE75sptNwbs_1 truck iE75sptNwbs_2 truck iFVwtlc6IYE_0 horse iFdOAHM4xDg_0 person iFwPDZE4778_0 skateboard iG4PvtWoxG8_3 cow iH6Vlg0k330_3 dog iH6Vlg0k330_5 dog iH6Vlg0k330_6 dog iIWFuFa7Z4M_2 person iIWFuFa7Z4M_0 person iIWFuFa7Z4M_1 person iIzXR3qRt48_0 person iI08dGJAOMs_4 elephant iI08dGJAOMs_3 elephant iJcf4PhS_SQ_0 person iKzpo0D7b_8_0 cat iK-7fByPADo_0 person iMdJ5Xlz0hU_0 knife iMeNXU67sVg_1 skateboard iNiiX6P-kqA_1 dog iOxVi3Tq4ts_0 train iPlXCYJ6F7w_0 skateboard iQ-tckw9_uk_0 truck iRzm-CyyW-E_0 person iSNNmpWe3LA_0 person iS7wej_vrvM_0 person iVBDQ5wm-0w_4 airplane iVTAxc633DE_0 person iXrLhQgf8HM_0 elephant iXrLhQgf8HM_1 elephant iX4gVag7ShI_0 person iYL_l0MxgMY_0 bird iYlgi1z6nYI_0 truck iavLgJ3_05c_3 horse icVQnqL0xPI_2 boat idkGZQeYvJ0_0 skateboard igbftnGj4-o_0 bicycle igg-y1toBvA_0 truck ihkqhIpO_hw_0 person ijbDg16cIC8_1 bus ik4t0sIEmTI_0 person iltKgr5JKI0_0 person il5UMLzlQts_0 bus imDfH3So8XU_0 car imDfH3So8XU_1 car im4bCIqpJns_1 bicycle im4bCIqpJns_2 bicycle im4bCIqpJns_0 bicycle ip1Y5qjDYfQ_0 airplane ip_oGEZ6zMw_1 person irvGAW8bqAw_2 bus isbtQ06yVM8_0 truck itNqceL9dLM_0 cow iuii5XHcAYA_1 dog iulQVUJanzg_0 skateboard ivGBks6evlo_2 dog ivSQWqs_u1I_0 bear ivpPLs-cqxA_0 car iwHJDgGVuCA_0 airplane iwHJDgGVuCA_1 airplane iw7zrlRPMo4_2 horse ixgGTHdobNI_0 person 
iyDedQNhiYI_0 cat iyaI71EqLsg_0 person izHN9JUwtJ8_0 boat izQ74nq9zh4_0 cow i0QLe6YR7yo_0 person i1OlP2Sq0a0_2 truck i1xqjStfSsc_0 person i2SgjtgmsE0_0 person i5DfO7_n0q8_0 cow i5GkqX44npg_0 car i5JWZKdNOac_0 motorcycle i5JWZKdNOac_1 motorcycle i6mzD2HGWOA_0 airplane i6sR2IY4-Ck_0 cow i8JA178zd0s_0 cow i8Z9-KSMCTA_0 bicycle i8syjc7Erco_0 motorcycle i-EijejS9Oc_0 person i-eCNLw3hVU_0 bird i_l48nIXjxw_0 horse jBYa-gqwSeY_1 cow jCiTA9oIryk_0 elephant jCuDdMn9sYA_0 person jDGrgBt83DU_7 car jD33e45nuRw_0 bear jD5K1zGLtvc_0 skateboard jEE_ZlDJ4cc_0 cow jEzxW8ylxK8_5 airplane jEzxW8ylxK8_1 airplane jEz3EToUAg8_0 person jGCLsWhdTds_0 umbrella jHhJLxyr960_0 bicycle jIqTFAgBLpc_0 dog jJkZrKOehcQ_0 person jKD0oOyMl2g_0 person jLO5kFd36OY_0 bird jMLgjCQWQY0_0 person jMmH8xfY1kw_0 cow jMyxNu6YkEQ_4 boat jN5jdXmBv2Y_0 bird jN5jdXmBv2Y_1 bird jN5jdXmBv2Y_2 bird jN5jdXmBv2Y_4 bird jN5jdXmBv2Y_6 bird jPouarzO-e4_0 cat jQPz-9OfXRM_0 zebra jRQuCIsXz1c_0 airplane jRUeQo3V1bk_0 person jR366TYYsuo_0 person jSkwPkAAiFM_0 person jTNzSUl_zOQ_2 elephant jUzhGHE_jgE_0 person jVYzDs5YRM4_0 cat jVoxxEKEOFo_0 motorcycle jX_taNw8FFg_0 skateboard jY4Dh-UAAaY_8 skateboard jZBMDKFS5D0_0 person jbp8mHJfHGI_0 person jcYNP_FWkA0_0 person jcne18p2r2c_0 cat jdttJqwg_3o_0 motorcycle jfSY_UCtq-w_0 motorcycle jfTXT98Naic_0 cow jgQiUggCu7A_0 cow jjTgUBAd4D0_0 cow jjq2PAHcLiA_1 person jjq2PAHcLiA_0 person jlBGbg_CJz0_5 train jlBGbg_CJz0_6 train jlOOUqYlNNY_0 motorcycle jlgECDznb0g_0 bear jl7oYVm0X34_0 bird jnU2n55I_LU_0 dog jouq30Wmqxg_0 motorcycle jouq30Wmqxg_2 motorcycle jo6o9BwKsUQ_1 elephant jqPPsrUULY8_0 horse jtWUSSp-JiY_0 truck juS7DvjMPoo_0 person LCzQs5ybibU_0 horse LDwE_VIc9Zc_0 cow LEL3OcoqV8k_1 knife LEPsxGhXYxY_2 truck LEPsxGhXYxY_3 truck LEXpJRLTRak_1 bear LFWlRG2B-w0_0 bus LFWlRG2B-w0_2 bus LFWlRG2B-w0_3 bus LGvjU4PVcC0_0 boat LGvjU4PVcC0_1 boat LGvjU4PVcC0_2 boat LIC3D63R3HU_0 person LIhhU9j6MI4_1 cow LLD46pbwbiU_0 person LLiy-k-4-OM_0 train LLvpoIlozKU_0 horse LLvpoIlozKU_1 horse 
LO0IsJZeXhU_0 elephant LO0IsJZeXhU_1 elephant LPzXMvYB97A_0 person LTh-XAE8m3M_2 train LURSawdSS9k_0 dog LUsb9vk1q6U_0 knife LU539OYJ_z8_0 person LXHO99b-uAQ_0 horse LXHO99b-uAQ_5 horse LX0HL9qztic_1 umbrella LYPeAbFVTQw_0 person LZCq31MG3yY_0 person LZEMKs6H53w_0 person LZNlxXE0_2s_1 skateboard LZNlxXE0_2s_2 skateboard LZNlxXE0_2s_3 skateboard LZ3S39QfkKA_3 bicycle LbHrVQR9f24_0 cow LcvMMvrPIug_1 cow LdNi4yjT3yE_0 person LdusiqJFR6I_0 person LesCJsHdAU0_0 cat Le2725PKYQk_0 dog LfUSKsg8JoQ_0 cat LfhPiqIDAcI_0 person LgbwFATbwhs_0 cat LhF7TJOwt8o_0 motorcycle LhOMGvkzP28_0 person LhOMGvkzP28_1 person LhkFN7f676g_0 airplane Lh1QrEwtBxU_0 skateboard LiS31CevvvA_0 person LiS31CevvvA_1 person LjRWmJThZrA_0 person LjyZ7Djyq1U_0 person LkP8lgpmCJc_0 airplane LkfML7bjGg8_0 person LmCzQ6WrePM_0 bus LnYz8cQsrWk_0 cow LpTBcxby8_U_0 cat LpT4VBLapqM_0 car LpjbdSyW__A_1 truck Lqm0JTDlIaU_0 truck LtIW9sP55N4_0 person LuC8ON_75l4_0 person LuRLF2TroVk_1 airplane LunFMJp3_Uc_0 cat Lup2fypzuD4_0 person LurlbycI8WQ_0 person Lvd7WBHnDpk_0 truck Lwxi57QRroE_0 person LyPkKroSsaU_0 bird LyPkKroSsaU_2 bird LyPkKroSsaU_7 bird Lz7uf7cmfAU_0 horse L0Y9j9DtU1o_0 dog L0mqjqU7pmw_0 person L1C1GJZuI6U_0 horse L1TihVYcfII_0 bear L1xr5gaSzeQ_0 bicycle L2lJenTKrLU_0 truck L2lJenTKrLU_3 truck L2lJenTKrLU_5 truck L22pyXEUjv8_0 bird L22pyXEUjv8_1 bird L5px8rMqxRY_0 motorcycle L8Q0lJgaUi4_0 zebra L-3-1978GvI_0 knife L-6R2vuKWhc_1 truck L--TMS61Zvw_1 boat L--TMS61Zvw_5 boat L_dOv3wd1ZM_0 person L_nI4_2RbTU_0 knife MAmHLoJdmc8_0 cow MENNFokPNbU_0 airplane MG8-IGrKVxc_0 truck MG8-IGrKVxc_2 truck MG8-IGrKVxc_3 truck MG8-IGrKVxc_5 truck MH1GdFqE_lo_0 horse MH1GdFqE_lo_2 horse MH1Kct5RCRg_6 airplane MH1Kct5RCRg_10 airplane MIkxezmilfY_0 person MI6x6FrXJqs_0 knife MI9BIgkOBjI_0 horse MJ9vJFTTV5c_0 person MKiCrBXtflw_0 cat MK8Jm3I4In4_0 dog MK8Jm3I4In4_4 dog MMiSt9MNne8_0 train MNve0XPgcGA_1 bird MN1A5E3jNSE_0 horse MPJu68gBGfI_0 person MPMudxdiIds_0 train MPfgu6-snaM_0 bird MQ1o_7gpp5E_0 person MQ1u8IEmFSA_0 
person MQ3HhLmsCik_0 person MRNJmLLkjPc_1 motorcycle MRNJmLLkjPc_2 motorcycle MRqfEOhWW48_0 person MSItPvVCUN8_0 cow MSd5Ecl5-W0_0 person MSnEnQ0psW8_0 car MV6MGXhQwFQ_0 cat MWbnSN-7WG0_0 cow MWt4P6HWxMM_0 horse MXEcQSFwng0_0 cat MXTzea4MeHc_1 car MXoVDyewPBE_0 person MYFPnJIKK5k_0 person MYpdq9KvK8o_1 umbrella MasaNQLCMGE_0 person MbRvEKuvR04_0 skateboard Mb6r1es0AbU_0 cat Mcdl3s6oQrc_3 bear Mcdl3s6oQrc_1 bear Me-clc6PGkA_2 horse Me-clc6PGkA_3 horse MfYpMzLWST8_0 cat MgFhoihDD1U_0 person Mkmpoid1BvA_1 train MokOHR3wImM_0 cat MqBTk3ITQ8c_5 elephant MqBTk3ITQ8c_3 elephant MrWZEUtDBq8_0 dog MuyIuhdszH0_0 person MuyIuhdszH0_2 motorcycle MvKMtFVP5NU_0 person MvbZEiffy8s_0 person MvuGj1qR4Ic_0 motorcycle MvxUj_Du2IY_0 horse Mw6Cu1mPanU_1 cow MxtJwd0GBkA_0 airplane MzTsjMauBH8_0 truck Mzrv2OCC2GE_1 person M0TTCr9jjgc_0 horse M12KvkF1Nec_0 person M40gbbuNuL4_0 truck M5p7jyvEgPk_1 knife M52oDxJEXk4_2 horse M52oDxJEXk4_0 horse M7Kcv9fUrhA_0 cow M9CCnnc8m8k_0 giraffe NAInb4dMC_E_0 airplane NAInb4dMC_E_3 airplane NAsDBYDNhwY_0 cat NDxs_vxhhME_1 person ND-VrJY7mU0_0 person NEsCBcZFajg_2 airplane NEsCBcZFajg_5 airplane jvlyXCBSuCk_0 person jwYviTYbJYs_0 cow jxL3F-iB2S8_0 bus jxmsNv20V50_0 train jyrY4oyyA7M_0 person jzNOBsi5TtQ_0 cow jzeFDGEt_iQ_0 person j4UJ80q_s3c_4 skateboard j4UJ80q_s3c_5 skateboard j4t-Otp9ES8_0 person j6XmNyG8nYE_0 bear j8SM6uLadmU_0 motorcycle j8aX3NuEnxc_1 airplane j8aX3NuEnxc_0 airplane j93wwDC_a2I_0 skateboard j_tT90ISNnc_0 skateboard j_6ZWhyOOcA_0 person kBKG0SaNbdw_2 cow kBYFlPJJx-s_0 person kCHOoDF-pXo_0 cat kCQIRLEi88s_0 person kCefZaEK9M4_0 person kCt3G72NjyY_0 motorcycle kEx2sgiyKpY_0 dog kG5vclMyg7w_0 skateboard kHIZAi1E9gU_0 cow kH3Hwla_MUM_0 person kI7523l1Tu4_0 horse kI7523l1Tu4_1 horse kLwsGbEsMjs_5 elephant kLwsGbEsMjs_1 elephant kL52zPMgsXM_0 truck kMIRREOoSt0_0 elephant kOqKBgGRd_c_0 boat kQu7xcJmp6w_0 airplane kRLl2HLijWc_0 elephant kRqsESioKVM_0 person kSWUU8Ef-Rg_0 cow kSXkd4PYX9M_0 bear kSm9E8WwGYY_0 person kTT6onfYUug_0 bicycle 
kZcfsku1oJ4_1 bicycle karZg0Iifks_0 skateboard kavU8zKXrEY_0 elephant kbD6iXQ3P6M_0 cow kb4GuHpwuSw_1 cow kdPgKSrjVYQ_0 train kd9Tn_hyeb4_0 dog keka7aToy_E_0 person ke2Ap6Zvq64_0 cow ke2uXJrB9WQ_1 bird kfL1KEY53AM_0 person kfMMMSNZWeM_0 giraffe kgcb2y-aw8s_1 truck khicinfB1nY_0 person khr1-lWZOOw_0 bicycle kixX1ga8yrw_0 person ki51QTz_6iw_0 bus kjhcR5ljaDU_0 car kksfStf04pc_0 person kk41Jjw-BpQ_0 horse klxQpVdft5E_1 bicycle kmIUPZSNl5A_0 airplane knFBzlhmDMk_2 skateboard knFBzlhmDMk_3 skateboard koomOoaIF0Q_0 motorcycle ko4el3e0QFI_0 bird kqE2rNzUnvU_0 cow kqJJ6_2vGtU_0 motorcycle kqiHy-EzdcQ_0 airplane kqiHy-EzdcQ_1 airplane kqiHy-EzdcQ_2 airplane krD5WtdljCc_0 bird krR-lFUTXHo_0 cow ksbdMzGs-gs_0 person ksbdMzGs-gs_1 person ktCRlGt6408_0 train ktcXRj-Vz6c_0 bus ktcXRj-Vz6c_1 bus ktvaX1ALzwE_0 motorcycle kwMNSTE0h8U_0 bus kwMNSTE0h8U_1 bus kwyn-eed9l4_1 bird kx2jH9V7vYM_0 train kz0gVW9uWkc_0 skateboard k1C25MTUso4_0 person k1Y6Y1yocF0_1 knife k1qT5GtPmQo_0 bear k2fCUP9H4cw_0 skateboard k24lvYKkK5g_0 boat k3hYFu55iGE_0 person k3hYFu55iGE_1 person k3pTU4KNdvE_0 train k4tqy4pdlNs_0 horse k5MmpG9afSM_2 bear k5UoGZZb_RY_0 cat k5oey7bw5kA_0 person k5-IPGgeCPc_0 person k5-IPGgeCPc_1 person k8OboASs470_0 skateboard k8OboASs470_1 skateboard k9COlD7u1tI_0 knife k-tdE0VAFkc_1 person k-tdE0VAFkc_0 person k_E-cIymiis_0 train lAZQZSK_9bk_0 cat lCc5-WmCZJk_3 dog lCc5-WmCZJk_5 dog lDWAsuKkv5Y_1 bird lFObiVRO-BQ_3 airplane lGAGodreVjQ_0 train lGJB2hhw5pI_0 cat lIbOGzXhSW8_2 horse lI-A6pFtkLQ_0 train lI_jxWxWivM_0 dog lJXfbIuwTIQ_1 cow lJccP5OJjZ8_0 train lKBO-dakd8w_0 train lLyfm0vbHrw_0 train lL_4QscWdx4_0 person lM0yKqnWblw_0 person lNJbOSFK9N4_1 skateboard lOFTlhNmKD8_0 bus lOQf3A_3lPI_0 horse lOWmL3mpSeA_0 train lOvB2zlHw8w_0 dog lO-XTKPQb5I_0 train lPapZHOAdzk_0 bicycle lP5lgBlsH0U_4 airplane lP5lgBlsH0U_1 airplane lP5lgBlsH0U_2 airplane lQDy9Mri-18_0 person lQsTpo0uOIw_1 boat lQuFC-E7VUM_0 person lQuzpkDKFQ8_0 person lRuif4Zc7CI_0 boat lSZa4pAHgV8_0 horse lS-5gEkB0_o_0 
motorcycle lTTquh-jLwM_0 car lThBPb6HI1U_0 cat lVeIr8AFTjY_0 person lWT2t48q164_0 motorcycle lYSpeuL7-oo_0 umbrella lZOTAg9Fofw_3 bird lZVwQoLPjBU_0 giraffe lZVwQoLPjBU_1 giraffe lahDGDRe7X8_0 horse lcKDCt1eWqg_1 knife ldQGB8gzRjA_1 cow ldhdyBduVoU_1 cow lf_tYVzrap0_0 person lge9f_bgAOk_0 person lgzIpgcvPvU_0 person lhNv9zDa1ug_0 car lhadIxHkaVg_1 person lhadIxHkaVg_0 person lhnQuOIF-2c_1 person ljLO1myCfoA_1 knife ljayNZQpp-I_1 horse ljayNZQpp-I_5 horse ljeTwRM6DWE_0 person lkvdy3Hejpw_0 person ll6gTyUguMY_0 horse ll6m5MTpf4o_0 person lmpKSF0cXSc_0 train lnfEV2dRfm4_0 motorcycle ln0_FGR8B08_0 person loVlMj9Dhkk_0 truck lotZh71qMks_0 person lpcqEaZD_Xk_5 bicycle lpcqEaZD_Xk_0 bicycle lpcqEaZD_Xk_1 bicycle lpcqEaZD_Xk_2 bicycle lpcqEaZD_Xk_3 bicycle lpcqEaZD_Xk_4 bicycle lqu4tjd3Zg4_12 bear NE9AhZPTVFY_0 motorcycle NFF4UemeH8g_0 truck NFSj66emNbM_0 cat NGS9BrtLJ0I_1 boat NGvpnRrWSKc_1 bear NHLBjlX2jeg_0 person NHgh88y4e80_1 car NHpM-oBMIRk_0 dog NHrjnZsJWOw_0 person NID_0E0tn_g_0 cow NJQNZ36lsvw_2 truck NJm81cIGO98_0 skateboard NJ22Hynv9s4_0 umbrella NJ22Hynv9s4_1 umbrella NJ7MXR2AaoY_0 cow NKQfFcfr6Ko_0 person NL1iy1TKtRI_5 car NL1iy1TKtRI_1 car NL1iy1TKtRI_2 car NL1iy1TKtRI_3 car NL1iy1TKtRI_4 car NMCijcIa_XU_2 knife NMhR_Z4Rq7g_0 person NNbRF02KnGM_1 skateboard NQiMeD83sMw_0 truck NQiMeD83sMw_1 truck NQsnyZmQoPw_0 elephant NQsnyZmQoPw_2 elephant NQve9Yujb14_0 person NRaAEznVIxQ_0 person NTGqC7kOGAw_1 bird NTRX6gLV_04_0 bus NUSnWbhvmQs_0 cow NVzCor2-ZpI_1 zebra NV-p8Vp-bdA_0 horse NWAQ1is2w98_0 airplane NYIqB-l8eKk_0 train NZ5OIYTIoYQ_0 person NaCksn1bbv4_0 airplane NaCksn1bbv4_2 airplane NaEokN7Nh-U_2 knife NadzcUmXDTk_0 person NbJ2gM5KJTM_0 cat NbJ2gM5KJTM_1 cat NdXmkm9jcPA_1 airplane Nd6ceCmRYBI_0 bird NeXVfNsggZw_0 cow NfEzlo6-i_4_0 train NfEzlo6-i_4_2 train NfEzlo6-i_4_3 train Nhi9730yIzM_0 dog NhskHQ9bqlo_0 cat Nhvr0y1tqjk_0 person NiP4AEjiwxs_1 boat Nio43-cQPh0_0 train Ni_TSyCk1Ak_0 cat NjknyzAAQpM_0 person NlOjGoYPj9Y_0 truck NlTLvOcpoEA_0 elephant 
NlVEu_8kdoI_0 horse NlVEu_8kdoI_1 horse NljV4UjnFJc_0 motorcycle NnRWY12wxUk_0 person NnVFfTO9-q8_0 person No84NOV3Pwk_1 skateboard NpZj-n9_STU_1 bird NqwxEAASrCo_1 airplane Nr9t7GeBwQY_2 skateboard NsbG9FcyTFk_1 elephant NsbG9FcyTFk_4 elephant NsbG9FcyTFk_2 elephant NsbG9FcyTFk_3 elephant NuKyL_c3YcQ_0 cow NulXMVhoGhU_0 knife NuutxSJHULc_1 cow NvkF9R1HsJc_0 car NxTnPIBFKdE_0 airplane Nxjnp7dqCdc_0 cow NxqGplqsmNk_0 person NyKq-nq-KlQ_0 person NzAEnNO5-fo_0 bicycle NzAEnNO5-fo_3 bicycle NzAEnNO5-fo_4 bicycle NzAEnNO5-fo_5 bicycle N0LEywKxW9o_0 cat N0e8A9q9tyU_0 train N1OYtZSKdKQ_0 train N1OYtZSKdKQ_3 train N1pTdHcekjU_0 car N28sspen6dM_3 bird N28sspen6dM_1 bird N3ffRSq8s7M_2 cow N6nP6NLTaG0_0 motorcycle N7Bv6ZMyBrU_0 skateboard N9vkS7ish9k_0 cow N_5Xf4hpanE_1 dog N_5Xf4hpanE_0 dog OBQQMo8mWLE_0 person OCA5rhgrl48_0 person OCLVaKMFCZg_1 bicycle ODI8kcB_dSs_0 truck ODJSlRRM1Uo_0 cat OD4XsgCwIKk_0 person OD9vhbbeBAE_0 horse OEhrO1p2agU_0 person OGOf9vbNJB8_0 person OG8Nfns4uh0_0 cat OHEyq1pCfZ8_0 truck OIV8ASYsqZc_0 skateboard OIV8ASYsqZc_1 skateboard OImLl2ufWqI_0 cow OJktr2-sJmY_0 motorcycle OJktr2-sJmY_2 motorcycle OKbNtRotT5w_2 horse OKbNtRotT5w_5 horse OKbNtRotT5w_7 horse OK-2ALhNWts_0 bird OLpvIpNUgY4_0 person OLyGncmosSs_1 horse OL_lZw3lqE4_0 person OMm3ReCUyGA_0 person ONlvohUS-io_0 cow OOC45SMJl6M_0 bus OPIxLQwJLaM_1 cow OPbyoGG-M_E_0 horse OPm_iAWIO2o_1 knife OR4OEYlOndk_0 motorcycle OSRtFznjiro_0 motorcycle OSUOKZdfiXQ_0 person OS6SXRjK0rU_0 horse OUeSqgMRLUg_0 bird OUrVDMMYK-4_0 person OWBXMvAtmcA_0 cow OWqaj3O-u6E_0 train OWqaj3O-u6E_1 train OWqaj3O-u6E_3 train OWvRHFQJ-5g_1 train OXjc7JlWYwk_1 bird OXpPVrdEoko_0 elephant OXpPVrdEoko_1 elephant OYCDyQPt5rU_0 truck OYRmTydmqZo_0 cow OYugCmogPD8_0 bear OZver3igS6U_1 zebra OZy-0MSWC7o_0 person OZ5z2K-vIYg_0 motorcycle Ob4ur_FS9xM_0 dog OdLj2La07lM_0 boat OdnylLd12pU_0 skateboard OdsXUxBBISo_0 airplane OePFLxtDg7k_0 horse OflyVi689KA_0 skateboard Og9LiinXMtw_0 bus Ojx6OtSIA3k_0 person Omdbd0YsB2o_0 airplane 
Omdbd0YsB2o_1 airplane OnRL69PzM4I_0 bicycle Oo3Uhz6L-cs_0 person OpEMSVRTyxk_0 dog OpJl0GUiLQI_0 person OptQqflXY_g_9 elephant OptQqflXY_g_0 elephant OptQqflXY_g_4 elephant OptQqflXY_g_5 elephant OptQqflXY_g_8 elephant OqmbWcekMxo_0 person OrPfakDZX64_0 person Orwr1k0mKho_0 person Orwr1k0mKho_1 person OtHHLfag4xg_2 knife OumTAMPogf4_0 person OvQFDkMjctE_0 person OyDNx0iCGUM_0 truck OyKi2PGJERI_0 person OyKi2PGJERI_1 person OyhAS52bQMA_1 person OyhAS52bQMA_0 person OzORAIgrZOg_1 knife OzQFkM92we8_1 dog O0o_u_t5Y6w_0 bus O2TgLtQU7PI_0 knife O3GPSL92hYw_0 elephant O4UhXpMuxJI_0 person O5PlzlxQuPc_0 dog O5796OHwBy8_0 bear O6cWlrockUQ_2 horse O8s1bsDJrwc_0 person O9dxeSLiF9A_0 skateboard O9dxeSLiF9A_1 skateboard O90WVIgQwww_0 person O9_riOoIpKo_4 train O9_riOoIpKo_6 train O9_riOoIpKo_10 train O_hypcyZCFo_0 airplane lryNU4SKncc_0 cow lrzxlHguluE_0 bird lr7T4YcCWSU_0 elephant lr7T9GuNUMY_0 cat lskWmTPa9Gk_0 person ls34lS6fGzw_0 person lt7kXXW5D-c_0 bus lvdU2uEdpnA_0 boat lv6aYZguv6k_0 person lxXwMvanqo4_1 boat lznoTW8tuLI_0 bus lznoTW8tuLI_1 bus lznoTW8tuLI_2 bus l0J9Km2lk2I_0 person l0TirY4L7Es_1 horse l0TirY4L7Es_3 horse l3yFwpak_LA_1 horse l38pNVKwDeo_0 bird l4sdxYUAiJQ_0 person l4_P74HRriU_0 person l5GlzRyX39s_0 person l5GlzRyX39s_1 person l5WawiGWVxg_0 person l6cEGnOtFZg_0 airplane l682n6ZmpNk_0 person l7Mmo3ow8qo_0 person l7kq2yqxPQc_4 horse l7kq2yqxPQc_2 horse l8r-mOc3-3U_1 person l9QgZQGtQWI_0 motorcycle l-4jrxgMGTQ_0 skateboard mAEnlKe67pQ_0 bicycle mAhzB1TH8mU_0 truck mAj62XUNkIM_0 horse mBgSYaKydZY_0 person mC5X6MO2y9A_0 person mDf5zsFFweg_2 knife mDf5zsFFweg_1 knife mFbUnWMAreQ_0 person mGDfepYDRRE_0 person mHFxPudSk8c_0 motorcycle mIFnGYdf0po_0 person mJm2UYBiD8w_0 cat mJo7aqOfRww_0 airplane mJ6qCcS_-AQ_0 person mJ-DsFbUPUg_0 motorcycle mKBs2L-xwdU_0 person mLVHfKExUNU_0 boat mMdGNbPpLKQ_0 truck mMy70TxInmA_0 person mNpEoUW_OPI_0 knife mOFqvrGzJiE_1 elephant mOFqvrGzJiE_2 elephant mOkmKyBZoXI_0 person mP6-RR-Vuv0_3 truck mR1y0XlZhQ4_0 person mTeNKWTwFcs_0 
person mU7E6pi9PFU_0 bear mU7E6pi9PFU_2 bear mWeNwTJwEmo_0 person mWhw719wEH4_0 person mXBKJjrxqmc_0 knife mXekeIascCc_0 person mX_4T1I2ux4_0 dog mYwEvpKN2-Q_0 train mZ0VxiELg9A_2 motorcycle mZ0VxiELg9A_0 motorcycle maiqraHgwgg_0 skateboard mbZZ48h5pnY_0 person mboIIChd8tY_0 bicycle mcR2Fi6wQj8_1 train mcR2Fi6wQj8_0 train mciQ3fR1QTE_0 truck meAfvCGeyyU_0 person me-WjezBU4U_0 motorcycle mflX-nwtpzs_0 skateboard mgSJL9uL49w_0 bus mgSJL9uL49w_1 bus mhDnVhRMCHc_5 cow mhDnVhRMCHc_0 cow mhDnVhRMCHc_1 cow mhDnVhRMCHc_2 cow mhDnVhRMCHc_3 cow mhDnVhRMCHc_4 cow mhIULm3ssFk_2 airplane miJ1b0bNn9M_0 person miLapj3u_5g_0 cat miR8Xeb7SM0_0 umbrella mi4j0PrR-Gs_0 truck mi4j0PrR-Gs_1 truck mjSUb46nTjs_0 horse mj2ClgQE_Q0_3 skateboard mj2ClgQE_Q0_2 skateboard mj_R3ENyiKM_0 person mnOoqy7I3L8_0 skateboard mns4vFzs4_8_1 skateboard mns4vFzs4_8_0 skateboard mnwyrMq92so_0 person moBNY2JjuEQ_0 cow moc2yPvW_JU_1 person mpA3PWbdVWc_1 bus mp-cHp44pXo_0 bird mp-cHp44pXo_1 bird mqI9CDpsCDE_0 cat mqYD18pFqm8_0 person mrnDERbyZcM_0 skateboard mtO9ioY8AHY_0 person muk5R25UV1A_0 person mungFWJMSsg_0 dog mwRNyFvem8g_3 truck myYMS85ltwo_0 skateboard myiCWmM3XN4_1 dog mziKTFuKVco_0 person mznC1uLm_j8_0 skateboard m0z25TJV2vU_0 person m1VAqMAJ-Lw_0 elephant m2DUDsR4tWA_1 bus m2Sr_Q8JpcI_0 horse m2Sr_Q8JpcI_2 horse m2Sr_Q8JpcI_3 horse m2-nK6oZ08E_0 horse m2-nK6oZ08E_1 horse m3u_pETGaMw_0 train m4Ozpr8E1EE_1 train m5mSFt43spE_4 motorcycle m7VhCUoV_Dw_0 person m77tPf0Ulb0_0 person m8THukZrE7w_0 person m86BSOvJvS8_0 person m9hdxJE9HQE_2 train m95nb4Vl_R0_0 elephant m-Ry10-IgWg_0 horse m-sLdoVujlI_1 bird m_25GAJYGHE_1 car nAO2Y4kF7b8_0 bicycle nBllCINiO-4_0 train nF_NlCSUpFo_0 cat nIO0ZNZi6n0_0 person nIiXsRSLxZI_0 person nIiXsRSLxZI_1 person nJO5eQXPS0M_1 horse nKfhxWUyc4I_0 elephant nKfhxWUyc4I_2 elephant nLUyCQwkCds_1 motorcycle nMW7WsVKd_E_0 truck nO14Z3ggnZs_0 truck nO16C5NBMQQ_0 person nO16C5NBMQQ_1 person nPJJOI4j3UQ_0 person nQAqVHkffhY_6 train nQAqVHkffhY_7 train nQAqVHkffhY_1 train 
nQAqVHkffhY_5 train nQrJJZvmF74_0 cat nRu8IVZXzCU_0 airplane nR1Ng3PnYoU_0 cow nSUBF0RYH1o_1 bicycle nTfgyYqyO_Y_0 person nTtqkLze7eY_0 horse nTtqkLze7eY_3 horse nTtqkLze7eY_4 horse nW4sAWZ6dHQ_0 bicycle nXYeq3IDOFo_0 truck nXgq-W7J6ho_0 person nYGQy8peDYk_0 person nYHjMb7HoK8_3 bird nYIUSRVmY30_0 person naMdRxX0924_0 train na6hNW8gSx8_0 bus nbojUStyLvY_1 person nbojUStyLvY_0 person ncZiTQHehfk_0 person nefS_k9oFMI_0 person ngE_mlmsaqY_0 person nh4AR9Mjwmo_0 bicycle niQ2DNNlBSM_0 person niUnVyYTszc_0 person njOQqZ1pBGM_2 boat njP6uuU-G6o_6 bear njcuqdNTGfM_0 person nj8ALe3wC9c_0 horse nki1SdWtdCI_0 cow nk6FezKWYSY_0 bird nmNSM48p094_0 knife nmRZQdp3xRk_0 person nn8WcALmZ7c_3 bear noTnh5A2OHo_4 boat noTnh5A2OHo_1 boat noWsAcioI8g_0 train noe-qNQfJBo_0 bird no-b9_3kXiQ_1 dog npAPemisdEI_3 boat npGL0Kl16f0_0 person npGL0Kl16f0_1 person nqZya6Vk3iY_0 cat PAdHnsQ5png_0 cat PAi_eJ_z59w_0 skateboard PBPViL9vBZQ_0 motorcycle PBS3-SzLV2A_1 horse PBwR_Jdod_g_0 knife PCJWOz32Js8_0 person PDmAbS9Afkc_0 truck PE8yxnkayr0_0 person PE8yxnkayr0_1 person PFKrDvQuKII_1 car PFb83m0smRg_0 person PHunbTKqKwk_0 train PH5VqmGrnXs_0 cat PIG9w10uliw_0 bus PIo5FlB1sf4_3 bear PIzyVPr2kvQ_0 person PI_spS2t57M_1 horse PI_spS2t57M_0 horse PJK-c0HQksg_0 bear PJUvXC0Eumw_0 airplane PJsCV-lA78A_0 elephant PJ0Y1xQ7ZJo_0 horse PJ2kZmkL25Y_0 person PKGRn71TQGQ_6 airplane PKGRn71TQGQ_1 airplane PKtLlpi00cM_1 skateboard PK_UdRSa36U_0 motorcycle PMDSUC0_Ytg_0 bus PNxobv7rkRU_0 person POWngj1oBhQ_1 train POpePYwyHWY_0 bus POu1oPwNd4g_0 umbrella PPeaYnqzi9g_0 person PPjAhD3i-v4_0 bus PPqkkhaUIdE_3 bus PPqkkhaUIdE_0 bus PPqkkhaUIdE_1 bus PRaq5kZmO2A_0 bus PRyc4Vp0s00_0 bird PSyuR_D5C2c_0 cat PTLtv0VJ0_s_0 person PTM6VrBcP80_0 dog PTewrgfas9o_1 train PT6u63wHOhs_0 dog PT_tMCTzlSc_0 person PV_FZhj_0hI_0 car PWZIO2hdNRU_0 person PWiyz8b24es_0 airplane PXxs6Hzx7Pk_1 zebra PZ3X20r0oVc_1 bird PZ3X20r0oVc_0 bird PdPZkfHUOq0_0 person Pd9bh2hiWAk_0 person PeA8729U1jg_0 boat PeJxY7YFBTA_0 knife PgFIqGCjnc0_0 horse 
PgNvdw3Zges_0 umbrella PgjeF-iHzLk_0 person PgyVMv-RRL8_0 truck PiI1e3aKeos_0 person Pkju9RRBRAU_0 person Pn01hUEOICo_0 bicycle PoI-RFl6jqU_0 bird PoI-RFl6jqU_2 bird PpX6lJOP6ng_0 person Pq1kVNudVJo_0 boat PsPMm45bDZA_0 bird PskTcGACgjw_0 person PsrCCNATJd0_1 elephant Ps9peKxde4U_0 dog PvCZZzw4FKw_0 person PvQVqhtqTVk_1 person PvQVqhtqTVk_0 person Pv3IqqHid-w_0 airplane Pv3IqqHid-w_1 airplane Pw7zlPV9yh4_0 motorcycle PytUHdEhipQ_0 airplane P0FylASL6h4_0 person P06NLpHGLb8_0 truck P06NLpHGLb8_1 truck P1FTUN2gJkY_0 person P3JAtlf2-VA_0 cat P3MhJa_p-dU_1 truck P5MpdcJgQrI_0 skateboard P5NEco_Rqas_0 motorcycle P5NEco_Rqas_1 motorcycle P5v3n_5s-F8_0 horse P7i0pgLo9kg_1 car P8E7gprJa1s_1 skateboard P8_7-uFl2Go_0 bicycle P9dDbodBY8s_2 motorcycle P9dDbodBY8s_0 motorcycle P9dDbodBY8s_1 motorcycle P91LJh-_E0Y_0 cow P-FrYGR7Bf0_0 person P-phCIDPeWw_0 horse P-27cmR3CZE_0 knife P-_MzAIxz2E_1 knife QBAxag8dq6Q_0 cow QBfotDmdDkk_1 skateboard QBrAST1Q2iE_0 person QCCt8ooY4qg_0 person QCjqG8908mY_0 cow QEDWauqnaSk_0 skateboard QEGY7Dq2x9s_0 horse QE0MjXjSFjU_0 boat QFS35qERdLE_0 person QFeMKKxurVg_2 horse QFxep-yih-s_0 truck QFxep-yih-s_1 truck QGN2-Iqa4QQ_0 person QHPYpnJSf2s_0 cat QHhkx3CSiWk_0 person QJ1W4Pajbv0_0 person QLmFsJCZy_o_3 knife QMRFisCEGQc_0 person QM9Kddu2XcQ_0 train QObG-uf4v68_0 motorcycle QOjAwmQ_7vA_0 person QPtMbvxzFuE_2 bear QQC7AIIJg2Y_0 elephant QQLrVBS8VSo_0 person QQLrVBS8VSo_1 person QSTf92HwJS0_1 dog QSTf92HwJS0_0 dog QTjiYkMuDGI_0 knife QTqvJZS8ZNo_0 elephant QUIxOZH8N8c_0 person QUUgu5YvS1c_0 person QU7X6RkjKPE_1 boat QVUI5ZkkDsA_0 person QVnam2Ma6mY_0 person QY1rz6k86s0_1 person QZS3V-7xnAA_0 person QZWqiN4OA_A_0 person QZk1HSA90KA_0 knife QaUHYb5os4U_0 person QahBgQXhNfo_0 cat QbOvfWFyPzg_0 dog QbOvfWFyPzg_1 dog QbPvdKEmnrI_0 person Qb4RNeQYfPc_0 boat QcLa-GP2ITc_0 person QcLa-GP2ITc_1 person QdeUvHCiXwc_1 horse Qd0chk9vUQ0_0 bear QeISQLJERxg_0 person QfJeJLieLew_0 cow QfJk-eDxmKE_0 person Qfkb-gc72qg_0 cow QgPao5AkXFU_0 skateboard QgiX6-1aN-4_0 bus 
QhGx_MwYnWs_0 person QhIp71nr7Vk_0 dog Qk_VhG5lt1Q_0 cat QmRFPW81gZc_1 truck QmfJmQuF1-I_0 bus QmuLT1MpdP8_0 person Qm2yaeiexlI_2 motorcycle Qrd-Q3XrT3A_0 train QszBg-eN7F8_0 cat QtBYK8AxWCw_1 person QtpKcTyf4n4_0 knife Qtq2m-MV2q4_0 cow QvY9ysq30EI_3 elephant QvY9ysq30EI_5 elephant QvY9ysq30EI_0 elephant QvY9ysq30EI_2 elephant QwJNOYFZ3W8_1 elephant QwTIODgGfOM_0 person QxLFtmn_Igw_2 bear QyyPl-aCFUs_0 cat QzETtzOBUaY_0 person Q0HpPvC0bKA_0 person Q0M_Fog02Yw_1 horse Q0UrlXLNioY_1 umbrella Q0tQtb1npx4_0 car Q0x55aCCNxA_0 person Q31q8b3CSN8_1 skateboard Q4rAM1058Z4_0 horse Q4rAM1058Z4_1 horse Q5G2n-3zXX8_1 person Q5G2n-3zXX8_0 person Q5X1kisU8Qo_0 person Q6hwtMw2jkU_4 skateboard Q6hwtMw2jkU_3 skateboard Q7SViqj0bEg_0 dog Q83xNK10WK0_0 bear Q-lTGQgTOEg_0 person Q_rsZh5VqdY_0 person RANBJV7BN3k_0 person RAmxGTzr25A_0 person RBccU2wq7Qs_0 knife RBclSX-7rYQ_0 person RDiehz1pFVA_0 knife RD7nVPZTGEw_0 skateboard REBfrgEC_3U_0 knife REh7f-__WqU_0 cat RE40E9-qdHE_0 horse RFO8tA6rfbo_0 truck RFbhEQ4qN-A_0 person RIfxXKT-_88_1 skateboard RJZgo3_JEPs_0 person RJi5ZRGQb-A_0 person RJxPTuKUKjk_0 horse RKFpQfRSYIc_2 motorcycle RKFpQfRSYIc_3 motorcycle RKFpQfRSYIc_4 motorcycle RKFpQfRSYIc_6 motorcycle RKFpQfRSYIc_7 motorcycle RKFpQfRSYIc_8 motorcycle RKFpQfRSYIc_9 motorcycle RKFpQfRSYIc_10 motorcycle RKFpQfRSYIc_11 motorcycle RKFpQfRSYIc_0 motorcycle RKFpQfRSYIc_1 motorcycle RLcZcFP03fA_0 person RN6TzMbUlyg_0 airplane ROdg8e5a0Fk_1 cow RPwZjkygYo4_1 elephant RR-fksDmQTU_0 dog RSLwmLbf3No_0 horse RSO2IDZGDus_0 person RSQ7pHT5sU4_1 cow RSWyviTCTqk_0 cat RTAQO62dbRo_0 horse RTONY5PqRUo_0 skateboard RT0mh9U0YDc_0 person RUAbb66fW18_0 bicycle RUW8xYh84q4_0 dog RU0u42rf0Hw_2 truck RU0u42rf0Hw_3 truck RU_8ryQNxC0_1 bird RWJfJx1nXNQ_0 bicycle RWo2zaceWcc_0 bird RahqzUIhIkc_0 cow RawtpxzAbmM_0 person RdZGVs8pH40_2 skateboard RdZGVs8pH40_1 skateboard Rdge7lmfdc8_0 person RfVv6ECZ78Y_3 bear Rfa2If7RJTY_0 knife Rfa2If7RJTY_1 knife RfvNPPjs-bw_0 boat Ri3O4rz5S2o_0 boat RoMemRfbKkc_0 person 
RoNJ0fP0VUU_0 person RqAANAYxYz0_0 person RqqaUsDM-aI_0 person RrnixlsQyn8_0 person Rr6AsTlUNKQ_0 person RspILw0UAM8_0 person RsyjwcMkRrY_1 knife RsyjwcMkRrY_2 knife Rt1reRy5GVY_0 person RuFIanBmYzM_0 bicycle Ru9ksAvNYc0_2 cow RwVTAYsyWMo_0 person RxWOvD9i9Ig_0 car RxtS3kGOYoc_0 bicycle RxtS3kGOYoc_2 bicycle RxtS3kGOYoc_4 bicycle RxtS3kGOYoc_6 bicycle RxtS3kGOYoc_9 bicycle RxtS3kGOYoc_12 bicycle Rx9YjtdgOEI_0 person RyVdNK-PCyg_0 person RylJTxUTfF0_0 skateboard RzdsXt87bVE_0 dog R0biK134LTQ_0 person R0n9cqLQE4E_0 skateboard R3rDAaPE_s4_3 truck R45uCINxuVY_0 person R7IE_IohaIk_1 airplane R7IE_IohaIk_6 airplane R7IE_IohaIk_0 airplane R8Zg4uo1QpM_0 person R9d1vlii7cs_8 truck R9hRCG8pAHM_0 horse R9hRCG8pAHM_1 horse R_xLhXpHgp0_4 skateboard SAeiSpeFynU_1 bus SBmb0VU07rs_0 boat SCLi5OFtzQk_0 skateboard SCaWHsWzxqY_0 person SC18zgZ9Diw_0 bus SDCTiDVOdW0_0 bear SFA4mVjImxk_0 person SFoil_6CvbI_0 bird SGsRwH8YxQg_1 airplane SGsRwH8YxQg_11 airplane SHSsDGmwywY_0 cow SIZ3AYCr7PQ_0 person SIv3Hcq1ge8_0 elephant SIv3Hcq1ge8_1 elephant SJkZwyPxUTg_0 cow SJqduSR9h4g_0 elephant SJwgIeOkfTM_1 horse SKoDZimqLV0_4 bus SMF8aDGwELI_0 giraffe SNbBUZtngzM_0 person SNnofRkUk8w_2 boat SNqtno2pOzc_1 dog SNqtno2pOzc_2 dog SQn8ueHVBWc_4 elephant SQn8ueHVBWc_6 elephant SQn8ueHVBWc_1 elephant SQn8ueHVBWc_3 elephant SQ4tDbbdzr8_0 train SQ4tDbbdzr8_2 train SSjgAjilS8g_0 person SSwA_nC9rr0_0 person SThjw6JeBnQ_0 person STuEo8vap08_0 person SUHEgX-8bo0_0 person SUwLfCebumU_1 bear SUwLfCebumU_2 bear SVUAFI7bHqQ_0 person SWedQv5UnQo_0 person SXWo-zKZICs_0 person SYT4odK3Dwo_1 bird Sc_CAareVEI_1 elephant Sc_CAareVEI_6 elephant Sc_CAareVEI_7 elephant Sc_CAareVEI_2 elephant Sc_CAareVEI_3 elephant SdzIWTR-rkc_0 person SeBOeRzwqrQ_0 skateboard SeU_71ydaeA_0 elephant SehCD9wP-Pk_0 person Sf9OdV3i3I4_0 person SgglaVke5lo_3 boat SgySshdgJrQ_0 motorcycle Shves64RCp4_0 cat SiotcXGUwAs_0 person Sj56u4dFe4k_2 person SlR9qCk_m9k_0 motorcycle SlR9qCk_m9k_1 motorcycle SlZZmtOGyeE_0 airplane SlZZmtOGyeE_1 airplane 
SndDcPzB8Hc_0 cat Sn2SGmheI-Q_0 person Sn9gOBw9bf4_0 person SoiA6jtejG4_0 dog SpbyBYH0OjI_0 person Sph2g6B-X2M_0 cat SpjssmEyc_o_0 airplane SqHtdCP5Oao_1 horse SqHtdCP5Oao_2 horse SqLiHZHzp9w_0 person SqoR7vKYzCY_0 horse Sq-Xok-ea7U_0 person SreiPFJ6vBw_1 boat SsMS0eIy2Ws_0 person Sse7vXMMO6E_0 person Suaush4Da4s_0 person SvPL8gOREaU_0 knife SwaILKCtBVA_0 truck Sw4B_VFic3M_0 skateboard Sw7L3wImbSA_0 person SyldRIQbAGU_0 person SzVyFmQ28Xo_0 car SzkobSwGTMk_1 bird Sz2bTIe9kTo_0 airplane nrEv-Plh45s_0 bear nt_BXwq_xhA_0 giraffe nuCdww9iIOs_0 horse nuMeNIi1MPY_0 person nuMeNIi1MPY_1 person nui8beXjUlU_0 elephant nui8beXjUlU_1 elephant nvMXQKwroRY_0 person nvaO13WFhos_0 person nxBkP48NgKY_0 motorcycle nxclZ6iCf7o_0 cow nyogtZp3kIk_1 airplane nzf12QyuD4E_0 truck n0tx4V2rF3I_1 giraffe n09NxJcTEYQ_0 person n12ITkwyzvM_0 cow n15n46culQU_0 person n19nqH4078Y_0 bear n2F8uNrgh1U_1 elephant n2daSQR_dTI_0 motorcycle n3Eb6Cf77Vg_0 airplane n3aHtfCo_aw_0 person n3fhSGUvtH8_2 knife n5alwWwFPb0_0 motorcycle n5osSY0_BSo_0 person n5-RrJI-Lxw_0 person n6I0k52pV18_0 bear n8xNf-PRHnc_0 truck n9AUV2KuhLo_0 cow n9zSAZMj2Mk_0 knife n-I-WnLfnqE_0 horse n-QBM6yD7RI_0 bird n-eDiuWYJUc_0 person n-1FhryZboM_0 person n_Cv1LzGol0_0 person oBixVhXVcmY_0 person oBjIRWu_BWA_0 truck oCCV0-mP2R4_0 bus oDlSzIkDJGM_1 car oDnobYn8maE_0 person oDrYXyIN9xs_2 dog oEcyeE0kNFc_0 horse oElAgrukyOk_0 person oE0bjG0z-nk_0 person oGDp2b_LvDA_0 bicycle oHu9fCIhAjs_0 person oIQuiXJzEUI_0 person oIYCDBqfT6I_1 elephant oIZHf-r5C3w_2 bird oI3ETWYxCi8_2 person oI3ETWYxCi8_1 person oJAivZwYxDE_0 person oLTHGMleOxk_0 car oLTHGMleOxk_1 car oMZczwLgR1Q_0 boat oMZczwLgR1Q_3 boat oMZczwLgR1Q_1 boat oMZczwLgR1Q_2 boat oNMf32fzYvo_0 person oOi9E4se4ww_0 person oOp7fTxc8qY_0 person oOp7fTxc8qY_1 person oQcVQukPVdA_0 horse oRacxmfNaSM_0 cat oSwwku39aC0_0 skateboard oXHr2yBfL3Y_0 cat oXfOERZ2kMs_0 cow oXlK1t1qisA_0 person oYw8UE0VSFk_10 elephant oYw8UE0VSFk_1 elephant oYw8UE0VSFk_5 elephant oYw8UE0VSFk_8 elephant oY5CyHk-QEo_0 
person oaHCd7KI_Fc_0 airplane oaK_EfFOb7o_2 skateboard oaK_EfFOb7o_0 skateboard oa5NT5mX--c_0 person oa838tg7QCk_2 elephant oa838tg7QCk_3 elephant ocJUmpBIBOo_0 person oc7XeYj7dOE_0 skateboard odjK5W70JaE_0 person oeYHzAMgoQ4_0 skateboard ofynEJHRTz4_1 person of1ISNDelz4_0 cat ogJGxnVqTWY_0 cow ogNqc-uHzQ4_0 umbrella ohkrDDXUwjY_0 person ohrYGLaImow_0 cow ohxeFH800SE_0 skateboard oiftoNj28hs_0 elephant oiwU7UpO9S4_0 person oi4GfdQBxyc_0 person ojiIyU5ibT0_0 person okPcGR4BRQM_0 person omsmPSC4u3A_0 airplane onH8ELLteHg_0 motorcycle oo3eTJKpErU_1 elephant oo3eTJKpErU_2 elephant opWm4bW5B9k_2 truck opYiNVXmySg_0 skateboard opkxXg1s8ZQ_0 horse opkxXg1s8ZQ_2 horse opkxXg1s8ZQ_3 horse opkxXg1s8ZQ_4 horse osYXdQYkiPQ_0 person otKNUa-KgUg_0 car otKNUa-KgUg_1 car otOxAXKskbI_0 boat otU4Zd1n65g_0 bear otqOLpbz4LQ_0 airplane ouK26Crplso_1 car ouSUKHZs1Dc_0 person ousG5WHZq8I_0 elephant ouwAzKpUG7k_0 train ovQiwCBG8Eg_4 elephant ovZ4In0kLUg_7 bear ovZ4In0kLUg_2 bear ovZ4In0kLUg_6 bear owW-da7Tdls_0 person owtKQFT_gNk_0 person ox0mlEooWI0_0 skateboard oyuMudJ9EM8_0 person ozRJI9h3tks_0 horse ozRJI9h3tks_1 horse ozvxKPrfdo8_0 dog oz11xvTIbvM_0 person o0QRA7gPhBI_0 giraffe o02m7tfad28_0 person o02m7tfad28_1 person o09Ks_UmmkY_1 train o3eHOnTMxnU_0 airplane o4PVsZPaxOM_0 train o4PVsZPaxOM_1 train o4VOx1SeRKY_0 bicycle o4VOx1SeRKY_2 bicycle o4VOx1SeRKY_4 bicycle o4VOx1SeRKY_5 bicycle o4VOx1SeRKY_1 bicycle o4VOx1SeRKY_3 bicycle o7wb_t8x0D8_0 person o8KS5SYj0GE_6 bird o8YfQD0GA00_0 person o9gD7-MVkJ4_1 bus o-IwJTgdr_A_4 bird o_sONKO9OMk_0 person o_7RumsdAcE_0 motorcycle pAVwx70oxIc_0 person pAthLZfnXaM_0 person pAthLZfnXaM_1 person pBWgDW8f6II_0 person pB5-haagdS8_2 bird pEUCkpfCcaw_0 boat pEtOW-iQZCA_0 person pE-OFVB2lzo_0 train pHsAHiqdb-c_0 bird pIHbW9IMV2E_1 airplane pIHbW9IMV2E_0 airplane pINK56mkS-E_0 cat pJBMnX2HBFo_0 train pJ6wkaE8-iY_3 elephant pKFd8IXz4K4_0 boat pKnRcv--qEI_0 cat pLvGIJc0ETk_1 cat pMKMeBQzCC8_0 dog pMKMeBQzCC8_1 dog pMgX9KscZSg_1 train pNG0qeNr-Vo_0 person 
pNWXXO380uQ_4 dog pNWXXO380uQ_10 dog pNWXXO380uQ_1 dog pNWXXO380uQ_2 dog pNWXXO380uQ_6 dog pP84ZurhiFY_0 umbrella pQAJTPvkPj4_0 bird pRArAdUzaKg_0 person pRVlgxVhtuA_0 cat pRy6kU2p41E_0 cat pS5AzmSvRPY_0 horse pU9s744_T6o_0 truck pVR9b-qG1Ig_0 giraffe pVR9b-qG1Ig_6 giraffe pVR9b-qG1Ig_7 giraffe pVR9b-qG1Ig_1 giraffe pXLbIBluyAQ_0 bus pXfO7xO-99w_0 cat pYXDml6lcAY_0 motorcycle pZCCPMu42GA_0 person pbFuk0oX6a8_0 bicycle pbFuk0oX6a8_1 bicycle pbFuk0oX6a8_2 bicycle pb3p83fw9bg_0 person pcUV4ja1VRc_0 truck pceUU6aj_ao_0 cat pdyhFh6-rCo_0 bear peBxgn7gXlw_1 motorcycle peHZd4qdOMI_3 boat pe00hbvqjDI_0 person pe_73GR1-NI_1 airplane pfED6WafVwQ_0 bear pfpKoO-GjGI_3 truck pfpKoO-GjGI_1 truck phXjZ1yxWD0_0 bus phec6_yC2HY_0 person phjJhuKxT5Y_0 train piGT-hRYHHQ_0 horse piN1RiueJhY_0 horse pjLei6UAHsE_0 airplane pjLei6UAHsE_1 airplane pjZqJuEX1ow_0 airplane S2FTgueR-80_0 person S2FTgueR-80_1 person S3U383sqlRs_0 bicycle S4UDIyyqmlY_2 motorcycle S6h6E0IKO6Y_0 dog S73sRU7b2dk_0 person S9QmlxGGxGM_4 knife S9goDsKFXAg_0 person S-qgaqzenIE_0 person TBpnes8Z-3s_0 person TCtRzPGrwls_0 horse TCycfRWpg0s_0 elephant TDKDtLliMhg_0 person TDlLgW8Fjes_0 person TFcak4kNd2c_0 person TGFSBSitWNw_0 cow TISjnLr1r-k_4 giraffe TISjnLr1r-k_5 giraffe TISjnLr1r-k_3 giraffe TJsLSuQcb7E_0 horse TKadOIk-uPI_5 truck TK61mJMHqTE_0 train TK61mJMHqTE_1 train TLxcXucOpWw_0 skateboard TMaLrtjFU34_3 cow TNNXwm3Bt5I_0 bicycle TOLyNcTSGPA_0 person TPglVxQN85I_0 dog TRH4PZkAkiE_0 person TSl3wSreplo_2 bird TSl3wSreplo_0 bird TVuX76wWzwY_0 person TW9LBSqxNWo_0 bicycle TW9LBSqxNWo_2 bicycle TW9LBSqxNWo_6 bicycle TXD-idarfhU_0 person TYsJu2G5WVY_2 knife TZdDUMDyozA_0 dog TZfFEYUY5_0_0 boat TZsigdW7Qfs_0 airplane TaL6ssJD8z4_0 airplane TalhQQ9B7vc_0 zebra Ta-JBO0InZk_0 horse Ta-JBO0InZk_1 horse Ta-JBO0InZk_2 horse Tbm_BFLOPic_0 train TcRl6wotFw4_0 horse TcR9fR_SWLg_0 bicycle TeiC-tObc4o_0 bicycle TgRRY3Mn0Ro_0 person Ti411VXWtAc_0 dog TjCiDUNoDi0_0 skateboard TkktEeCiSAo_4 knife TkktEeCiSAo_5 knife TlXSJmmN3dc_0 
motorcycle TnB8G7eZm24_0 person TnY1qP0YQQ8_0 person Tnc7CCuk78Y_0 person Tn4trDBJAqE_0 person To8VzjtX70s_1 person To-lnvpzIKY_0 person TqKcS4Cx7wc_0 bird TqvuyyM_x4E_0 bird TqvuyyM_x4E_1 bird TsM45PkaTj0_1 bird Ts4iqmKVRy4_0 knife TtI1W2xFQ5k_0 person TtI1W2xFQ5k_1 person TtnuIzV01ek_2 train TtyfhN-jWcc_0 person TuEArk4EFWg_0 person TuEwZSEUe5A_0 person TuOnAlE6TRs_0 airplane TubHgt_FxYo_0 person TufSi0uSU8M_0 person TvUmQi32j08_0 person TvUmQi32j08_1 person TvuhORVyaL4_0 person TvuhORVyaL4_1 person TwH6hv5zVIU_0 airplane TwSnlq5Kma0_0 skateboard TxV4qpdgJ3Y_0 airplane TxV4qpdgJ3Y_1 airplane TyIzjLHGvjo_0 person TzUMxAOWWcc_0 bicycle TzVawH7veiM_0 bicycle T0WCoXgklkw_0 person T0r5yfzMs4g_1 bicycle T24d3EHv2GE_0 bird T406qi8vIlk_5 airplane T406qi8vIlk_2 airplane T6XxSbeAl6Q_0 motorcycle T8e9Qi4dcNY_1 bear T95G52MuPFU_0 horse T-PL14w9TV4_0 cat T-cOBQACeAw_1 bird T_2A3L49ah4_0 dog T_2A3L49ah4_2 dog T_2A3L49ah4_3 dog T_2A3L49ah4_5 dog UANkhHNWM-M_0 person UAnl6TGZhxs_0 cow UA5VCImEZ2Y_0 dog UBdNIuCPaZ4_0 car UBdNIuCPaZ4_2 car UBsG3-ocU64_1 boat UE40h6VhUaU_1 bicycle UF8l_MU2rj8_0 person UGCPxfU7FKM_0 person UG5FFY29OV0_0 cat UHO129a_p0U_0 airplane UHYwdGF9W-0_1 horse UHYwdGF9W-0_0 horse UIvJPTYu6Hc_0 train UI4IvmmFIPQ_0 person UKExOybWiRM_0 motorcycle UKExOybWiRM_1 motorcycle UKkr05PKrb0_0 bicycle UKlB9mDIXss_0 person ULdZGJs5ta8_0 motorcycle UMsR07JXCYs_0 cow UM446G0Lud4_0 knife UOUaveJ_TWA_0 person UO_zNFtEt3Q_0 person UPkEE2dnlkU_0 elephant UPkEE2dnlkU_1 elephant UQAJPD_gH7g_0 cat UQDXdgIlpDg_0 knife UQibn_ZNp9Y_0 skateboard UQibn_ZNp9Y_1 skateboard USAjeRaDlJ0_0 person UTqlz0i9KIo_0 person UVTPHohbCV0_0 person UX4dpwv6qWE_0 dog UYAtAlnvVy4_0 skateboard UYc0lVVxayQ_0 dog UcCtmXy5F4g_0 dog UcbWaG8GwRs_3 airplane UcbWaG8GwRs_2 airplane UceYFW8-zZM_0 train Ucse975FqUA_0 elephant Uc5PAhXhIzk_0 umbrella UgsSu7wC28w_0 bird Uhj0HRMHPXY_0 person Uhsh3JUb_aI_0 bicycle UisVwousE8g_0 cat Ui8yPflhqHs_0 person UjMTd3LCxyQ_0 person UjMTd3LCxyQ_1 person UlFA0xDQcS4_0 skateboard 
UlhZSONgFCI_1 cow UlhZSONgFCI_2 cow Umvp1XgX6Qc_0 person Um-FzEOyncc_0 person UnUlhJaHWlA_0 bear UnyyMjT0BCc_0 horse UsCJdEa7tq4_0 dog UsCJdEa7tq4_1 dog Usrv7_ONvi0_0 horse Us6dL_WD7xg_0 truck UtyaA_QRIrQ_0 truck Uu9k1VohpvA_0 horse UvptsJcl_ms_0 person UwtHiozuyRs_0 person UxPh-hnwal4_0 truck U2LvNquzuZ0_0 bicycle U2LvNquzuZ0_2 bicycle U4LhReaGH70_0 person U64eMon0R9w_1 person U74o2HGsFeI_0 dog U853uMV0qAY_0 person U86p5VtUC6c_0 knife U9YbGyTBb5k_0 person U99ENpOmVGI_0 airplane pmrTy1xQ5kI_0 person prJIAYsv8bQ_0 truck pramqy_Y1gA_0 boat prlcpxzCoyc_0 bus ps-nNC6Equg_0 cat ptF2Hqj7DGk_1 motorcycle ptF2Hqj7DGk_0 motorcycle ptPi712LDq0_3 bear ptU4EDudgg8_1 bus pt6v3JZFi4c_0 bird puifEp7W50E_0 motorcycle puifEp7W50E_1 motorcycle pu0G99aVryc_2 car pu0G99aVryc_0 car pwFqv42foTM_0 person pye4y8sPr9I_0 person py0U90-ZTkI_0 cat py2dhJjpOaI_0 bear p19EU6tw9oM_0 person p2DntTqvGT4_3 car p2DntTqvGT4_1 car p2QsmFuYxdI_0 train p2TTKNDiGv0_1 bicycle p4pf9W4qt8s_0 person p40Oqh_akS4_3 bird p43GludvR_g_0 bicycle p5F9hHDkbKc_0 train p7UAl7_bv4s_0 bus p8KQvF1DyLg_0 person p8YhfWsz1JY_1 person p8YhfWsz1JY_0 person p8gE3VpTAR4_0 person p84Z-poVaAw_0 motorcycle p9ixpjYEEag_0 motorcycle p-J_LbVq7CU_0 person p-SJ_Ym5pTA_0 cow p-XasPaki0k_0 cow p-cJamorAiY_0 person p-2rgSte1DI_1 bus p-2rgSte1DI_2 bus p-6u3d8YV70_0 person p_YVPahadQ4_0 elephant p_YVPahadQ4_1 elephant qDP6_m4bDRA_0 horse qD8NS4r2Gd8_1 train qEjyhyeCIR8_0 cow qEjyhyeCIR8_3 cow qEjyhyeCIR8_1 cow qEjyhyeCIR8_2 cow qGiLjP8-EVQ_0 person qHYuGyp8_HU_0 bear qHZsnSLmqEY_0 person qIJo1R3rHmQ_0 person qJI7mnjOp0A_1 umbrella qJOaXM8s-Yo_0 knife qJOaXM8s-Yo_1 knife qJugj62heF8_0 airplane qKqEqxMZHVg_0 person qM566R4U4Ug_0 bird qQbEwbtvdRg_0 person qSR2E4eqjqI_0 skateboard qSiMwC5e5_I_0 person qUGXSXCXUbw_1 person qVCH1ozivyk_0 person qV9Ll-N_rpc_0 dog qWpIdTdBIQU_0 boat qWpIdTdBIQU_2 boat qWpIdTdBIQU_3 boat qXaS7daelL4_0 person qXfnmaLtO-M_0 airplane qXwXdnrUo5w_0 train qXx4Vj-HwkU_2 bus qYf_XBAUa_o_2 elephant qZFwurCX4DM_0 train 
qZH-IY7bBzg_0 person qZQcY5PTh10_0 cat qZVUho1xBlo_1 truck qZVUho1xBlo_2 truck qZVUho1xBlo_0 truck qbYjOWN6n70_0 horse qceiUxIt1VE_0 car qcjVVDAbHUI_0 person qcmbCgcy3co_0 person qdNXPwWD9_Q_1 person qdzu1EFDYUE_0 cow qel4U0nmQOI_1 person qfp7BvAtQa8_0 person qgKnno5T6f0_0 motorcycle qguyMwcAj4M_0 person qhb1bts1fSM_0 bear qheo-lRVpfk_4 knife qheo-lRVpfk_0 knife qheo-lRVpfk_1 knife qheo-lRVpfk_2 knife qheo-lRVpfk_3 knife qhmscyJC8dM_0 elephant qh8xnvGfllE_1 bird qh8xnvGfllE_2 bird qipZi2kaQyA_3 person qi3hoxEao_g_1 person qi3hoxEao_g_2 person qptB3_MZagA_1 horse qp5tJGAi9h0_0 airplane qqL9gnwx87g_0 cow qqL9gnwx87g_1 cow qq4_m1S3AOI_0 person qt6FFVa8DGM_0 person quoX4193twY_0 dog qvMRVm660LM_0 person qvZGFb3CbxA_0 bird qvcNxorHqCc_0 person qx647iZCsoE_5 umbrella qyQFBM_7mBw_0 bird qywYqT8IzaQ_0 skateboard qz4S2Tn1Jkk_0 person q2qEXqY43ws_0 cow q2v3AmGBH-M_4 train q2v3AmGBH-M_1 train q2v3AmGBH-M_5 train q2v3AmGBH-M_6 train q3TB2Rnymkg_1 truck q3pYgC4-lrs_0 elephant q35X7FnaiGw_2 bear q5BC4AVKV4c_0 person q6nXZqEmQGQ_0 person q9MXoyUF-BU_0 person q9d2hPrip6k_0 dog q_dqx0-AtKk_0 person rA595TIyUgY_0 bird rBko9NgVOX4_0 person rB2323YW1iA_0 cow rDQ2hcIWoBY_1 train rEXtAqxJj8c_0 person rGVf1BsLfng_0 cow rHvp_Dghuho_0 person rH33U6qgd9M_1 umbrella rIqhuv94Zuc_0 person rKN5E25jozk_1 person rKN5E25jozk_0 person rLbBCTSGdzc_0 person rOoxhMEKcgc_0 bear rPEIT9eAAMY_2 bicycle rPEIT9eAAMY_3 bicycle rPUzTjaLdkk_0 cat rPuPm0ctC3s_11 train rQHtu5_Piv4_1 cat rQKV6GBQuag_0 airplane rRH0VLQDJZQ_0 person rSF1UQ01lZc_0 person rSSbdX8817Q_3 dog rSu82skaMJQ_2 skateboard rSu82skaMJQ_5 skateboard rTIN784f0CM_0 train rTIN784f0CM_1 train rTIN784f0CM_3 train rTV3ev-xyuk_0 train rTYmEM2Lhew_0 bus rT4P9ZJeBG8_0 train rT4P9ZJeBG8_1 train rT4crgFLycE_5 bicycle rUJ7zeax1zY_0 person rV1Baq6-C6Q_0 elephant rWyf2iqpfng_0 horse rXf2T3VO-kI_1 cow rYkLuW5NLic_0 train rZi9k9F8S1w_1 person rZi9k9F8S1w_0 person rbIYpEELMQc_3 horse rbIYpEELMQc_2 horse rbMVAO2mJiY_0 person rbn7_DeuItc_0 elephant rcF4-O7o_Qk_0 
person rcF4-O7o_Qk_1 person rc96rbja6VI_5 skateboard rc-e_NDrZDM_0 person rdBSfuG2KBA_2 boat rdBSfuG2KBA_0 boat rdQvGZDUDJA_1 person rdhiEKvYF0w_0 car rdnDsUHCZSY_1 cat rePM3_x9tqw_7 person rePM3_x9tqw_4 person rePM3_x9tqw_5 person rfL51BZGldc_6 truck VCkpd_d1z4U_0 airplane VE-3PfVw5-Y_1 airplane VG2QbeXEwec_0 elephant VIQGgTWrg00_0 person VIr_rdbfvQQ_0 horse VJVWk9wyMjI_0 cow VJmgPBopcB4_0 horse VJ0by87MRoI_4 bicycle VJ0by87MRoI_7 bicycle VLSeTnShp54_0 motorcycle VLSeTnShp54_1 motorcycle VLSol2tA9WY_0 elephant VLcSoFR7qBw_0 car VMDBBz7G-Pg_0 motorcycle VMmtrv5OtMQ_0 boat VMxS4op_OBg_0 person VNCLtdahLmI_0 bear VNCLtdahLmI_3 bear VNHGw5Sj0Qc_0 person VN8_N7Ceofk_0 cow VP0WD1miM00_0 horse VP20LIiI9S4_3 horse VP20LIiI9S4_7 horse VP20LIiI9S4_1 horse VP20LIiI9S4_2 horse VP20LIiI9S4_5 horse VQWxUc9QOjU_4 bear VRtl4gAWELM_0 skateboard VRt9s3OQPzo_0 person VSLdNogDia0_0 bird VSrmwgo-veI_1 boat VTqoizpYNeI_0 car VTqoizpYNeI_1 car VTqoizpYNeI_2 car VTqoizpYNeI_3 car VT11p8szxZY_0 cow VUVAbtGJbuE_0 person VUh5jCDWj08_0 cat VUl6vkX7PRU_0 airplane VVn3XeSqijk_2 motorcycle VWTes_MfrOc_0 knife VXNEqQb5C4Y_0 motorcycle VXT0TH9jfZo_0 elephant VXZscyYzxqw_1 person VYYS45KWEgo_1 dog VYr49ml0uaE_0 person VZj4RHsnOWU_0 person VZqdzb_qI2g_0 person Va81siK4zeI_0 umbrella VdLqI43E7eY_0 cow Vd5pCJuOoDM_0 car VfBrelUfLFg_0 cow Vgpm6fwLIns_0 motorcycle Vhc7DKkRHOo_0 dog ViQIgBdCkh8_0 car VlBlBgxUa-U_0 horse Vlq4fYmrr6g_0 car VmVN4E_qtfM_0 person Vm9-f0pXycc_2 bicycle VngapMBo560_0 cow Vou-Sfzlpu8_2 train VqdeO4pa_rc_0 elephant Vqj-Qv5bVyE_0 person Vr1Wqz5_UA0_1 cow Vr1Wqz5_UA0_2 cow Vr1Wqz5_UA0_0 cow VsAo8VBzDTM_0 person VsOw_U6hYRY_0 motorcycle VsOw_U6hYRY_1 motorcycle Vsyd7-_CUA0_0 person Vs2JphYinjk_0 giraffe VtdrYDJFw-Y_0 person VtkV11WZWEc_0 cow VuDA6sPAa9U_0 person VuLf3ZTqniM_0 dog VuW2wDK-uZI_0 motorcycle Vv-z9_l8_ms_0 bird VwdZHZPjlT0_0 cat Vwkf0U9PZvI_0 airplane VwppYMiCI1g_0 umbrella VwvER7iR2YI_0 person VxG5gvk1mfo_2 elephant VxH52JoUd0I_0 person Vxyq13mC_uk_0 person 
Vxyq13mC_uk_1 person Vyf_VJEQ1jE_0 airplane V0CjVa5_1P0_0 horse V0sliERbCxI_0 person V0sliERbCxI_1 motorcycle V0w_hBBqe-g_0 person V1ufPW4ictQ_0 skateboard V25H8smvzbM_0 dog V56RVnEPG54_0 motorcycle V6rg5et7Q14_0 cat V6rg5et7Q14_1 cat V6_XA2w3sTs_0 boat V7CVQjk9-Xc_0 skateboard V8Pv-I4ovPs_0 person V9m1dMbXxug_0 truck V9qvycn1a3E_0 train V-ZKLxW5cuM_5 horse V-ZKLxW5cuM_2 horse V-ZKLxW5cuM_4 horse V-iFCgvAuCg_0 person WBcYTIQ65Ow_0 person WB6uQ708AxE_0 bird WCNpGdfG8nk_0 person WCZ4ZQ5ohf4_0 motorcycle WGw94BtHxYE_0 bird WGw94BtHxYE_1 bird WG1DuTb70bQ_0 cat WItuBm7azO0_0 cat WKpjUNNgKG0_1 person WLZkZ-4Y9fY_0 cow WN5u1Y1yGkA_0 airplane WP5JXCVRe9g_0 person WP5JXCVRe9g_1 person WQ603pEp_1k_5 airplane WTEO_Ywn9AI_0 umbrella WTw46mBWjOw_1 airplane WUvTKLEimNw_2 truck WWcVr4lbq3E_0 person WXETP4eMyD0_0 cow WZWh1M3qGAc_0 truck WbXmf511q4E_0 horse Wb9i7jssQsY_0 motorcycle WcUFxXISmb0_1 motorcycle WcUFxXISmb0_2 motorcycle WcgQXl6I-Ks_0 car Wc6RwJ_8yts_0 person Wc_-Q9ba0zs_0 airplane Wdh2SMcRQ2M_0 horse Wdh2SMcRQ2M_1 horse WfZR-VRmSB0_3 boat Wfl0LOShC_I_0 bus Wh9avYClECA_0 person WixZlWbnBdM_0 person WkvpcaxQTSg_0 dog WlFD1z5akJc_0 person WlK6sU21od0_1 dog WlP5_pcua1U_1 truck Wl1vbjfAxeA_0 dog Wl1vbjfAxeA_1 dog WmNKtcf5iLM_0 person WpxEmYBfqSU_0 elephant Wqb84sv1P68_0 cat WrClMyPxaDk_0 person WrSS3nc07hE_0 cat WsFZj4Bgtwc_0 bicycle WvGCvwHutAc_1 airplane WvUiJ8ZRRfc_0 bird WvUziN47FfY_4 horse Wwx2Vce-1oM_0 car Wx0zNFqSUZo_0 horse Wx1qid26zsw_0 dog WzCI6AqY7cg_0 bus WzrI82-Ak4I_1 motorcycle W1juH0nZ8v0_0 airplane W1yEDHYLG1Y_0 truck W14Nt0_EGQg_0 person W17CFtB5Oy4_0 truck W1-9iBLd1lg_0 person W23FACVBLgI_0 person W3Bv11o03TQ_0 cat W4cKlmHvXZ4_0 knife W4gR7_z77A0_0 person W4iSCn6ILJs_0 motorcycle W7xlWK7cuEI_1 skateboard W8U3FkkaVbc_0 person W8d2hNOMHpQ_1 horse W8yL4Qnuo4k_0 elephant W86rN6nrllQ_0 person W9lLrNUFQ9M_0 person W975mcNRX7c_0 boat W-sCMBY47ck_0 horse W_QxijO2VBw_0 zebra rftE7M9tNqI_0 person rftE7M9tNqI_1 person rhWLgPl3lt8_0 person rhjcRHB4crY_1 bicycle 
riNqBOlFCuw_3 dog riVZCbT4LDE_2 person rih7ECmHfRs_1 cat rkIzABhjHkA_0 person rk1ByqQSwtI_1 elephant rlWlgyP-3-s_1 umbrella rlWlgyP-3-s_2 umbrella rlWlgyP-3-s_4 umbrella rlqtE0bF9nk_0 bicycle rmVxFro55IQ_0 skateboard rmxx9X1ytcA_0 airplane rm4XeENehOU_0 skateboard rn9-fIMYEkA_2 motorcycle rn9-fIMYEkA_0 motorcycle roUwF9YU21U_0 person rsne3z-CaDw_1 train rtjlk_iOmdE_2 train rtjlk_iOmdE_0 train rt4Qm6HPVTY_1 boat rvBm-SnbjVI_0 cow rwQl_jKPcyM_0 person rww5DvtCsG4_0 horse rwzjQSTLmhk_0 person ryUMZWWwJUk_0 person r0P-2rp1Hpk_1 bus r0vIwhp5RLo_0 knife r03Za0dP0d8_0 person r09YKBrwa8M_0 horse r3PUq_cy6Mc_0 truck r3cOrAN6BI8_0 train r3cOrAN6BI8_1 train r7WW1Fl-s6s_5 bus r7WW1Fl-s6s_4 bus r7WW1Fl-s6s_6 bus r7WW1Fl-s6s_7 bus r7WW1Fl-s6s_1 bus r7xw4qHLKIY_2 horse r7xw4qHLKIY_1 horse r7yOsosLuHI_0 cow r8NwODfEuhI_0 dog r8NwODfEuhI_2 dog r9LAMeOEcsI_0 person r9jyOtbfWs8_0 person r9osF8drSbo_0 person r-Dva6GT-a0_1 dog r-tFy30HVCw_0 person r-0UD9KQhvY_0 car r_sRdP_5WaM_0 skateboard sByCUshWhWs_0 dog sB613NHl89g_0 elephant sB8zpg-GrRo_0 person sD_9McrL3UQ_0 skateboard sD_9McrL3UQ_1 skateboard sEzZ3JnSzaM_0 bird sFxTS449nUg_0 person sG0q9rphsoY_0 cat sIIFHk89TT0_0 person sI17jkxX6tE_3 skateboard sJyknuUaIOg_0 skateboard sKCW1p03okE_0 person sKD6TBNqy6s_0 person sKD6TBNqy6s_1 person sKJ0JtWZeWw_1 cow sKJ0JtWZeWw_3 cow sLZh8XaxoYw_0 person sLfyo1VrX3g_3 knife sLfyo1VrX3g_2 knife sLnYAS4LAY8_1 person sLnYAS4LAY8_2 person sMVMaH9aWHw_0 horse sNV29dtSqYs_1 umbrella sOfNz788QiQ_2 horse sP4jeoUjHZM_1 motorcycle sRb7OHsI6s4_0 bird sV9L8gpGDmA_0 motorcycle sWbk2Sw9Rew_0 person sWfMpwviOCA_0 car sXSjs2EV61Y_2 knife sXw73oA1Tq0_0 horse sX5GCwZG8d8_1 bus sbkHA-DWPSI_0 person scyRfbyCzJU_0 cat sc15m4_lcvw_0 person sdAAObJErSA_0 motorcycle sezamC2zGqg_0 bird sf76JIFYKB0_1 cat sgHdQYSWPXg_0 car sgU4wTZ6k5s_1 person shXeONsfVmU_0 person shiIdcOonRs_0 person siFucH6jjIs_0 boat siFucH6jjIs_1 boat sj7NOYq8KBA_0 person skEWWsL6k9g_0 horse skl1lsZUG4k_0 person sm346w9J4zA_0 knife snZjH03fjVk_1 person 
soNDR07vxhQ_1 person soNDR07vxhQ_0 person sofKbpbuX84_0 person sofKbpbuX84_1 person spVw0PNXErs_0 dog sqLiQtbkEO4_0 cow sqv-uPhtxwk_0 airplane sq-wqsIw5hw_0 train ssspgc75B08_0 giraffe steKGH-8MZw_0 horse steKGH-8MZw_2 horse sts2vAv4BQo_0 person suERIXWx_z0_1 person svCBYM2zl80_0 horse swuFjNkTmQY_0 dog syZTh043BkQ_0 horse s0YqBVjRDyU_0 person s1Pd7evRn0U_2 dog s2PyqAoOqrY_0 cow s2x8llFphNY_0 elephant s3WiR_wFUBE_0 cat s3ijyNmvxpE_0 person s4rr5OrSI4k_0 skateboard s5I219neN7c_0 person s5jmkD6lkbU_0 dog s5n7L55KpWE_1 skateboard s7or9ZhEyXE_0 person s74eu-v6aqA_0 person s8W4NK7dWe0_0 person s83wzR7ySyM_0 skateboard s9G4llLAJiU_0 skateboard s9OmvmQH9hA_0 elephant s94ng_sG6Dg_0 boat s-Jnbfjkmak_0 skateboard s-Jnbfjkmak_1 skateboard s-guJTrtfSU_0 skateboard s-yjgHx_YWg_0 train tAGvlfgdOsI_0 skateboard tAGvlfgdOsI_2 skateboard tBlPdyu-syw_0 bird tBlPdyu-syw_2 bird tBryhvKADFQ_0 dog tGyP_SbWsVA_0 person tHA_VdGe90Y_0 airplane tHA_VdGe90Y_1 airplane tHcqw8Cejs8_0 person tHfOMcj62SY_0 zebra tI2i9_rBdwo_1 bird tI2i9_rBdwo_3 bird tKpbcnqu6bY_0 bird tK0pl2_wbWU_2 elephant tLJpuELQgxY_0 person tLa4F5ekKW0_0 cat tLzUBeOwhyM_1 bicycle tMojfxB-9zA_0 person tMp5Y1zucfI_1 train tMp5Y1zucfI_0 train tM3FYC5IVPo_0 motorcycle tNiu2o7-KPY_1 car tOK5TnF8eHQ_2 bird tOL0kPV03Uw_0 train tOlXErF8Z4o_0 horse tPCRXfE_aGo_0 bus tQj85vHtmeE_0 bus tQnUccPTkck_1 truck tQ_Vy-9pvoQ_0 skateboard tSlXTInFXss_0 person tTSVU8IU10c_0 motorcycle tUdWqmNDeY8_0 person tUm_oehvEpM_1 person tVOS6wht6oQ_1 horse tV17SBx-oqE_0 person tXBDRj1c-Uc_0 person tXf9xVs5ZGk_0 train tYKrjpIMYb0_1 skateboard tYciFvRQuec_1 truck tYciFvRQuec_0 truck tY-4fAv_YRU_0 horse tY-4fAv_YRU_1 horse XA65Kh83GmE_0 cow XA65Kh83GmE_1 cow XBNPaOqVqds_0 bird XBUvxtvKWM0_0 cat XByg_hQRQDM_2 bird XDNVcbDkafM_2 airplane XDNVcbDkafM_3 airplane XDNVcbDkafM_4 airplane XD0ydIAwgGM_0 cow XD_iMe4m2vQ_1 person XGX6SRd3ZkE_0 bird XHu9PxuBpXg_0 airplane XIzQLXQTsRo_0 cow XI3_0lXrnfY_0 cow XJq9qp3jhq0_0 motorcycle XJq9qp3jhq0_2 motorcycle XJq9qp3jhq0_1 
motorcycle XLgI0VgtzEw_0 cow XL50qkg4qdA_2 elephant XL50qkg4qdA_0 elephant XMIsf8xuMh4_0 train XPi83QmsR90_0 cat XQliC40rP9M_0 person XRKZRwdqhNo_0 bird XSMGAlakHWY_0 person XS5wfvz6XZI_0 bird XTWeBFPqdh0_0 person XT0t6ims_FI_2 skateboard XVabRVMuX4Q_0 motorcycle XVabRVMuX4Q_1 motorcycle XVabRVMuX4Q_2 motorcycle XVabRVMuX4Q_3 motorcycle XVabRVMuX4Q_4 motorcycle XYA6HKrVVQQ_0 cow XZBFfRl6DkA_0 person XaVZr4HPh2M_0 cat XalkAzccT5I_0 person Xa6tjMVGH2I_0 motorcycle Xa6tjMVGH2I_2 motorcycle Xd9tLIFo_7E_0 cow XeIssB-JkcU_1 bicycle XeIssB-JkcU_2 bicycle Xevq2dskQWo_0 truck XfUIrHPVj-s_0 cat Xf09qM8SYBc_0 truck XgDJ16iRhxs_0 elephant XgDJ16iRhxs_1 elephant XgDJ16iRhxs_2 elephant XgFaXb7Vb58_0 elephant XgxYznR79R0_0 dog XhOx4rgdI-8_0 bird XhTWW9CwFzM_0 motorcycle XiSjHcHG5IU_1 bird XjXFktrwSOk_0 bear XkpxlUwx4oc_5 truck XkpxlUwx4oc_1 truck XkpxlUwx4oc_2 truck Xkr3OHSz_CA_1 person Xkr3OHSz_CA_0 person XlIxLJTiphI_1 airplane XlSvIczm3JA_0 person XlcJsAWbsyA_0 dog Xmwv-NZZat8_0 person Xm_CKSNQE3E_0 bird XnfAvhHnH6M_0 train XnfAvhHnH6M_1 train XoWHAeOAXg0_0 motorcycle XoXMpm6Yxfs_0 person Xoa_dCJDiTE_0 motorcycle XocaP_gyqJU_0 person XopbyM2SJbc_0 bicycle XopbyM2SJbc_1 bicycle Xr_3UPISgT0_0 skateboard XsK5KxttYBA_0 person XtTLGRBrm3I_0 skateboard XtVTdegdzvI_0 motorcycle Xu6xzBcJySk_0 person Xu6xzBcJySk_1 person XvvA9Zc1TMA_0 person XvwOXlVdehA_1 person Xwqm_wzZDQI_0 cow XxkkXeLqqu8_2 airplane XxkkXeLqqu8_0 airplane XxmNQjB1D_Y_0 cat XyldpxZmUN8_0 dog X0CZDjRqcKg_0 horse X02e7Fj9BLM_0 umbrella X0-n3maCrZU_1 dog X2uXOY9J_UU_0 person X3HCAEcRaW8_0 bicycle X3qbUW_qT7k_2 airplane X4SbOXRpo0A_1 dog X7xm2nZL7jc_0 bear X79vSvy6SOQ_0 skateboard X9L-jwA6Ozg_1 train X9L-jwA6Ozg_0 train X9a5wEDFXc8_0 boat X_TnIuY27eM_8 bird YA4-rm-dcsw_0 person YA-N841dD-0_0 person YB1trUAUzhg_0 person YB2wzBLh7MU_0 zebra YCU3daBCWsU_0 umbrella YCXHNoYaQRc_3 skateboard YCXHNoYaQRc_4 skateboard YDd_skWNTMs_0 skateboard YDyc1Yv9j_s_0 person YEPfw3k3vEw_0 person YEvBzZ5KBYY_1 horse YEz7v7toUwM_0 truck 
YFQlAc3qTBQ_0 motorcycle YIHcQxH9e1o_0 train YIzqB2G1UvY_0 person YI4lmC3imb4_0 horse YJiqdRcs_gU_1 person YKlWROFtcxc_1 skateboard YKlWROFtcxc_0 skateboard YKoT-GgRSw0_0 elephant YKoT-GgRSw0_1 elephant YKrdwZe1vq8_0 dog YL97h6yps6w_1 knife YMbqULxZJpg_1 horse YMbqULxZJpg_2 horse YMkOJNatD88_0 person YNEDPsAWm5I_0 person YQXwRsP0zvE_1 person YQgUV8TrYcw_0 person YRWC7Tdc5oI_0 person YTD8j8z44qQ_0 person YTd8Rxtpt1E_0 train YTd8Rxtpt1E_3 train YTd8Rxtpt1E_4 train YTd8Rxtpt1E_6 train YTd8Rxtpt1E_7 train YTd8Rxtpt1E_8 train YTd8Rxtpt1E_9 train YTzuVYGpDhA_0 motorcycle YUhgrCNuMGQ_3 bear YVDCTyDcjjA_1 cow YWRbi_v93Mo_0 person YWhwljQ3efA_3 train YWhwljQ3efA_4 train YXeaiwTZ3ZE_0 cow YXz7CDJ11jY_0 bird YYUo7EkkJeg_0 bicycle YYUo7EkkJeg_1 bicycle YZmhYkqgBi0_0 skateboard YZmhYkqgBi0_1 skateboard YZmhYkqgBi0_2 skateboard YZ3kcrHk4N8_1 horse YZ3kcrHk4N8_0 bicycle Yax1xdgRbt4_0 person Ya2zfpe-_ro_0 bus YcjMrWCSRSA_0 person YdooYDhKq00_0 person YeTYMiaLkWY_1 cow YfvvO_T8j8k_0 skateboard Yf9jBSXQTLo_0 car Yf9jBSXQTLo_1 car Yf9jBSXQTLo_2 car Yf9jBSXQTLo_6 car Yf-okdUBk9g_1 bird YgM058nmMnQ_0 person YjZoPTjqDGw_0 skateboard Yj6XWsgomO0_0 cat YluDona_474_2 bus YmlQVVQx4SA_0 person Ym3lE2u4vxE_3 skateboard Ym3lE2u4vxE_1 skateboard Ym37vW7b0U0_0 cow YnZU-Qa6yeI_2 bus Ynyd8SBB5Wg_0 knife YoFfsRgrNeY_0 person Yof6XFKNuNY_2 horse YorREGtes1I_0 person Yo9XVrgl_GM_0 cat YpDsXa1kNZU_0 truck Ypb0U6Ga5pk_3 train Ypb0U6Ga5pk_1 train Ypb0U6Ga5pk_2 train Yp1kl6xU-Og_0 person YqvGb_tDI38_1 bird YrhvCSxifRc_0 car YtrNZ4mlMw4_0 elephant YvAlZo3quqE_0 person YvwW9T4Qpek_0 motorcycle Yv3YH0nImQI_3 truck YxRG0JQrpwI_0 person Yxia21K4O6I_3 truck Yy0lIDbLxQ8_0 elephant Yy0lIDbLxQ8_3 elephant Yy0lIDbLxQ8_1 elephant Yy0lIDbLxQ8_2 elephant YzTl0Nf0Kpw_0 cow YzT_UsE8Mhs_0 airplane Y0Hz5Hw1AiM_0 person Y1lKSppJhdI_0 cow Y16c_yGYw1M_0 elephant Y16c_yGYw1M_1 elephant Y2jXJzRVhMI_0 person Y2x6ow80IkQ_0 person Y3TtBVfW6gs_0 person Y3ZDfyDvFi4_0 elephant Y3c_6Zv0dxg_1 knife Y3mx4jYyagQ_0 train Y5Atu2VWemQ_0 
train Y5BEvakwvuM_0 dog Y64ky0LNHko_2 elephant Y-YU80ccuXg_0 elephant ZBJsNXYIQ4o_0 person taPyucc_cOU_0 person taPyucc_cOU_1 person tafdN9GXP0g_2 skateboard tbLnjlX1xF8_2 bird tbuu2U3o02Y_0 person tcOx8KjmHPo_0 person tc98WTYT-VI_0 elephant tdIWlg4_01E_1 bird tgRYkhC-gJU_0 person thZqLw7IxVw_0 knife tj2-fSeuMRI_0 bird tmch--OGZhY_0 giraffe tmsInTqqzHI_0 zebra tof4QiBHPQQ_0 person towJyxwm3wE_0 bird to8OyPMfkaI_0 person tpQv6Sn5z3o_0 motorcycle tpcuQY4eNaI_1 bus tpeBIe69wr0_1 bus tpeBIe69wr0_3 bus tpwUnqxQYjo_0 train tqy3XprB11s_1 horse tqy3XprB11s_2 horse tq9WP-2U1QM_0 person tsMTiOeM52E_0 cat tsg-S4Hk2go_0 person ttzJbLLAR34_0 cat tvSJKUR21UM_0 train twewRZpG7Fs_0 cow twxvNeK9FZo_1 bear txDhTthoXSk_0 motorcycle tx0mtmimu0k_1 person tx2PSvwf7FU_1 cow tyem40ZMKGE_0 person tygG1C5DURU_0 person ty3iURJku9k_0 person tzH_tvBDeJA_0 skateboard tzPForR9Ejs_1 train tzvKjCoHBMI_0 bird t0TW8zZxCWQ_0 person t1N1ijCr5NE_0 bicycle t1N1ijCr5NE_1 bicycle t4FZmjCINtw_0 bus t4naVz1a0sg_0 train t4zuUZQozs8_0 horse t5B7vIbyRNQ_0 person t5kzdnId2sI_0 horse t5s4Fs07WLM_0 dog t50QLEhcZCE_0 person t6C6ukC_zEA_1 bird t6C6ukC_zEA_2 bird t6C6ukC_zEA_0 bird t7YFOxuWxtg_0 umbrella t7YFOxuWxtg_3 umbrella t7s424DNznk_0 cat t8MqK7LWqs8_0 airplane t8mVwobdP40_0 boat t_qvtoXbLRI_0 person uAWXGcWWgSU_0 person uAZF38u6SOo_0 umbrella uAzws057QjE_0 skateboard uA1sb8QyXuU_0 skateboard uCZi19CC7rk_1 train uCZi19CC7rk_2 train uCZi19CC7rk_3 train uE5rIJoAafE_0 bird uE5rIJoAafE_1 bird uH0jKXHq7Lw_0 horse uH35b2DEXFw_1 skateboard uH9vcwYxL2s_1 person uIu2jQswp94_0 person uJcu-YlAtbc_0 bird uKJqU3gtIWM_0 umbrella uLPuf056wH4_0 horse uMAkaCYTDuc_0 truck uMYGWhLdrlc_0 boat uMiNpG3NcEw_0 person uMpufBdwRn8_0 giraffe uNpHGE63PdQ_2 truck uNpHGE63PdQ_8 truck uOmCLzEMPGc_0 train uRFXE4UfdTE_0 cow uR8MqB3VgSI_0 truck uS1QmKXc0uY_0 person uTsfiR5FPdM_0 person uT9uk3mtt98_0 bird uUU-VpxxSiM_0 cow uVrW8Mm2xGY_0 person uWyTGtedEqU_1 person uWyTGtedEqU_0 person uarSTtaV_Ps_4 boat ua6Xyj9aWT4_0 bear ua6Xyj9aWT4_1 bear 
ua6Xyj9aWT4_2 bear ubHgpaAseuo_1 elephant ubijaVodfKg_0 person ubijaVodfKg_1 person ubsr27_dQOk_0 elephant ubsr27_dQOk_2 elephant ubsr27_dQOk_3 elephant ubsr27_dQOk_1 elephant ucUearjcPHk_1 airplane ucfXE6fw3go_0 cow udlyGSCujUU_0 truck ufB4EORClps_1 knife ufMXT_CmtK4_0 airplane uhm0JnSA-kQ_0 person uiLBqX72k4k_7 boat uiM-lDuYaeY_0 person ujoJwRvjEdI_0 person ujz4u55Tp1U_0 cat ul47aFS8dQE_1 motorcycle ul47aFS8dQE_2 motorcycle ul47aFS8dQE_3 motorcycle umkNI2_0Lqc_0 person umxZfostBlE_0 train um22CD4bkqo_0 cow un6QDPagbfo_1 cow un6QDPagbfo_0 cow up6VT6l38-A_1 skateboard uqn85v1WM7A_0 motorcycle urAYVS5Lz7k_0 person usAsP-m-qs4_0 dog uuhWeHmlvt4_0 person uu3KluYuhc0_0 person uu3pH95cmtk_0 person uwXhzSsAIJw_0 person uw9TxuXeiP0_0 train uxgUbys1eD8_1 bus uzMFzDPfsws_0 knife uzsdMqrgiL8_0 person u14Sp3wCQew_0 car u2BHvsjQGjw_0 person u25Jazd2yJM_0 person u4KPFsw5W5c_0 motorcycle u4oma0FVycA_8 knife u69KRu61wXM_0 person u7xTeWelI-U_3 knife u8mmwwrdNb0_4 airplane u8mmwwrdNb0_5 airplane u8mmwwrdNb0_9 airplane u80Y4lA5xT0_0 dog u85tUrDgmOQ_0 bus u9HkSfjYpnA_0 motorcycle u9rfXD33UIM_0 person u9_P9HFh_NY_0 dog u-_A36Ha04o_0 cow u_D1eyd8AOM_0 car vAUSfFO5UI4_1 dog vFMzMNDlnBs_0 person vGIYDcnNTvA_0 knife vHQkxg7kPUk_0 dog vH0ZiiuSQzU_2 person vH7sKynwjD4_0 person vJypzwSdyN4_0 train vMt5AD41SKM_0 person vMt5AD41SKM_1 person vOY2IRNsjYg_1 person vOY2IRNsjYg_0 person vQ6eOB8rxUE_0 person vRjErSbQNNY_0 person vTa2zdbIyUw_0 person vT2JpCnT6rg_0 boat vWqexY1OdWg_1 skateboard vXbTARLug3M_0 person vYN_Gy6fUbI_0 bus vYhPihwivZs_0 person vaaqJVWoSf0_0 person vadASNfLl9I_0 dog vas3iNRcsK8_0 elephant vas3iNRcsK8_1 elephant vbLhfzHqEKc_2 horse vbSnjtc3vIs_0 cat vcALsxetYU4_0 airplane vc-_aAQAXs0_0 knife vdXD-HTzyFM_0 cat vfeKOPKE6l8_0 person vf7NtV1T5Jc_0 train vf7NtV1T5Jc_1 train vjb_l1_hEXk_0 person vjojFy4rPeo_3 car vjojFy4rPeo_1 car vj_BAwFKqtQ_0 umbrella vklwqjQis8Y_1 cat vlPgSny76H8_0 person vlflI5iuszQ_0 person vnD3gELVAq8_0 person vnyBVn70QLY_0 cat vnzsKpfAS_M_1 horse 
vpBxBDjiJxw_1 dog vvamB_-Z0so_0 horse vv3gfxFz2zw_0 person vwe8ZaV-4z8_0 bicycle vwtokH03eW0_0 skateboard vwxzh1lJ7iw_5 motorcycle vxmdsyEpU6A_2 bus vx0oKJcOQb0_0 train vx0oKJcOQb0_3 train vx0oKJcOQb0_4 train vyLqolkoVIM_0 person vzBbUEwED60_0 person vzBbUEwED60_1 person vzU0GH4cZM4_0 cow v0tUEeE4RGc_1 truck v0xTNbrYZY0_0 giraffe v01IvIxWXTo_0 person v1iIhTWRjg8_0 boat v1-PGfS1YCY_0 boat v3LIQHdveBA_0 person v4H5VwQyKEU_0 train v4H5VwQyKEU_1 train v4QYOX-FHhY_1 motorcycle v40pc8KBg0I_2 horse v5YzVj25_hs_0 truck v5lUHsxx0mc_1 skateboard v50Qa_KMCzQ_0 truck v51CdpETaug_0 bird v6UDfM50GIM_1 truck v7XVyg16ens_0 cat v8Kp0jhKsKk_0 person v8ceKkKdqrE_1 knife v8hOOgLXRjg_0 person v8kyeMoFLqk_0 horse v8rj3jIndSE_0 dog v8tktR3aE38_0 airplane v_yEG5_Qm8Y_0 person wCu6xsT18qo_0 person wDHRro9mXuM_0 horse wDcnUJFHguE_0 horse wE8LYkzcq0o_1 horse wE8LYkzcq0o_0 horse wGPW8I8nGmc_0 train wGWIrs5ja0Y_0 bicycle wGyJeWBe8VA_0 umbrella wIapUcRvgTM_0 bear wIapUcRvgTM_5 bear wI0a0fzgy3w_0 horse wJdfgWlSY5M_0 person wJdfgWlSY5M_1 person wK7yIg1qfZ4_0 person wLA244rmq6g_0 cat wLHLSvMwmjM_0 skateboard wL0z6-jkCcc_0 dog wL0z6-jkCcc_3 dog wL0z6-jkCcc_1 dog wL9iOnWhckI_1 skateboard wL9iOnWhckI_3 skateboard wMShicf3N_E_0 person wMyAEfVE_u4_1 elephant wNKWZ43SioQ_0 airplane wNKWZ43SioQ_2 airplane wNWW59wDinQ_1 train wNcjU9-ck10_0 person wODzPBxcT0A_0 motorcycle wODzPBxcT0A_2 motorcycle wOLrGAo0vFo_0 horse wOSL7OPRBXM_1 dog wPRCf3v0EfI_0 motorcycle wQtHgysmmFg_1 boat wQvPlByUvB0_1 knife wSSTL6uuM9Y_0 train wSmVgAahSUw_0 skateboard wSmVgAahSUw_1 skateboard wSmVgAahSUw_2 skateboard wTMj2Gp8wz4_1 bird wTMj2Gp8wz4_0 bird wTtXB0Z2eMk_0 car wV1VMLQfTYo_0 skateboard wWpNKbsF6q8_0 bear wa1KdARQXXg_0 truck wa3jVRzsWGo_2 truck wbmT4LB3lVQ_2 knife wb9x3QDpcYA_0 person wb9x3QDpcYA_1 person wcOuc6Y3Gek_0 train wcjnFIBHoc8_0 bear wdb2-oX7HqU_0 boat wdhqMpQcsjc_0 dog wdhqMpQcsjc_2 dog weH4PvRo2GU_1 bear wgZbNzu2Mdw_0 person wguspvl5Ioo_0 person wg1ZFP15W8U_0 horse wg6XS3q4Vg8_0 train wifl75i2zGw_0 person 
wiiV9QdYsYM_3 bus wjfHYr4lXU0_0 cow wmfJAE6gu7w_0 person wmjfHsCs1CE_0 person wmn4YG9rirU_1 bird wmn4YG9rirU_0 bird wmx0UeWsPyU_0 person woEUh2mzEkE_0 horse wqD1WkfidVw_1 bear wr5b8Op3LUM_2 bear wuAwZ_wX7jk_0 knife wuFVuJjgpLk_0 airplane wvadJ-1Ls80_0 person wymDvXB08SM_0 person wzBmon2jJxI_2 bird wzlA0qMLDV8_1 cow wzlA0qMLDV8_2 cow wzlA0qMLDV8_3 cow wzuQhwWLllk_2 bird w0JzCkELpj8_0 cat w0bfVrI7CPQ_0 bear w1j-YVcZpfc_0 person w2WW3bYmA7s_0 truck w247rqoLoGg_0 bear w3F_8A8kY7o_3 elephant w3F_8A8kY7o_5 elephant w3F_8A8kY7o_6 elephant w3adXMIxupk_0 cat w35-xR0Vn_0_0 zebra w5Pb_ORVLKI_0 airplane w6A2W9VQeZk_0 car w6JEUZI5Vh8_2 skateboard w6JEUZI5Vh8_0 skateboard w6JEUZI5Vh8_3 skateboard w7IKxGLuaQA_0 horse w7g5pDCGteg_0 person w8zrFmMpPmc_0 motorcycle w8-ovxjadNo_0 train w93q7lv9In8_0 person w-eAEp0TUi0_0 horse w-eAEp0TUi0_1 horse w_euwPW5ukA_0 bicycle xAUupk4sGI0_0 person xAedjC0r5KY_0 person xAfxJQL2_aY_0 zebra xDgoaE-g50s_2 bear xFnFWM8KXcE_0 person xFzsK94M68U_1 person xGbFeCuGypE_0 person xHOcerZTZxM_0 person xIUJ8zlr0TU_0 bear xIizuktSVrM_0 truck xJ_xdRV9lzo_0 cat xKd8dHsveKg_0 person xMiQuC8eKGU_0 person xMp4dCjzI08_0 cat xMuQzm__4bo_1 person xMuQzm__4bo_0 person xNBT-PZEMH0_0 bicycle xOLvPvBg-8U_1 horse xOtxf0cmHyA_2 horse xPDDIKF9T3A_0 person xRJNEyms-F8_0 train xSIjCyHBypw_0 umbrella xSIjCyHBypw_1 umbrella xSL4NZUmhW4_0 person xUB3mR57tLE_0 bicycle xUtGzUu5Ryc_0 umbrella xU_2MZdWfxM_0 cow xVuNCF2vbXs_0 person xWWnn5OWp4I_0 airplane xYVriT4YV0M_0 person xZLHtt1yjYk_0 truck xZZ_W6fRi8E_0 knife xbL4hiu8qh0_0 horse xbQZucd8eu0_0 bicycle xbQZucd8eu0_3 bicycle xbQZucd8eu0_2 bicycle xcY11ewiUMM_1 horse xd_raY9PCHM_0 bus xd_raY9PCHM_1 bus xeAkz6Kg108_0 bird xeBhbPbmS8w_0 person xfzxTuJ85A4_0 airplane xfzxTuJ85A4_1 airplane xitZyv8gMgQ_1 horse xjdEiJ_z4T8_0 motorcycle xj3FKNXP-cw_0 bird xkKoATbAX0w_0 dog xkeTuOlBIMM_0 cat xlT93OXr3uc_0 person xlT93OXr3uc_1 person xlfOatU3OyY_0 boat xljqBqpwIHo_0 person xl110TqE0kQ_0 cat xmWAmSXnWCY_0 car xo54E-kQcoA_1 boat 
xpGDfRYqtSE_0 cow xpcNJG8acpU_0 dog xp_ShmZCoDw_2 airplane xqNQIYHzAGk_0 person xrGm-1D2Zqk_1 train xsrHSco3Zcs_0 person xsrNtKa0oZg_1 person xs1kBHxDpxU_0 train xs1kBHxDpxU_1 train xs1kBHxDpxU_2 train xtHE1-GIP_w_0 person xtXt8Vm3Qps_2 dog xuAm_BWnXRc_1 motorcycle xuAm_BWnXRc_0 motorcycle xucBFquWbi8_1 bear xv4fy9zyuNE_0 person xv6NQvvvIhk_1 bicycle xxEtEzi7YiY_0 bus xxcJJA7hCQY_0 person xxdOVyEU-c4_0 person xyg1xFLohGI_0 cow xyyz5QJ7wi8_0 dog xzC5_r9raeY_0 person xzFcPnglQf4_0 person x0RxwpR4wIc_0 bird x0RxwpR4wIc_1 bird x0nlchdJVJw_0 bear x0nlchdJVJw_1 bear x0q0JMiiw1A_0 cat x0xsHmQGaB8_0 dog x1RBYEheBRQ_0 person x2MJ_zDJY3k_0 person x2Tfa1fMOyE_0 person x29EcPsdK1Q_0 dog x29EcPsdK1Q_1 dog x4h9pGwdSMU_0 horse x4r2tx9_9wQ_1 person x4r2tx9_9wQ_0 person x4uX_33GiJk_1 truck x48Ogx7C31g_0 person x4-I_EckNls_0 bus x4-I_EckNls_1 bus x4-I_EckNls_2 bus x4-I_EckNls_3 bus x5nImw1YH94_0 person x6sZc4EoI8o_0 person x6298plJ-7M_0 cow x7jo9uCmWA0_0 bear x8VC2CXIDBI_0 person x96LXIEQ3SM_1 cow x96LXIEQ3SM_0 cow x-2AUxPCkVM_0 person x-26Z1zy1-E_1 person x-26Z1zy1-E_2 person x-26Z1zy1-E_3 person x-26Z1zy1-E_0 person x_CImXdwsg4_0 truck x_XV2Y3pwDA_1 bicycle x_XV2Y3pwDA_0 bicycle yCYtcDx1zzE_0 umbrella yCaJQKIGAjg_0 motorcycle yCz3VdCGZMA_0 person yDw-9GLrYj0_0 person yF0X9hui-Go_0 person yGD_BY9mQlM_0 boat yIkwS9Vkq-k_0 elephant yJOGbyQ8qs8_0 person yJZU3h3_06M_1 cat yLFd8GdaqBg_0 person yLL5Dv2F1rs_1 elephant yLL5Dv2F1rs_5 elephant yLL5Dv2F1rs_0 elephant yLNuhB7I5iI_1 knife yLNuhB7I5iI_2 knife yLkMk9nMaos_0 train yLkMk9nMaos_7 train yLkMk9nMaos_1 train yLkMk9nMaos_2 train yM9_GnJpXsM_0 airplane yNnOUMUIIno_0 bicycle yOrqtKYEfNs_0 train yOrqtKYEfNs_1 train yOrqtKYEfNs_2 train yPscRV8ebRg_0 person yQLGypU_WiY_0 knife yTZekxz2awI_4 airplane yTZekxz2awI_1 airplane yT-tBu_wqEo_0 cat yVO-nlNYxrU_0 person yV1EsNcE3kY_0 airplane yYIY-K1Hk-0_0 cat yYUnGStTnHE_0 train yYUnGStTnHE_1 train yYr5tuCEb3w_0 cat yY6S-xTKWGc_1 person yaNT5d8H3ho_0 person yahVo8Nqxks_0 person ybCbkJl7tog_0 person 
ybt9EtMfrdI_0 person ydxMYuiOJAI_0 person ygK39Pz1tKw_1 motorcycle yhp30idsPKU_0 boat yiCMaealOnQ_0 cow yiujj_fUOg8_0 person yjOTRS1-3Is_0 cow yjUDTPRe-tg_1 person yjnR7dP-hxE_1 bird ykQnvD35jxs_0 bus ymoggco-rpw_1 elephant ynHMWKjfsNk_0 car ynYz6f5FCOk_0 motorcycle yoTs9WxR0mI_0 person yo3wwD8VMLA_0 person yo9gwC7gpEk_0 boat ypC9L5um-ic_0 person yp9kACFk9KU_0 car yqWKo_T-YsM_0 person ysb6LLJ0t-c_0 person yssYMx-tQs4_0 horse yu2v206waMs_0 person yvDdzmW5jGs_0 cat yxURDHgvWrs_0 train yxURDHgvWrs_7 train yyMtxTJNnUM_0 skateboard yzE2GgYffew_0 person y0HZlHGSvHk_0 horse y0ptIotKNVU_1 horse y0qGszhFtUc_0 bird y2BOVk7bg7k_0 cow y2BOVk7bg7k_1 cow y2xzls--cC4_0 person y2_iaWWx-C0_1 zebra y3VNGZBlDb0_0 cat y3hSeUaVwAY_0 bus y34cSfArQnM_0 cat y6nBJ0OUtDs_0 person y6nBJ0OUtDs_2 person y67A9YHKh1U_0 person y8ib31rVZA0_0 bicycle y8ib31rVZA0_1 bicycle y8r2SJltJ1M_0 dog y9hu6CyRi5s_0 airplane y_O1AiuRLGA_0 umbrella y_5uacneFuc_0 horse zAvoyJ0_PSA_0 cow zBtuA6r8o0M_0 cat zCG95maa310_0 person zCnZg9VP1xw_0 truck zDs4lXFLJuM_1 horse zD59UHvdpmY_0 person zESRFobSQMU_0 truck zESRFobSQMU_1 truck zHRsZ9HlcBk_0 person zIDehNZ1yiM_0 person zIvzY3cVVbM_0 person zI5cBWlyAMo_0 dog zI5cBWlyAMo_1 dog zJdOWFEL_CQ_0 person zLflV_7noSM_1 airplane zMhr8GZ1QeY_1 airplane zMjW-G29IRA_3 bear zMjW-G29IRA_1 bear zMjW-G29IRA_2 bear zMjW-G29IRA_4 bear zNFb--FJ2A4_0 person zNF5YxfaNTk_0 cat zNfVxQPGrvM_1 elephant zN8rF-AchY0_1 motorcycle zN9Tz6jp7AY_0 person zOLTybhsJ5s_0 cat zORNq_7nmVQ_1 giraffe zORNq_7nmVQ_0 giraffe zOoxYmqzDyc_1 dog zPvrRc94j6s_0 person zP2DkEcgJFo_0 person zP8Recx-KgA_0 boat zQbeiOf9ljM_0 person zU0g6JCyxAs_2 elephant zVVQ63dPpe4_2 bicycle zWQQBElMPYI_0 person zX9OX5I2574_0 person zYvjN5ShZDI_0 person zYzASiLjHgY_0 person zZ8f7oFIg_c_0 person zbtsVe8RQqI_0 person zb8-yrB5SlI_1 bird zcgArp_fmjc_5 skateboard zcsREBhC1Rc_0 dog zdWtCunlv1c_0 cow zdqJTtHvwk4_0 person zd3rNWQ-OUQ_0 person zgJHKszSf2o_0 person zgJHKszSf2o_1 person zgRxry9FvEk_1 horse zgSx8Y5FaPI_1 knife zhDC_SqN7lQ_0 bear 
zhNNahIXxC8_0 bear zjQG5PadkFQ_0 person zj4cs0_VpTk_0 truck zkSIG3AE7tY_0 elephant zmDkkM7Buuo_0 cow zmEU5n2Dy8Y_0 dog zmdKmfMPuvA_0 bird znTYxWfU2XM_0 truck zpEtPFxxD5M_0 horse zqE3Jnn6_gw_0 person zqYLN7vCqcw_0 train zqq508NRpOY_0 person ztMFfJj7jb0_0 knife zt3ojCKnIYM_0 cat zwSnaqQ-5UU_0 person zxiZnbMo3io_0 motorcycle zxiZnbMo3io_1 motorcycle zxzApvuo8Lg_0 person zx0RzA6ts8U_0 cow zyXxWBoTuww_0 person zyXxWBoTuww_1 person zyftQz018g0_0 bus zy0lNSoVB0A_0 cat zzRnX2EiOYU_0 cat z0Tl2FDG69g_0 elephant z1kOi92oBDI_0 truck z1kOi92oBDI_1 truck z1qQ7Ma5C5U_1 truck z1qQ7Ma5C5U_0 truck z18s4h6yW2A_0 bird z2M6XJGE1QM_0 dog z2RqakqNnIM_1 skateboard z29ijVd-dvc_0 airplane z3rcLKwHCxM_1 truck z5-nsuFvaR8_0 motorcycle z7FTg1R3Hik_0 horse z7mLqljZMP8_0 person z709zOu3tM8_0 car z9HO__A5ryw_0 dog z9wpJN1R63w_0 person z-iM0zVi7a4_0 bus z_CQX_gwU_o_0 person z_w1gsSfZhQ_0 person 0AroA_SBRtQ_0 person 0BUPQDR99KY_0 bear 0DDYOUzExSY_0 person 0DGPzzGhUgI_0 person 0DHLS1VDcnA_1 bear 0EeBXB53BQE_0 airplane 0EnI7ZqJvqI_1 car 0EnI7ZqJvqI_2 car 0GzrKbW6Reo_0 person 0G0mSrzOZ2M_8 bus 0G0mSrzOZ2M_9 bus 0G0mSrzOZ2M_10 bus 0IHYTCKh8HM_0 person 0KWfi9m1uZg_0 horse 0KWfi9m1uZg_2 horse 0KWfi9m1uZg_1 horse 0L0JFDbAEZg_0 knife 0Neg9vT08to_0 cow 0NtpuqPU3YI_0 airplane 0N7yCdf7DPs_0 truck 0ORpOxJZo-Y_1 bear 0OqnKMwSULM_0 skateboard 0OqnKMwSULM_1 skateboard 0Pk8OLmmqrM_0 motorcycle 0Pu-_5lNYZM_0 bird 0QKe3M6GiT4_0 person 0Tu3KWEm4SE_0 cow 0Tu3KWEm4SE_1 cow 0TwpPpqiVQ8_0 cow 0U6SmZC1j40_0 person 0VKozmEWjZ4_0 person 0VaX_g70BaY_0 motorcycle 0ZGdpgF-bGI_0 bus 0ZQ_-4ia7z0_0 person 0c-Cwr5rI_A_0 elephant 0c-Cwr5rI_A_1 elephant 0fyRjxenSfY_0 bear 0fyRjxenSfY_1 bear 0f4alYlvEQw_0 person 0gelRcDsNio_0 airplane 0ghRNQFgHow_0 bicycle 0gl1mPRzCqo_0 person 0h9x35zsnyo_0 bird 0iLR3BtDujk_0 train 0iYm4g4D2wY_0 person 0iv0Xw_u-sc_0 bicycle 0i-Nv28lRT0_0 bicycle 0kZSWqFOr0c_0 person 0kidYsWSVvc_0 person 0mbZJnNhckg_0 person 0omh-B4giqI_0 umbrella 0owf_YERias_0 skateboard 0pAMIiK_RDo_0 person 0pm7YRiUKTc_0 horse 
0qVc1Whb3GA_0 person 0qwRoiWnwmQ_0 person 0rQzfr4WVKc_0 cat 0sA23Q_HQr8_2 zebra 0sA23Q_HQr8_1 giraffe 0sA23Q_HQr8_0 giraffe 0sfu67JuBFg_0 person 0ss0_Sgy72g_1 skateboard 0tNuUAe5sNE_1 person 0tNuUAe5sNE_0 person 0txAuEdZYTI_0 motorcycle 0uJKDzuaiys_0 train 0urYbdFc55k_0 train 0utGbb5enqA_2 dog 0utGbb5enqA_1 dog 0vQFT9tfq40_0 person 0viKlMZRKdk_0 person 0v7GMl2k-Sk_3 train 0yCCEL3tl24_0 elephant 0zmzEkQWyps_0 boat 0zraBBQY8ew_0 umbrella 0zyhohOeIM4_0 train 00xcm8_ZTBc_0 person 01CYScp2Yc0_1 horse 01mkUffAvo8_0 person 02zor_ScZfo_1 person 02zor_ScZfo_0 person 03p9Ao9JvpY_0 train 03p9Ao9JvpY_2 train 03u5BWTYiRg_0 train 04Sh9tJvOAc_0 airplane 04UO1jSx2p4_0 person 04gNIg-kFI8_0 person 057f0LfDVoA_1 train 08Nunz5Qngc_0 bus 09jyC-o18uU_3 elephant 09kq3b7cMwc_0 cat 1AcsNm2kiok_0 horse 1BfbSv9ZCu4_0 knife 1BfbSv9ZCu4_3 knife 1BiqFD2BD7Y_0 horse 1C3_qaiKlwo_0 truck 1DHXDdSkk0s_0 bicycle 1DeIbpIRrAc_0 knife 1Dfkbv8bi9k_0 person 1Dz4x50F-RQ_0 dog 1EYL4Mm3dfA_0 bear 1EiH3PTqhLE_0 person 1ExRnJBXYP4_0 knife 1FVN3QOPlR0_0 person 1FVN3QOPlR0_1 person 1GJ0iwyNHIc_0 airplane 1JWHb6FAbmI_0 person 1Knz9s55vjc_0 car 1Knz9s55vjc_1 car 1Knz9s55vjc_2 car 1Knz9s55vjc_3 car 1LmCkh8Dd-o_0 dog 1MmlnQKtd6g_0 umbrella 1M6GhIT94zE_0 cow 1M6GhIT94zE_2 cow 1NThnoBEkmc_0 person 1ONRbj8GKJ4_1 bear 1ONRbj8GKJ4_2 bear 1ONRbj8GKJ4_8 bear 1ONRbj8GKJ4_10 bear 1ONptqLyHxQ_0 dog 1OSa1ptYmzE_0 train 1OSa1ptYmzE_1 train 1Ob23hwFaDg_0 motorcycle 1PSIOY62FBg_1 bear 1Pe9JpKgjGY_0 car 1P8yUGru9R4_0 knife 1RCZCLIZzc4_0 boat 1RGxleB_Ezk_0 person 1RKOWfpa5Dc_0 knife 1RuPxpqNjBI_0 horse 1Tpmsev8onw_0 cat 1TsLUvJiluI_1 person 1TsLUvJiluI_0 person 1UhZKsDTuQs_2 boat 1V-7ErZ83ZY_0 bus 1ZN9xVmQojU_0 umbrella 1ZbSl9tPtbA_0 bird 1Z7CVnRjVT0_0 person 1as5iG4PPas_0 bus 1bFvYEA0U3U_1 elephant 1bveGPhOKuU_0 cow 1cKjzUG0YCQ_0 bicycle 1ceprZO-VEU_2 train 1ecpkwMLabI_0 person 1fOM-kkuRsw_0 car 1ggOn5NDRco_0 cat 1hUe5E9cjiU_0 motorcycle 1iQKKup2m3I_0 truck 1iQKKup2m3I_1 truck 1iSjb4IlqfU_0 person 1i7lugA55RU_0 bicycle 
1i7lugA55RU_1 bicycle 1kZMlCvKoe8_0 skateboard 1kZMlCvKoe8_1 skateboard 1kZMlCvKoe8_2 skateboard 1ksBabVqkMY_0 car 1ltK_3kkqfg_4 elephant 1l7LOpfDmXY_0 person 1ohoCoKJLDU_0 motorcycle 1oyjAtaWDZA_0 truck 1sQ3EL13Vqo_0 person 1tK31PAVNJM_5 elephant 1tK31PAVNJM_0 elephant 1tK31PAVNJM_2 elephant 1tK31PAVNJM_3 elephant 1v2enBiUcqA_0 bus 1wIGd0H1CUo_0 person 1xSI36nguW0_0 bear 1xs-ibIaMMU_0 person 1xyKgJUu0lM_0 skateboard 1zVWBQWZxV0_0 person 1zVWBQWZxV0_1 person 1zqpqKWhr1Y_0 person 10la9pvd-pk_0 knife 11kfBYxzlFA_0 person 12f1R5wMVPs_0 person 12_S_8HkAvA_0 person 1462k8mwVB0_0 elephant 15Lx-nGngUo_0 skateboard 18WxVaz5Ue4_1 skateboard 19A2XM5NIWs_0 person 19UmUpkjRbs_0 person 19oZ30mOTkU_0 boat 1-p8vd0PFQ4_0 dog 1_6ymF7z_iM_0 truck 2ASHEEgYHcU_0 cat 2CF0oQ38cBQ_0 motorcycle 2DM1oM4HFjI_0 motorcycle 2FXE_xO8Mb4_0 bus 2FvnQne8he8_0 train 2GTexq12sBY_0 person 2GTtMvLQqio_4 truck 2GZphW1DkS4_0 person 2HvVFwq85n0_0 person 2Hwu-YpHKw0_0 elephant 2H8AZ00ONQE_0 elephant 2IJ4H46ZxEE_0 person 2INYBScuPM8_0 car 2IqEaQ0oyQg_0 airplane 2JN_uMTDa9I_0 skateboard 2KWlj_ZAw94_0 horse 2KWlj_ZAw94_1 horse 2KWlj_ZAw94_2 horse 2K2gLrhP9AU_1 airplane 2K2gLrhP9AU_2 airplane 2K6iDBPdcHk_0 motorcycle 2LBHZoJ5skk_0 person 2L3uwdhZtV0_0 car 2MJHsLxKUBg_0 person 2MiqTBWBlEc_0 umbrella 2NjC1r6v4IQ_0 person 2O-2zfQxbnA_0 person 2PaTs4s2Ybw_1 bear 2PaTs4s2Ybw_7 bear 2PaTs4s2Ybw_4 bear 2Pa1anwpeKE_0 person 2Q3_TaV8vcg_0 dog 2Rc-oAwMJBs_0 horse 2Tp0YJi7JwQ_0 giraffe 2UpHhiQWzD4_0 truck 2VZlkg5HjME_0 cow 2WTwzNufol8_0 dog 2WTwzNufol8_1 dog 2WtNxQ0RBfc_0 person 2ZXlS-GRWAw_0 knife 2Z6wSOr0jLI_1 person 2a5TUccpQ08_0 dog 2a_-AyOXTXg_0 skateboard 2cFRz-musVA_0 airplane 2cFRz-musVA_1 airplane 2cFRz-musVA_2 airplane 2cFRz-musVA_3 airplane 2dZFWL9XGmw_0 cow 2fCH7TpvtlM_0 train 2fCH7TpvtlM_1 train 2fJ1hPXpiQc_3 knife 2fJ1hPXpiQc_0 knife 2gGuKs-4t94_0 boat 2i45n6p8AT8_0 person 2i_wjgk6DiA_0 horse 2lK0mmHTvB8_3 train 2lK0mmHTvB8_1 train 2lqlNq6aII0_0 skateboard 2lxPwFW5YQo_0 umbrella 2l2gnrYWuWQ_0 truck 
2l7MPXzF64M_0 cat 2l7TuAfDgO8_0 truck 2mO7-ybapaQ_1 umbrella 2nqGkC9ebf8_0 boat 2oA7J6HSmt8_6 bicycle 2oA7J6HSmt8_9 bicycle 2tSpb14o7SA_0 person 2vF8Va9DGSM_5 bicycle 2vF8Va9DGSM_4 bicycle 2vF8Va9DGSM_14 bicycle 2vF8Va9DGSM_15 bicycle 2vF8Va9DGSM_2 bicycle 2vrbssf2sDM_0 truck 2v808Hn8_do_0 person 2v808Hn8_do_1 person 2yEUVUqYMPc_0 giraffe 2ya3SN5pLyU_0 car 2065vf90oIM_0 person 2065vf90oIM_1 person 21GQbN_4k9M_0 cow 21Hp5g5RrOc_1 person 21Hp5g5RrOc_0 person 22iFltXYCcQ_0 cow 22ztStWwd8g_0 train 22ztStWwd8g_2 train 22ztStWwd8g_3 train 23qU2q5u0OE_6 bird 24Zxq5TuxzI_0 cow 26kWe8Ikgxk_0 bird 28AecePdVok_0 truck 281z-ZLrI3g_7 bicycle 281z-ZLrI3g_4 bicycle 29bWSLuiEl0_1 person 2_R2wz82ugQ_0 umbrella 3A4oCDgMkHw_0 cow 3A-dEIjnmyE_1 skateboard 3Bag9o-z-Ks_4 bear 3DN2iQJzM-k_0 train 3DaASBRARLQ_0 cow 3D8wwibqkYo_0 cow 3EtIKWgGaKY_0 person 3FJ4ZWRq_S0_0 person 3GLXlSuXWcs_1 cow 3GQxmRKhMMY_1 airplane 3GQxmRKhMMY_2 airplane 3GQxmRKhMMY_3 airplane 3GQxmRKhMMY_4 airplane 3GULyU-IOhA_0 person 3HFqP9a97kA_0 bird 3IgOwKkKALw_0 cat 3LruhG4SULI_1 truck 3LruhG4SULI_2 truck 3LruhG4SULI_7 truck 3LxUuC1C4y8_0 bird 3L7LWpMShiw_0 skateboard 3L759GhRx6M_0 person 3MiM8HSul5A_0 cow 3MiM8HSul5A_2 cow 3MiM8HSul5A_4 cow 3M9T5RFr_9s_0 person 3OmdALGspY8_0 person 3O4ynxtRIDk_5 train 3O4ynxtRIDk_2 train 3RLrjX-XB98_0 person 3RhgYReCxjo_0 bus 3S-lQgiUWVU_1 horse 3S-lQgiUWVU_0 horse 3UDEQElT2yQ_0 train 3WhmVhG1ZwU_0 boat 3WrB7zPpcHU_0 cow 3XDvXaNmGpM_0 dog 3XDvXaNmGpM_1 dog 3X29L9uQCqc_0 train 3X29L9uQCqc_1 train 3Y7-acGE4Wc_0 person 3ZBYYBUfT6E_0 train 3Zwa4XoeZcA_0 person 3bSWlbx1o3I_2 bear 3cOMDXFxcOQ_0 cat 3dvUlr2yxz4_0 train 3g4c88ocJ38_0 skateboard 3hMszgfh_qA_0 bicycle 3hR78-EVNEE_0 truck 3jdK8UPhpO8_1 skateboard 3jdK8UPhpO8_0 skateboard 3kdpeeQ1Jnc_0 car 3kd_QEZRUWc_1 truck 3kd_QEZRUWc_5 truck 3lHqsoi5cgo_0 person 3liK-2EflUk_0 car 3mIRDwcY1Lg_1 person 3m5eMVv4z6w_1 bear 3nD6nhJtxIU_1 skateboard 3nbim5nlANI_1 horse 3q6LFZBelUs_0 person 3rSUjqH5Wlw_0 truck 3sEpU7UoQP8_0 person 3sg9txiHCp0_0 
bear 3szPqA1S6P0_0 person 3tv_dUR84cE_1 airplane 3tv_dUR84cE_0 airplane 3uG4S1gvMxs_0 bird 3uVS_DAYfvY_3 car 3vuykX663QA_0 person 3wI_ureHDBY_0 train 3xLvnY9w5y0_0 person 3xy8Fz8Nsgk_0 bear 3zV0wmpiS78_0 person 3zccg30U6vs_0 person 30AwDyYIr7o_0 skateboard 325FEWXtOYw_0 person 3293hM-lzx8_0 person 32_1y90B5eQ_0 person 34L4iiCFTXM_0 airplane 34Pma_R21A8_2 person 34jFMRay1zg_0 person 35-MplWeZYQ_0 motorcycle 36zopo-HS48_0 person 38fx_nvlYDE_0 truck 39yxd86tGLU_1 boat 3-ugxoEDuFY_0 person 3_DeqcBRuwE_1 elephant 3_DeqcBRuwE_3 elephant 3_w3NNPGotM_0 person 4ARhlapmEmI_0 dog 4Ac5edN3qIA_0 elephant 4Ac5edN3qIA_1 elephant 4BItGVIP3_w_0 cow 4BItGVIP3_w_1 cow 4BO3P7E3NDE_0 truck 4BO3P7E3NDE_1 truck 4BO3P7E3NDE_2 truck 4Bw4gKDBQCM_1 dog 4C8rmAORSg8_0 person 4Dcg1W7RRmQ_1 train 4ENxW7OPynQ_1 car 4ExA1FWRfMM_0 dog 4FVfzA07rVs_0 person 4FVfzA07rVs_1 person 4GgzQqhrTmA_0 train 4GrMZIyjUdo_0 person 4IUjw1DfTd4_0 cow 4ItJTYAUV3Q_0 cat 4IxmhmTsSRM_0 person 4I72WJJrc1o_0 person 4I72WJJrc1o_1 person 4KFEzxXCjmw_0 car 4KYtNfb0-64_0 person 4KqP6ylUZpI_0 umbrella 4LHOLAPnjV8_0 boat 4LXlXP1epJE_0 person 4MFPOb36tfo_2 bear 4MFPOb36tfo_1 bear 4MZrjdSF01s_1 boat 4Me3lyNuZ7k_0 person 4M9sKAzevzo_0 train 4NI5ycFo2TA_0 airplane 4NI5ycFo2TA_1 airplane 4NKnUR1OMGo_0 horse 4NKnUR1OMGo_1 horse 4Ng6OxFQ9RY_3 bear 4Nx45ho9gSg_0 person 4PNJ3ZV4f8E_0 airplane 4PNJ3ZV4f8E_1 airplane 4PNvdZPZIdM_0 train 4PhakAK74GE_1 motorcycle 4PxLGSy75rk_2 knife 4QOhfEMrhzU_0 airplane 4Q0M6mWNDiU_0 horse 4RhaYtFsnGY_0 person 4SrP2aSHoRk_0 person 4TyWpb19rk4_0 umbrella 4U9sm_eqKTM_1 car 4U9sm_eqKTM_2 car 4Xd_k2REw4I_3 bear 4YRd-9lHLko_0 truck 4ZIgGDQB_R0_0 airplane 4ZYWcd-Fdzg_0 person 4Zxsg6aJ9tA_0 person 4aOWHpM7rOM_0 skateboard 4avaoLry8L0_2 skateboard 4bHGieqZfUk_1 knife 4duFrAfYG8k_0 person 4d6P5umc9j0_0 bird 4fIznTWAFRw_0 horse 4fIznTWAFRw_1 horse 4fIznTWAFRw_2 horse 4f_X4WbQu4M_0 elephant 4hCLCX2lLGk_0 person 4iBMfS5mIt8_0 bird 4ibKNzoA1tQ_0 truck 4igLFns238c_0 motorcycle 4kGNxHIXcUA_0 person 4kLhVZ9UGDE_0 
skateboard 4lC7BU1eHxc_0 bus 4l683stlRno_0 knife 4mv1Nx0j3k4_0 person 4nz8CN4XlBE_0 dog 4oWXZIsPnEg_4 elephant 4ofuHARhFlQ_0 person 4pYH5Cm7Vkg_1 boat 4p3JGxvfiNE_4 bicycle 4p3JGxvfiNE_8 bicycle 4p3JGxvfiNE_10 bicycle 4qBYTh0AcfM_0 train 4qIx-9Qs3Zs_0 airplane 4qIx-9Qs3Zs_2 airplane 4qRkIra0ARM_0 person 4rhkfDV0QC8_1 truck 4ry_MJjFDUA_0 cat 4skAfQd8nX8_0 person 4t79zNxVi0Y_0 elephant 4t79zNxVi0Y_1 elephant 4uFHcf-qpkU_0 horse 4uwly-P5oxg_0 person 4uwly-P5oxg_1 person 4u7pm-h8fiE_0 person 4wox28JkSKY_1 person 4w3ykGq-Q_E_0 bicycle 4w3ykGq-Q_E_2 bicycle 4w5q5RdJ5g4_0 horse 4w5q5RdJ5g4_2 horse 4w5q5RdJ5g4_4 horse 4x80RbpjCPM_0 bear 4x80RbpjCPM_4 bear 4yFIyyevEVY_1 airplane 4ycylGSteiU_0 truck 4yjvwunpMKI_0 car 4yjvwunpMKI_1 car 4yjvwunpMKI_2 car 4yw2hFyx47Q_0 person 4y3qJAq5ap0_0 car 40QgDL4dxrc_0 airplane 40deMboVqPI_1 bird 44FNsfkuWOI_0 elephant 44hlNbUHL2c_0 person 44672wUoOwM_0 person 46NXMVbpzZw_1 boat 468w3XkLHwc_1 boat 47Nn3ywWOlU_1 person 47cBD-Sq9mw_1 person 48ujtCaCdX0_0 person 49CwzbRIUpI_1 bird 49a6EgDu-ZU_0 truck 4-GpBan9Z8s_0 horse 4_A8f6NAa3w_0 person 5BHekdOG9JA_0 elephant 5Bw22C4nsb4_0 train 5CPZUe4hn0M_0 airplane 5DS23LkFit8_0 cow 5DVU9wTDzN8_0 skateboard 5DjSsYt5N4Q_0 skateboard 5FAbvaslTQE_0 motorcycle 5FXOzzaKrcw_0 airplane 5Fro7Bo628Y_0 boat 5FxLl3jd7I0_0 skateboard 5F5fgLUXow8_3 car 5F5fgLUXow8_7 car 5F5fgLUXow8_8 car 5F5fgLUXow8_0 car 5F5fgLUXow8_1 car 5F5fgLUXow8_2 car 5F5fgLUXow8_4 car 5GMISyAZA9o_0 horse 5GpziDmwRTc_0 cow 5JPqrGj3CgM_0 giraffe 5Ko6ZHOz4IY_0 person 5Lbguv7FGLM_1 bird 5M7Wx_HJ_XQ_0 person 5Nz4g-YykuI_0 person 5O41yfenxMM_1 cow 5PeDI6XI7is_3 horse 5Qd986abGHo_0 person 5Tza7UHp3xE_0 train 5WTw98UVUCo_1 horse 5WpjuP9uJrI_2 bird 5W8Hg8uhxgQ_0 car 5W8Hg8uhxgQ_1 car 5XEAIdyb_ng_0 person 5XcopMzRch4_0 skateboard 5YbA5Uw-5xQ_0 person 5YbA5Uw-5xQ_1 person 5bIO0Gl25u0_1 boat 5bIO0Gl25u0_0 boat 5dGbxAkTDPM_1 cow 5dRnssv_jug_0 cow 5eRQh3Rv1Lk_0 horse 5eak0nLYZC0_0 airplane 5enKNMe1Dpg_0 person 5eq6WBGMyME_0 giraffe 5eum6r7kxbw_1 giraffe 
5eum6r7kxbw_4 giraffe 5e84K5OEIj4_0 person 5fXoyIBk_gI_0 person 5gNgZQ0nDW8_4 knife 5gNgZQ0nDW8_5 knife 5gNhZJMFmis_0 bear 5gNhZJMFmis_1 bear 5gbLo2hItTs_0 person 5geZjQ9qAJU_0 motorcycle 5iDhgUX1kdc_0 person 5iwoWJK4GGo_0 car 5ll8fjNhIzg_0 person 5lv2GCs3_E0_0 person 5l9rlcuS7pE_0 bus 5mocfP3c3JE_0 bear 5mqvNWXtMCU_0 cat 5nAuDbKmWLY_0 elephant 5nC2ZXfE-sg_0 train 5nkh3PK6lBs_0 cow 5of5t38DQL4_0 cow 5okxoIw3cJI_0 skateboard 5ovlgihl130_0 knife 5phhj08_8hI_0 dog 5psIBlFu-yQ_0 person 5rh7nf5z_O0_1 cow 5rkM4mLsQoU_0 knife 5sIj93XnVc0_1 motorcycle 5sjUnvABkko_0 airplane 5s4kqURLLo4_0 person 5toRpAYrY_4_0 person 5uYObEyAbCQ_0 horse 5ukcjpXOopg_0 person 5vPXxAEGTrw_0 airplane 5vUtusnPXXs_0 bird 5vaBUAh4HkU_0 airplane 5yMeqHPiJgY_1 horse 5yMeqHPiJgY_2 horse 5yMeqHPiJgY_3 horse 5yeSANffSRk_0 person 5yeSANffSRk_1 person 5zJuhMtO1F8_0 bird 5zKtWxffw-0_0 boat 51rDJW0FO8w_0 horse 51yQTVmaMXw_1 motorcycle 52UjkVxSSHg_0 person 52VFNDCXUHg_0 person 52pNzl4wrxs_0 person 52wdqvYrGv4_0 person 522wkm19sH0_0 bus 54icMYqqx_w_1 bus 55H1IVgQj3E_0 boat 56BI7lH0z1g_0 person 56bgv0J-cXw_1 knife 56bgv0J-cXw_4 knife 56r2wDCnuQQ_0 horse 57BY7QjcYbQ_0 person 574FA_5qp-s_0 bus 58K_ZPS7U8M_0 person 58gdyHWU6do_1 truck 5802XdQdAkU_0 cow 59JJGcB2jRE_0 horse 59JJGcB2jRE_4 horse 59JJGcB2jRE_2 horse 59cXOQc39JI_1 zebra 5928Zhy26yI_1 giraffe 5-Oeo8tmauc_0 bus 5-Oeo8tmauc_1 bus 5-Oeo8tmauc_2 bus 5-O2xma48Tw_0 bird 5-y_Rrr8shw_2 person 5_njhyGAXdE_0 truck 5_njhyGAXdE_1 truck 5_njhyGAXdE_2 truck 5_2sGSrZblY_0 person 6AD9GHHEVkE_1 boat 6AYkCla5Oak_0 car 6A2LC4_gts4_0 person 6A2LC4_gts4_1 person 6BB65BA-pS0_1 knife 6CKS3WJRpHI_0 person 6C1C-L7L6CE_0 person 6DQ-H73b62Y_0 person 6EHcwJiML3g_2 person 6GlBa-DUEqc_0 person 6HlTwF1ZDkc_0 person 6HrWOx9GfzI_0 person 6JrhpITR8po_1 cow 6JrhpITR8po_0 cow 6KpKxtwB1Ww_0 person 6LiW0KF3fME_0 person 6Meaw8zK8sU_0 person 6M3wDWZDZJ8_0 car 6M4oJG9NsRM_0 person 6Nc1z3BVzlI_0 bear 6OlxDr5vZuI_2 horse 6Ona04rOyZk_0 cat 6PBKPTCkWOo_0 person 6PH-mFChsi0_0 airplane 6PwE6q6pebc_1 
person 6QFs4uNsSt4_0 person 6RIFox7kLqY_0 cat 6SBj14dkVPM_0 cow 6SdX0oE9Qm8_0 cat 6SizSdOT9_k_0 horse 6TEQ098RfzE_0 cow 6TQ8X9G4BAY_0 dog 6UQbOOWv_ws_0 cow 6UQbOOWv_ws_2 cow 6XUe2u2YWkQ_2 umbrella 6bJPo4tzJvQ_0 person 6bco275PcUs_0 truck 6bco275PcUs_1 truck 6gwBOlfJ34I_1 skateboard 6gww5ltOLQY_0 bird 6gww5ltOLQY_1 bird 6hAG7632JjA_0 cat 6htKDjHsXPQ_0 cow 6id5A0aiJbE_0 train 6jwTUZocHXY_0 horse 6j07-PcNv70_0 truck 6kjb3q8EygI_0 elephant 6lAxaY4AYB8_0 person 6lPPfWdeBvU_0 cat 6l3SpVgqJY0_0 person 6mYi-vXre4Q_0 truck 6med3JZ2k40_0 person 6miVJWDTBCY_1 train 6n6fVeWD_m0_0 knife 6o61j0KZ9cA_0 person 6pPjKIlVlfY_0 bicycle 6pnenPlFGIc_0 motorcycle 6pnenPlFGIc_1 motorcycle 6pny8Td3Lvs_0 horse 6qRIuIHqJco_0 train 6qSDUh2ES7Q_0 person 6qVpY1VC2hU_1 cat 6qhp1FiVbBQ_0 knife 6rlBtCRp25g_0 cat 6r0rYZCL4Qc_0 person 6r0rYZCL4Qc_1 person 6uMmknjq0mg_0 bicycle 6uSZqFsKMGI_0 cow 6um2PoiKfT4_0 motorcycle 6vAGEaKFuyY_1 bus 6vAGEaKFuyY_2 bus 6vafM_LKdhA_0 umbrella 6vc8u4MPWkY_0 bird 6v_NKAM10sA_5 bicycle 6v_NKAM10sA_9 bicycle 6v_NKAM10sA_10 bicycle 6v_NKAM10sA_11 bicycle 6v_NKAM10sA_12 bicycle 6v_NKAM10sA_0 bicycle 6v_NKAM10sA_1 bicycle 6w-nwNFVYm8_0 motorcycle 6y78kiGuIAk_0 person 6zPET0HFVaM_3 train 6zPgsocp4bY_1 bicycle 6zPgsocp4bY_2 bicycle 6zPgsocp4bY_3 bicycle 6zPgsocp4bY_7 bicycle 6zPgsocp4bY_9 bicycle 6zW1omjPFRs_0 elephant 6zW1omjPFRs_1 elephant 62MEsd3U1aQ_0 person 62PpG0cOcbU_0 person 63vKOQ-SCBw_0 airplane 63_kFJCm2pQ_0 person 64yGcACuF0g_0 cat 64yZxDGH92I_0 person 64-njkqyF7k_0 bus 65u4BXZ10RY_0 dog 65u4BXZ10RY_1 dog 654ylXfWndU_0 boat 66HPgc7Up3o_6 horse 66HPgc7Up3o_3 horse 66HPgc7Up3o_4 horse 66HPgc7Up3o_7 horse 66N_Ju8hg2U_0 knife 665JKK-JrTc_0 person 67kix34dj7A_0 truck 67wgEifQYpg_0 person 68KnEa1hVf8_0 bicycle 6-Z9S0qy8ys_1 dog 6-7x1BQGuQE_0 person 6_nq4o_21CY_0 elephant 7BBHz6wfABM_0 person 7CYm8WQftfw_0 bus 7DIXCjEBWLw_0 airplane 7D-ypPzaTDI_0 person 7GvsFRhnxWc_1 bird 7G2sXxpbA-0_0 motorcycle 7HXox1j1X2A_0 person 7Hthj7LhsoI_1 elephant 7H1AhHiyip0_0 person 
7JXhfaNTsUQ_2 bird 7K61aiu3UsM_0 person 7K61aiu3UsM_1 person 7LKG4ReUlZA_0 person 7LTKFUY3Xo8_0 bird 7MQZWaHzUOo_0 cow 7Mb_dcvNENM_7 bicycle 7Mb_dcvNENM_3 bicycle 7Mb_dcvNENM_4 bicycle 7Mb_dcvNENM_5 bicycle 7Mb_dcvNENM_6 bicycle 7NDhXBp57BY_0 person 7NFMDZwqdw4_0 person 7Ng49Wed4Y4_0 cow 7Ng49Wed4Y4_2 cow 7NxvW5DSQrI_0 cat 7O8grUKQopY_0 person 7PeZgsBNi5g_0 car 7QauV6mvt98_0 car 7RxzfGFIxSg_0 cat 7Strg7qJtW0_0 elephant 7Strg7qJtW0_7 elephant 7Strg7qJtW0_1 elephant 7Strg7qJtW0_2 elephant 7Strg7qJtW0_3 elephant 7VQ8QZRnxD8_0 cow 7Vcfkjk--Fc_1 dog 7V5Q7Te4KNI_0 bus 7WZRhdW3Ysw_0 elephant 7XQ-ufhX7gc_0 cow 7XQ-ufhX7gc_1 cow 7YCox5adS-U_0 person 7YQM-nFSHW4_0 knife 7Ya_jh9VO9U_0 person 7aTla4KAK_U_1 knife 7bqlApH5GwI_1 bicycle 7dFEYp-1Hgo_0 person 7e8WNmzDHUQ_0 person 7fF7heSCMTw_0 motorcycle 7fRxyCT-Wao_0 giraffe 7fRxyCT-Wao_2 giraffe 7fSMUG5W8vk_2 bicycle 7g8SI9aAn70_1 umbrella 7hIJP5KExbE_1 elephant 7hjOcuaQm7I_0 elephant 7kPsaqRQBCk_0 knife 7kl1hNW3aVs_0 motorcycle 7k7H9RKhOF8_1 skateboard 7k7H9RKhOF8_3 skateboard 7ledBa3nuVs_0 train 7ledBa3nuVs_2 train 7m98zjjFHbU_0 person 7ntsSm-LFZA_0 person 7ntsSm-LFZA_1 person 7nzY38tPTM0_0 person 7nzY38tPTM0_1 person 7n8C_td0Th8_0 horse 7p4RxRFB_Eg_0 horse 7rE5dIroJwQ_0 person 7rifGM-TuPA_0 horse 7trl2U6nLPc_0 horse 7vyHv7_GxbQ_0 person 7wte1pPBwQ0_1 bear 7w616uMnI_8_0 elephant 7w616uMnI_8_1 elephant 7x8K4JervhE_0 bus 7y0joj813H0_3 bus 7zRaB-2B7B0_0 train 72RzEHZFYtM_2 airplane 72RzEHZFYtM_1 airplane 73Wonc3xnLI_0 person 73Z4KnnAMlU_0 person 74gRlu6vJLY_0 person 747bRdBUPSw_0 person 76LU6w1a7UA_1 airplane 76PIBEC3WVo_0 skateboard 77GychcVDRI_0 person 77dvi_3OU4M_0 person 79MY0qku9uc_1 horse 8AgZqrCi9no_0 horse 8BK44tI3ACo_0 person 8BQJVHpHFsU_1 dog 8BQJVHpHFsU_2 dog 8B3bbakza_Q_0 person 8CJRCoA1Rps_0 person 8ClOgfNAjXs_0 giraffe 8DlXcc1IXlw_0 car 8EwDzFi34nA_0 cow 8FEp5ORJ27g_0 truck 8FyuS809d24_0 dog 8FyuS809d24_1 dog 8GGi0BXLCaM_0 person 8G_vBzM-Ws4_1 umbrella 8HcyzPUv5ag_0 person 8JIpa6tfWzo_0 airplane 8JKJnuN_UTI_0 cow 
8JhHIO_7m-0_0 cow 8LGnOH6nDbc_0 dog 8LGnOH6nDbc_1 dog 8Lx004yCltY_6 elephant 8Lx004yCltY_12 elephant 8Lx004yCltY_18 elephant 8MO_kng7L-s_0 person 8MO_kng7L-s_1 person 8NlznvdsNJQ_2 boat 8N8hB2Au4JE_0 person 8Pbd3dd3v5E_0 person 8Pz3xq3KFo0_6 elephant 8Pz3xq3KFo0_4 elephant 8Qr-5_567tI_1 truck 8Q8g9z-DNF8_0 motorcycle 8RZsKbffdqI_0 cat 8Sbz2MGzhp4_0 person 8UcqXCLmq-M_1 elephant 8UcqXCLmq-M_3 elephant 8UcqXCLmq-M_6 elephant 8UcqXCLmq-M_7 elephant 8Ul_lS0g_RU_0 skateboard 8UmKRVMR08g_2 bird 8U7BmrkcgcU_2 truck 8VkbfdMQrR8_0 person 8VzjERSpeS4_1 elephant 8VzjERSpeS4_0 elephant 8WcBoYh-IMg_0 bird 8X27eyH-tx0_0 car 8Zi2bsTpMeY_0 person 8ZmfZDMaVhg_0 cat 8Z1GvAHPEnU_0 cat 8a1bD-UgfKE_0 truck 8bD-aqWPxwM_0 motorcycle 8bE_FhrjBuM_2 skateboard 8bE_FhrjBuM_0 skateboard 8bE_FhrjBuM_1 skateboard 8bypIjdKgEI_0 person 8b5fedIr-WQ_0 person 8cNzCe26dSM_0 person 8cSOpd9gaPE_0 cow 8c8TJ_Jzngk_0 horse 8d6950aGpD8_0 dog 8eK3ktD9j5o_0 horse 8eK3ktD9j5o_1 horse 8ewNcrMhg-w_0 person 8gsiG2Wu3YM_0 giraffe 8hFEJz0GvfU_0 elephant 8hwa44VMdLs_0 person 8h8Cpkugo-Y_0 elephant 8h_eY7zEIqk_3 truck 8iBiHoA_OJk_0 person 8jRFQ8RKZ0s_1 car 8kTREwiI1-8_0 cow 8kn6PJbtsyA_0 bicycle 8kn6PJbtsyA_1 bicycle 8kn6PJbtsyA_2 bicycle 8kn6PJbtsyA_3 bicycle 8kn6PJbtsyA_4 bicycle 8lKXEr2W3yM_0 knife 8lMRKCKyBwk_0 person 8lonNtE99PI_1 person 8l7UmXXnAJs_0 truck 8mlHevSC8cc_0 car 8m-GtOBjbzY_1 bicycle 8nWSGwlJyPQ_0 cat 8nsl-r_i0AI_0 person 8n3A8io4GNU_0 person 8okfUuO0Pvc_1 bird 8poWB-6q4xk_1 bicycle 8p2saqn2kiQ_0 person 8qFJg_AoKeY_0 cow 8qulLm8MYrM_0 bus 8rBxRMDJEFY_0 person 8sOWPIfWpCM_0 horse 8tKto2zQWUg_0 elephant 8uoYlmdJlAo_1 knife 8wdvLn40CTk_5 bus 8wdvLn40CTk_0 bus 8wdvLn40CTk_1 bus 8wv3WJBJmog_1 dog 8yFZUTSjpos_0 motorcycle 8zBx-nHUqBY_0 person 8zUAF30Hu6c_1 train 8zUAF30Hu6c_2 train 8zftjn0I9TQ_0 truck 8zftjn0I9TQ_2 truck 8zjgYuK3nVY_0 person 8z-YLOzAxb4_2 bicycle 8z-YLOzAxb4_4 bicycle 8z-sTr28AWk_0 skateboard 80CcMFD-Rcw_1 person 80CcMFD-Rcw_0 person 81cNVk8boEM_0 person 82lK9rB-e08_1 motorcycle 
84P6L_HrN48_0 bird 88N5__h7Zdo_0 bicycle 89a461_gh2o_0 bicycle 89mGhzBokZ8_1 bear 89qfsC77BYk_0 person 8_oUj2cuPdo_0 dog 9A-VO1zCZJ4_1 motorcycle 9BVgbNz-bi8_0 person 9BVgbNz-bi8_1 person 9BpvtvUGG5g_0 person 9DGpFjuUVBk_0 person 9DY0dTRH5xI_0 bird 9D5ORdC7BuQ_6 bus 9ELQq5BMR1U_0 person 9E8VBIYmTGY_1 cow 9E8VBIYmTGY_0 cow 9FAB9BrcQls_0 person 9FTOvdcnzDQ_0 airplane 9GdhKEBm0pA_6 bicycle 9GdhKEBm0pA_1 bicycle 9GdhKEBm0pA_3 bicycle 9HqapwdLVzk_4 knife 9KfdTsjy53o_0 truck 9LHbQA-pT0U_2 horse 9LJRUmW_AII_0 boat 9LOpNoTFWKg_0 truck 9LOpNoTFWKg_4 truck 9LOpNoTFWKg_1 truck 9LOpNoTFWKg_2 truck 9LqExSHe9y8_0 knife 9Ls7gSZQt1w_2 bear 9NsmnTdRiik_0 airplane 9PsezNNV0Jc_1 airplane 9PsezNNV0Jc_2 airplane 9PsezNNV0Jc_0 airplane 9Q3srzApSJU_0 person 9RGlWjTKvE0_0 bus 9RZCK24Shec_0 cat 9ScZtgWAJZA_1 person 9SgrA5Q1d94_0 person 9ShZpsmuvc4_2 skateboard 9ShZpsmuvc4_1 skateboard 9UU2h6M8DJk_2 truck 9UwLiWKOIGY_0 person 9U-tccGetsk_0 knife 9VwSYjCCRYk_1 truck 9VwSYjCCRYk_2 truck 9WDPvYpnrfU_1 truck 9WDt0JjOFIA_0 person 9YVkZ7QxD5E_0 person 9Y6XZFO31JU_0 cow 9ZpZZoTtySo_1 bear 9Z0Jz1tesQ4_4 cow 9Z0Jz1tesQ4_1 cow 9Z0Jz1tesQ4_2 cow 9Z0Jz1tesQ4_3 cow 9aQOAnspXGo_1 bird 9bYPYgMQVjU_0 person 9bzmQFGK8m8_0 person 9dOPPvgyMqk_0 person 9eI_0DoOE08_0 person 9eI_0DoOE08_1 person 9g8o260G10k_0 bird 9hAU80xKWy0_0 truck 9jS5MThAtmo_0 person 9kGuuCx39JA_0 motorcycle 9lsXenPJ-X8_1 bird 9ltdzlYXfp8_0 cow 9ltdzlYXfp8_3 cow 9muklrcigJY_0 dog 9nqU8e9IUPU_0 skateboard 9pEB8cjvPSQ_1 horse 9qamzN9bwxw_0 person 9rvVWyyuud0_0 person 9r1FvK19XV8_0 person 9uhZRDsQKnc_0 person 9yt1if13PHk_0 elephant 9y5txKR57mc_0 bird 9zBCjCtH3Eg_0 horse 9zqk5w8Qx1Q_1 bicycle 9zroWMwZHGI_1 person 907A5I4-LpA_0 motorcycle 91SWvU-5TcI_0 person 92MaWPuO8PI_0 boat 92560YiwSP0_0 person 93gyPa_dPGU_0 truck 946wiAK4Seg_1 person 95CV_olHtcI_0 person 96WWGXa4QrI_0 car 96akJFw5SPU_0 truck 96iqXHgOXKY_0 person 98XiF-Z__aI_0 cat 99Tb7HSFn3I_0 person 9_bFE0FUq_c_1 knife -A-tBuMjU8s_0 cat -B4YQQLrOfI_2 skateboard -C0rYHhL_x4_0 motorcycle 
-DYf49hlRSE_0 person -Ebcfmg0-eE_0 person -E05a-eQSwY_0 umbrella -FMaVn21dYU_1 horse -Fu9coX9J-A_0 person -Fu9coX9J-A_1 person -Gk4iMiEMCc_0 person -LVtIbelA3M_0 horse -LXr7LdXtrk_0 boat -LjAFTF5WP4_0 bicycle -LjAFTF5WP4_1 bicycle -LjAFTF5WP4_3 bicycle -MpLPuviQ00_0 person -M_jT3EYgcc_0 person -NWvB2g952Q_2 bird -OZt785bbpY_0 airplane -P37Y1G6oHk_2 airplane -P37Y1G6oHk_3 airplane -P37Y1G6oHk_0 airplane -QBeUV_OkJg_1 dog -QQCINzsXpw_0 person -Q6g2xZ0PxY_1 airplane -RjxMfaV-Vo_1 knife -RjxMfaV-Vo_2 knife -SPHavKGd3M_0 skateboard -S8L2HACCPE_12 elephant -S8L2HACCPE_1 elephant -S8L2HACCPE_10 elephant -TKKOo1FfAI_0 bird -VgWHKeRRjs_0 airplane -VgWHKeRRjs_1 airplane -WyEyKxdZOQ_0 person -XWeGpACKwc_0 skateboard -Xj6MiGVWt0_0 person -XwZnoNm0FU_0 dog -ZDO95E0pl8_0 person -anX-ad_gHQ_0 person -avz2OsPIq4_2 bicycle -bJkl4q5f-A_0 bird -c1b7nHzGn4_0 airplane -dQnNlBQp3o_0 person -db_SToBhkg_2 motorcycle -eZUdm8ERQQ_0 person -e42Pb0YeOY_0 cat -fnhznKC3CU_0 person -f0JLwuyuTM_0 person -jL0HOXwYls_0 person -kLIF2a7yeU_0 person -k1TxEpOgnA_0 person -l9NS6DuRPI_0 person -mgNwLW3ODc_0 person -mwDgqLpu-k_0 skateboard -nOfuA8B7As_1 bicycle -nzXunuZac4_0 cat -oG6YVPhC_I_0 horse -o28rb1UnYA_0 car -sJOJNjOCBI_0 motorcycle -sJOJNjOCBI_1 motorcycle -sWch1rnO10_0 person -th9NS9hl6s_0 cow -uP01llwXFY_5 boat -uP01llwXFY_1 boat -u5MNR-9ClU_0 person -vkMKVuweFA_0 person -v7FXEhgwtE_0 person -y652b4w3Ss_0 bird -zqHD6Jthqg_0 person -0U1vm6LIi8_0 person -1je1K1ihbk_2 skateboard -2iw3MzUP2Y_0 motorcycle -3OvKcu5P2U_0 car -3fzr21Ov5w_0 person -6vJDV8XnWE_0 boat -7Im8MyvaXU_0 cat --8shIp3t0I_0 knife -_iBuJTwjw8_1 horse -_xag4X_Do0_0 bird _ATEx5gbBEQ_0 knife _ATEx5gbBEQ_1 knife _AcvI8VF5ig_0 cow _Ae4vmwt8uA_0 person _Auvs-o5Pck_0 truck _A8nA25Tq8c_1 person _C_yvxdjVGA_2 horse _C_yvxdjVGA_0 horse _DXAxnPIiBU_0 cow _D-9w3aSX50_0 person _GyE3cPQ6U8_0 car _HN1_MjnjWo_2 elephant _HYaLoOKE84_1 cow _IhkqtAQHBw_0 train _InrHPE8Umw_0 motorcycle _IpUnYit3Pg_0 dog _JNG6qK6INs_3 bear _KzDIvt0cCk_0 person 
_K6jYgDC1JU_0 airplane _NZ4o-omJLE_0 umbrella _NtOMcyVAp4_1 dog _OmnjH4t-IY_0 person _QF0A9B-xB8_0 person _QRy9nd4kcg_0 airplane _Q9M8QAjSMk_0 person _Rd-wEO2r10_0 person _R6nlDzh6Tc_0 person _R6nlDzh6Tc_2 person _T0O1BlYjaU_1 bear _VegkTdhrQE_0 motorcycle _WKJaPPBz8Q_0 umbrella _WcqTpLKkww_1 truck _Y6_E1l4blQ_1 knife _ZDU4qi4lcI_2 cow _ZDU4qi4lcI_0 cow _ZDU4qi4lcI_1 cow _ZHmkH59bCQ_0 person _ZXqLyRe4n0_0 elephant _ZsogS9uPJQ_0 person _akq_DieEWE_0 person _akq_DieEWE_1 person _bO2sdIelLY_0 person _dC_upYbxWI_0 knife _eCb7mFYyIg_0 motorcycle _egWujmdZtw_0 person _epdfuB0qRM_0 car _e5Vvy9DJ9E_4 bear _e5Vvy9DJ9E_0 bear _foK5Dvj1As_0 bird _hryEVGKNuw_0 horse _iY4AnGfq0Y_0 train _jBzwdg0QRA_1 bus _jci9tIBIB4_5 truck _kdhlRke8uI_0 person _kfdh_5bI-Q_0 person _lmD-useijU_0 person _mJBwuCegJ0_12 truck _mJBwuCegJ0_1 truck _mJBwuCegJ0_2 truck _mJBwuCegJ0_8 truck _mJBwuCegJ0_9 truck _oRtPVRmtwo_0 dog _pEHwWe2seA_5 elephant _sV1Jd1uiYg_0 person _tZU1XTOML4_0 boat _usyDpllGBo_0 horse _vBAv8cBoqE_0 skateboard _vV0wdWq0cU_0 person _xMVx44FbT4_0 horse _xQn3TupjYs_0 cat _xy58m6yCko_0 motorcycle _yQQjARqD1s_0 boat _yfoe4GCA0Q_4 airplane _yfoe4GCA0Q_2 airplane _yv5Cwbm9EA_0 person _zIDofZkgS4_1 truck _zQt1CSSKyA_1 bicycle _0eR2vQAEqE_0 elephant _0eR2vQAEqE_1 elephant _17u-cPTYt0_0 car _17u-cPTYt0_1 car _2mIWIhbDPY_0 bus _37U5Elgnck_0 person _5fE6dP48FM_0 cow _5sIT4l5izM_0 knife _6qUuUUYvUQ_0 person _7zbbqEa3nw_1 train _7zbbqEa3nw_4 train _8VTthFkvS0_0 bird _8iyumFI4sQ_1 elephant _8iyumFI4sQ_2 elephant _8iyumFI4sQ_3 elephant _81FImml2gk_0 dog _9bypka_Q4c_0 bus _-CvwC7H730_0 person _-XcxnQLKPM_0 dog __Q5A7gExpI_0 person ================================================ FILE: artrackv2_mindspore/lib/train/dataset/COCO_tool.py ================================================ __author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. 
pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # annToMask - Convert segmentation in an annotation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>annToMask, COCO>showAnns # Microsoft COCO Toolbox. 
version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import copy import itertools from pycocotools import mask as maskUtils import os from collections import defaultdict import sys PYTHON_VERSION = sys.version_info[0] if PYTHON_VERSION == 2: from urllib import urlretrieve elif PYTHON_VERSION == 3: from urllib.request import urlretrieve def _isArrayLike(obj): return hasattr(obj, '__iter__') and hasattr(obj, '__len__') class COCO: def __init__(self, dataset): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset)) self.dataset = dataset self.createIndex() def createIndex(self): # create index print('creating index...') anns, cats, imgs = {}, {}, {} imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print('index created!') # create class members self.anns = anns self.imgToAnns = 
imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print('{}: {}'.format(key, value)) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. 
:param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if _isArrayLike(catNms) else [catNms] supNms = supNms if _isArrayLike(supNms) else [supNms] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. :param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if _isArrayLike(ids): return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. 
:param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if _isArrayLike(ids): return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if _isArrayLike(ids): return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns, draw_bbox=False): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception('datasetType not supported') if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((int(len(seg)/2), 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = maskUtils.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): 
plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) if draw_bbox: [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox'] poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]] np_poly = np.array(poly).reshape((4,2)) polygons.append(Polygon(np_poly)) color.append(c) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print(ann['caption']) def loadRes(self, resFile): """ Load result file and return a result api object. :param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print('Loading and preparing results...') tic = time.time() if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode): with open(resFile) as f: anns = json.load(f) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], 
bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = maskUtils.area(ann['segmentation']) if not 'bbox' in ann: ann['bbox'] = maskUtils.toBbox(ann['segmentation']) ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print('DONE (t={:0.2f}s)'.format(time.time()- tic)) res.dataset['annotations'] = anns res.createIndex() return res def download(self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. 
:param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print('Please specify target directory') return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urlretrieve(img['coco_url'], fname) print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic)) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print('Converting ndarray to lists...') assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print('{}/{}'.format(i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann def annToRLE(self, ann): """ Convert annotation which can be polygons, uncompressed RLE to RLE. :return: binary mask (numpy 2D array) """ t = self.imgs[ann['image_id']] h, w = t['height'], t['width'] segm = ann['segmentation'] if type(segm) == list: # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(segm, h, w) rle = maskUtils.merge(rles) elif type(segm['counts']) == list: # uncompressed RLE rle = maskUtils.frPyObjects(segm, h, w) else: # rle rle = ann['segmentation'] return rle def annToMask(self, ann): """ Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask. 
:return: binary mask (numpy 2D array) """ rle = self.annToRLE(ann) m = maskUtils.decode(rle) return m ================================================ FILE: artrackv2_mindspore/lib/train/dataset/__init__.py ================================================ from .lasot import Lasot from .got10k import Got10k from .tracking_net import TrackingNet from .imagenetvid import ImagenetVID from .coco import MSCOCO from .coco_seq import MSCOCOSeq from .got10k_lmdb import Got10k_lmdb from .lasot_lmdb import Lasot_lmdb from .imagenetvid_lmdb import ImagenetVID_lmdb from .coco_seq_lmdb import MSCOCOSeq_lmdb from .tracking_net_lmdb import TrackingNet_lmdb ================================================ FILE: artrackv2_mindspore/lib/train/dataset/base_image_dataset.py ================================================ import torch.utils.data from lib.train.data.image_loader import jpeg4py_loader class BaseImageDataset(torch.utils.data.Dataset): """ Base class for image datasets """ def __init__(self, name, root, image_loader=jpeg4py_loader): """ args: root - The root path to the dataset image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. """ self.name = name self.root = root self.image_loader = image_loader self.image_list = [] # Contains the list of sequences. self.class_list = [] def __len__(self): """ Returns size of the dataset returns: int - number of samples in the dataset """ return self.get_num_images() def __getitem__(self, index): """ Not to be used! Check get_frames() instead. 
""" return None def get_name(self): """ Name of the dataset returns: string - Name of the dataset """ raise NotImplementedError def get_num_images(self): """ Number of sequences in a dataset returns: int - number of sequences in the dataset.""" return len(self.image_list) def has_class_info(self): return False def get_class_name(self, image_id): return None def get_num_classes(self): return len(self.class_list) def get_class_list(self): return self.class_list def get_images_in_class(self, class_name): raise NotImplementedError def has_segmentation_info(self): return False def get_image_info(self, seq_id): """ Returns information about a particular image, args: seq_id - index of the image returns: Dict """ raise NotImplementedError def get_image(self, image_id, anno=None): """ Get a image args: image_id - index of image anno(None) - The annotation for the sequence (see get_sequence_info). If None, they will be loaded. returns: image - anno - dict - A dict containing meta information about the sequence, e.g. class of the target object. """ raise NotImplementedError ================================================ FILE: artrackv2_mindspore/lib/train/dataset/base_video_dataset.py ================================================ import torch.utils.data # 2021.1.5 use jpeg4py_loader_w_failsafe as default from lib.train.data.image_loader import jpeg4py_loader_w_failsafe class BaseVideoDataset(torch.utils.data.Dataset): """ Base class for video datasets """ def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe): """ args: root - The root path to the dataset image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. """ self.name = name self.root = root self.image_loader = image_loader self.sequence_list = [] # Contains the list of sequences. 
class BaseVideoDataset(torch.utils.data.Dataset):
    """Common interface implemented by every video (sequence) dataset.

    Concrete datasets fill in ``self.sequence_list`` / ``self.class_list``
    and override the accessors that raise ``NotImplementedError`` below.
    """

    def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe):
        """
        args:
            name - name of the dataset
            root - The root path to the dataset
            image_loader (jpeg4py_loader) - The function to read the images.
                jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default.
        """
        self.name = name
        self.root = root
        self.image_loader = image_loader

        # Populated by sub-classes.
        self.sequence_list = []     # Contains the list of sequences.
        self.class_list = []        # Contains the list of object class names.

    def __len__(self):
        """Size of the dataset.

        returns:
            int - number of samples (sequences) in the dataset
        """
        return self.get_num_sequences()

    def __getitem__(self, index):
        """Not to be used! Check get_frames() instead."""
        return None

    def is_video_sequence(self):
        """Whether the dataset holds videos (True) or single images (False).

        returns:
            bool - True if a video dataset
        """
        return True

    def is_synthetic_video_dataset(self):
        """Whether the videos are synthetic rather than real recordings.

        returns:
            bool - True if a synthetic video dataset
        """
        return False

    def get_name(self):
        """Name of the dataset.

        returns:
            string - Name of the dataset
        """
        raise NotImplementedError

    def get_num_sequences(self):
        """Number of sequences in the dataset.

        returns:
            int - number of sequences in the dataset.
        """
        return len(self.sequence_list)

    def has_class_info(self):
        # Sub-classes with per-sequence class labels override this to return True.
        return False

    def has_occlusion_info(self):
        # Sub-classes with occlusion annotations override this to return True.
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def get_sequences_in_class(self, class_name):
        raise NotImplementedError

    def has_segmentation_info(self):
        # Sub-classes that provide masks override this to return True.
        return False

    def get_sequence_info(self, seq_id):
        """Return annotation information about a particular sequence.

        args:
            seq_id - index of the sequence
        returns:
            Dict
        """
        raise NotImplementedError

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load a set of frames from a particular sequence.

        args:
            seq_id - index of sequence
            frame_ids - a list of frame numbers
            anno (None) - The annotation for the sequence (see get_sequence_info).
                If None, they will be loaded.
        returns:
            list - List of frames corresponding to frame_ids
            list - List of dicts for each frame
            dict - A dict containing meta information about the sequence,
                e.g. class of the target object.
        """
        raise NotImplementedError
class MSCOCO(BaseImageDataset):
    """ The COCO object detection dataset.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro
        Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,
                 split="train", version="2014"):
        """
        args:
            root - path to coco root folder
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            min_area - Objects with area less than min_area are filtered out. Default is 0.0
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO', root, image_loader)

        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))

        self.coco_set = COCO(self.anno_path)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler

        # One entry per annotation id (not per image): every annotated object is one sample.
        self.image_list = self._get_image_list(min_area=min_area)

        if data_fraction is not None:
            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))
        self.im_per_class = self._build_im_per_class()

    def _get_image_list(self, min_area=None):
        """Collect usable annotation ids, skipping crowd regions and (optionally) tiny objects."""
        ann_list = list(self.coco_set.anns.keys())
        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        if min_area is not None:
            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]

        return image_list

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco'

    def has_class_info(self):
        return True

    def has_segmentation_info(self):
        return True

    def get_class_list(self):
        # Category names in the iteration order of the COCO category dict.
        return [self.cats[cat_id]['name'] for cat_id in self.cats.keys()]

    def _build_im_per_class(self):
        """Build a class-name -> list-of-sample-indices lookup for class-balanced sampling."""
        im_per_class = {}
        for i, im in enumerate(self.image_list):
            class_name = self.cats[self.coco_set.anns[im]['category_id']]['name']
            if class_name not in im_per_class:
                im_per_class[class_name] = [i]
            else:
                im_per_class[class_name].append(i)

        return im_per_class

    def get_images_in_class(self, class_name):
        return self.im_per_class[class_name]

    def get_image_info(self, im_id):
        """Return bbox, mask and validity flags for one sample.

        A box is 'valid' when it has strictly positive width and height.
        """
        anno = self._get_anno(im_id)

        bbox = torch.Tensor(anno['bbox']).view(4,)

        mask = torch.Tensor(self.coco_set.annToMask(anno))

        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, im_id):
        # Map dataset index -> annotation id -> raw COCO annotation dict.
        anno = self.coco_set.anns[self.image_list[im_id]]
        return anno

    def _get_image(self, im_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, im_id):
        """Best-effort meta info; returns all-None fields when the category lookup fails."""
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except (KeyError, IndexError):
            # Was a bare `except:`; narrowed to the errors the lookup chain can raise
            # so that e.g. KeyboardInterrupt is no longer swallowed.
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, im_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]
        return cat_dict_current['name']

    def get_image(self, image_id, anno=None):
        """Load one sample: the image, its annotation dict and its meta info."""
        frame = self._get_image(image_id)

        if anno is None:
            anno = self.get_image_info(image_id)

        object_meta = self.get_meta_info(image_id)

        return frame, anno, object_meta
class MSCOCOSeq(BaseVideoDataset):
    """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro
        Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
        """
        args:
            root - path to the coco dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used.
            data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
                images will be used
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO', root, image_loader)

        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))

        # Load the COCO set.
        self.coco_set = COCO(self.anno_path)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()

        # One 'sequence' per annotation id: each annotated object is a length-1 video.
        self.sequence_list = self._get_sequence_list()

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
        self.seq_per_class = self._build_seq_per_class()

    def _get_sequence_list(self):
        """Collect usable annotation ids, skipping crowd regions."""
        ann_list = list(self.coco_set.anns.keys())
        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        return seq_list

    def is_video_sequence(self):
        # COCO is an image dataset posing as length-1 videos.
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco'

    def has_class_info(self):
        return True

    def get_class_list(self):
        # Category names in the iteration order of the COCO category dict.
        return [self.cats[cat_id]['name'] for cat_id in self.cats.keys()]

    def has_segmentation_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def _build_seq_per_class(self):
        """Build a class-name -> list-of-sequence-indices lookup for class-balanced sampling."""
        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def get_sequence_info(self, seq_id):
        """Return bbox (1x4), mask and validity flags for the length-1 sequence."""
        anno = self._get_anno(seq_id)

        bbox = torch.Tensor(anno['bbox']).view(1, 4)

        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)

        # 2021.1.3: to avoid too small bounding boxes, the validity threshold
        # was changed from 0 to 50 pixels for both width and height.
        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, seq_id):
        # Map dataset index -> annotation id -> raw COCO annotation dict.
        anno = self.coco_set.anns[self.sequence_list[seq_id]]
        return anno

    def _get_frames(self, seq_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, seq_id):
        """Best-effort meta info; returns all-None fields when the category lookup fails."""
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except (KeyError, IndexError):
            # Was a bare `except:`; narrowed to the errors the lookup chain can raise
            # so that e.g. KeyboardInterrupt is no longer swallowed.
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, seq_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
        return cat_dict_current['name']

    def get_frames(self, seq_id=None, frame_ids=None, anno=None):
        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
        # list containing these replicated images.
        frame = self._get_frames(seq_id)

        frame_list = [frame.copy() for _ in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            # Replicate the single-frame annotation once per requested frame id.
            anno_frames[key] = [value[0, ...] for _ in frame_ids]

        object_meta = self.get_meta_info(seq_id)

        return frame_list, anno_frames, object_meta
class MSCOCOSeq_lmdb(BaseVideoDataset):
    """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.

    This variant reads both the annotation json and the images out of an lmdb
    database (via decode_json / decode_img) instead of the filesystem.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro
        Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
        """
        args:
            root - path to the coco dataset (the lmdb database root).
            image_loader (default_image_loader) - The function to read the images. If installed,
                jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used.
            data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
                images will be used
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO_lmdb', root, image_loader)
        self.root = root
        # Relative keys inside the lmdb database, not filesystem paths.
        self.img_pth = 'images/{}{}/'.format(split, version)
        self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)

        # Load the COCO set.
        print('loading annotations into memory...')
        tic = time.time()
        # The json is stored inside the lmdb database; decode it into a dict.
        coco_json = decode_json(root, self.anno_path)
        print('Done (t={:0.2f}s)'.format(time.time() - tic))

        # NOTE(review): this COCO comes from lib.train.dataset.COCO_tool and takes the
        # pre-decoded dict, unlike pycocotools.coco.COCO which takes a file path.
        self.coco_set = COCO(coco_json)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()

        # One 'sequence' per annotation id: each annotated object is a length-1 video.
        self.sequence_list = self._get_sequence_list()

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
        self.seq_per_class = self._build_seq_per_class()

    def _get_sequence_list(self):
        # Keep only non-crowd annotations.
        ann_list = list(self.coco_set.anns.keys())
        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        return seq_list

    def is_video_sequence(self):
        # COCO is an image dataset posing as length-1 videos.
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco_lmdb'

    def has_class_info(self):
        return True

    def get_class_list(self):
        # Category names in the iteration order of the COCO category dict.
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def _build_seq_per_class(self):
        # Build a class-name -> list-of-sequence-indices lookup for class-balanced sampling.
        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def get_sequence_info(self, seq_id):
        # Return bbox (1x4), mask and validity flags for the length-1 sequence.
        anno = self._get_anno(seq_id)

        bbox = torch.Tensor(anno['bbox']).view(1, 4)

        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)

        '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''
        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)

        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, seq_id):
        # Map dataset index -> annotation id -> raw COCO annotation dict.
        anno = self.coco_set.anns[self.sequence_list[seq_id]]

        return anno

    def _get_frames(self, seq_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
        # img = self.image_loader(os.path.join(self.img_pth, path))
        # Decode the image directly out of the lmdb database.
        img = decode_img(self.root, os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, seq_id):
        # Best-effort meta info; returns all-None fields when the category lookup fails.
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to (KeyError, IndexError).
        except:
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, seq_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
        return cat_dict_current['name']

    def get_frames(self, seq_id=None, frame_ids=None, anno=None):
        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
        # list containing these replicated images.
        frame = self._get_frames(seq_id)

        frame_list = [frame.copy() for _ in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            # Replicate the single-frame annotation once per requested frame id.
            anno_frames[key] = [value[0, ...] for _ in frame_ids]

        object_meta = self.get_meta_info(seq_id)

        return frame_list, anno_frames, object_meta
for _ in frame_ids] object_meta = self.get_meta_info(seq_id) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/got10k.py ================================================ import os import os.path import numpy as np import torch import csv import pandas import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader from lib.train.admin import env_settings class Got10k(BaseVideoDataset): """ GOT-10k dataset. Publication: GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild Lianghua Huang, Xin Zhao, and Kaiqi Huang arXiv:1810.11981, 2018 https://arxiv.org/pdf/1810.11981.pdf Download dataset from http://got-10k.aitestunion.com/downloads """ def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None): """ args: root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split, not NOT the official got-10k validation split. To use the official validation split, provide that as the root folder instead. seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids' options can be used at the same time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().got10k_dir if root is None else root super().__init__('GOT10k', root, image_loader) # all folders inside the root self.sequence_list = self._get_sequence_list() # seq_id is the index of the folder inside the got10k root path if split is not None: if seq_ids is not None: raise ValueError('Cannot set both split_name and seq_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt') elif split == 'val': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt') elif split == 'train_full': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt') elif split == 'vottrain': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt') elif split == 'votval': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt') else: raise ValueError('Unknown split name.') # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist() seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze("columns").values.tolist() elif seq_ids is None: seq_ids = list(range(0, len(self.sequence_list))) self.sequence_list = [self.sequence_list[i] for i in seq_ids] if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.sequence_meta_info = self._load_meta_info() self.seq_per_class = self._build_seq_per_class() self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def get_name(self): return 'got10k' def has_class_info(self): return True def has_occlusion_info(self): return True def _load_meta_info(self): sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list} return sequence_meta_info def _read_meta(self, seq_path): try: with open(os.path.join(seq_path, 
'meta_info.ini')) as f: meta_info = f.readlines() object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1], 'motion_class': meta_info[6].split(': ')[-1][:-1], 'major_class': meta_info[7].split(': ')[-1][:-1], 'root_class': meta_info[8].split(': ')[-1][:-1], 'motion_adverb': meta_info[9].split(': ')[-1][:-1]}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def _build_seq_per_class(self): seq_per_class = {} for i, s in enumerate(self.sequence_list): object_class = self.sequence_meta_info[s]['object_class_name'] if object_class in seq_per_class: seq_per_class[object_class].append(i) else: seq_per_class[object_class] = [i] return seq_per_class def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _get_sequence_list(self): with open(os.path.join(self.root, 'list.txt')) as f: dir_list = list(csv.reader(f)) dir_list = [dir_name[0] for dir_name in dir_list] return dir_list def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values return torch.tensor(gt) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "absence.label") cover_file = os.path.join(seq_path, "cover.label") with open(occlusion_file, 'r', newline='') as f: occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)]) with open(cover_file, 'r', newline='') as f: cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)]) target_visible = ~occlusion & (cover>0).byte() visible_ratio = cover.float() / 8 return target_visible, visible_ratio def _get_sequence_path(self, seq_id): return os.path.join(self.root, self.sequence_list[seq_id]) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) 
bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible, visible_ratio = self._read_target_visible(seq_path) visible = visible & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def get_class_name(self, seq_id): obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] return obj_meta['object_class_name'] def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] return frame_list, anno_frames, obj_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/got10k_lmdb.py ================================================ import os import os.path import numpy as np import torch import csv import pandas import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader from lib.train.admin import env_settings '''2021.1.16 Gok10k for loading lmdb dataset''' from lib.utils.lmdb_utils import * class Got10k_lmdb(BaseVideoDataset): def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None): """ args: root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. split - 'train' or 'val'. 
class Got10k_lmdb(BaseVideoDataset):
    """GOT-10k dataset stored in an lmdb database (images and labels decoded via lib.utils.lmdb_utils)."""

    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):
        """
        args:
            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,
                    not NOT the official got-10k validation split. To use the official validation split, provide that as
                    the root folder instead.
            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or
                      'seq_ids' options can be used at the same time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            use_lmdb - whether the dataset is stored in lmdb format
        """
        root = env_settings().got10k_lmdb_dir if root is None else root
        super().__init__('GOT10k_lmdb', root, image_loader)

        # all folders inside the root
        self.sequence_list = self._get_sequence_list()

        # seq_id is the index of the folder inside the got10k root path
        if split is not None:
            if seq_ids is not None:
                raise ValueError('Cannot set both split_name and seq_ids.')
            train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')
            elif split == 'val':
                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')
            elif split == 'train_full':
                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')
            elif split == 'vottrain':
                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')
            elif split == 'votval':
                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # Fixed: `squeeze=True` was deprecated in pandas 1.4 and removed in 2.0;
            # use `.squeeze("columns")` as got10k.py already does.
            seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze("columns").values.tolist()
        elif seq_ids is None:
            seq_ids = list(range(0, len(self.sequence_list)))

        self.sequence_list = [self.sequence_list[i] for i in seq_ids]

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))

        self.sequence_meta_info = self._load_meta_info()
        self.seq_per_class = self._build_seq_per_class()

        self.class_list = list(self.seq_per_class.keys())
        self.class_list.sort()

    def get_name(self):
        return 'got10k_lmdb'

    def has_class_info(self):
        return True

    def has_occlusion_info(self):
        return True

    def _load_meta_info(self):
        """Decode meta_info.ini for every sequence from the lmdb database; failures yield all-None meta."""
        def _read_meta(meta_info):
            # meta_info is the file content split into lines; no trailing newline to strip here.
            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],
                                       'motion_class': meta_info[6].split(': ')[-1],
                                       'major_class': meta_info[7].split(': ')[-1],
                                       'root_class': meta_info[8].split(': ')[-1],
                                       'motion_adverb': meta_info[9].split(': ')[-1]})
            return object_meta

        sequence_meta_info = {}
        for s in self.sequence_list:
            try:
                meta_str = decode_str(self.root, "train/%s/meta_info.ini" % s)
                sequence_meta_info[s] = _read_meta(meta_str.split('\n'))
            except Exception:
                # Was a bare `except:` — keep the best-effort fallback but stop
                # swallowing KeyboardInterrupt/SystemExit.
                sequence_meta_info[s] = OrderedDict({'object_class_name': None,
                                                     'motion_class': None,
                                                     'major_class': None,
                                                     'root_class': None,
                                                     'motion_adverb': None})
        return sequence_meta_info

    def _build_seq_per_class(self):
        """Build a class-name -> list-of-sequence-indices lookup for class-balanced sampling."""
        seq_per_class = {}

        for i, s in enumerate(self.sequence_list):
            object_class = self.sequence_meta_info[s]['object_class_name']
            if object_class in seq_per_class:
                seq_per_class[object_class].append(i)
            else:
                seq_per_class[object_class] = [i]

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _get_sequence_list(self):
        # train/list.txt (inside the lmdb database) enumerates the sequence folder names.
        dir_str = decode_str(self.root, 'train/list.txt')
        dir_list = dir_str.split('\n')
        return dir_list

    def _read_bb_anno(self, seq_path):
        """Decode groundtruth.txt into an Nx4 float tensor of [x, y, w, h] boxes."""
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt_str_list = decode_str(self.root, bb_anno_file).split('\n')[:-1]  # the last line in got10k is empty
        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]
        gt_arr = np.array(gt_list).astype(np.float32)

        return torch.tensor(gt_arr)

    def _read_target_visible(self, seq_path):
        """Combine absence and cover labels into a visibility flag and visible ratio."""
        # full occlusion and out_of_view files
        occlusion_file = os.path.join(seq_path, "absence.label")
        cover_file = os.path.join(seq_path, "cover.label")
        # Read these files
        occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\n')[:-1]))  # the last line in got10k is empty
        occlusion = torch.ByteTensor(occ_list)
        cover_list = list(map(int, decode_str(self.root, cover_file).split('\n')[:-1]))  # the last line in got10k is empty
        cover = torch.ByteTensor(cover_list)

        # Visible when not absent and at least partially covered by the target.
        target_visible = ~occlusion & (cover > 0).byte()

        # cover.label stores an 8-level coverage score.
        visible_ratio = cover.float() / 8
        return target_visible, visible_ratio

    def _get_sequence_path(self, seq_id):
        return os.path.join("train", self.sequence_list[seq_id])

    def get_sequence_info(self, seq_id):
        """Return per-frame bbox, validity and visibility for one sequence."""
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible, visible_ratio = self._read_target_visible(seq_path)
        visible = visible & valid.byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}

    def _get_frame_path(self, seq_path, frame_id):
        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1

    def _get_frame(self, seq_path, frame_id):
        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))

    def get_class_name(self, seq_id):
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]
        return obj_meta['object_class_name']

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus their per-frame annotation slices and meta info."""
        seq_path = self._get_sequence_path(seq_id)
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]

        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        return frame_list, anno_frames, obj_meta
def get_target_to_image_ratio(seq):
    # Ratio (sqrt of area fraction) between the first-frame target box and the image.
    # Used to filter out targets that cover (almost) the whole image.
    anno = torch.Tensor(seq['anno'])
    img_sz = torch.Tensor(seq['image_size'])
    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()


class ImagenetVID(BaseVideoDataset):
    """ Imagenet VID dataset.

    Publication:
        ImageNet Large Scale Visual Recognition Challenge
        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,
        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei
        IJCV, 2015
        https://arxiv.org/pdf/1409.0575.pdf

    Download the dataset from http://image-net.org/
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):
        """
        args:
            root - path to the imagenet vid dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used.
            min_length - Minimum allowed sequence length.
            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets
                which cover complete image.
        """
        root = env_settings().imagenet_dir if root is None else root
        super().__init__("imagenetvid", root, image_loader)

        cache_file = os.path.join(root, 'cache.json')
        if os.path.isfile(cache_file):
            # If available, load the pre-processed cache file containing meta-info for each sequence
            with open(cache_file, 'r') as f:
                sequence_list_dict = json.load(f)

            self.sequence_list = sequence_list_dict
        else:
            # Else process the imagenet annotations and generate the cache file
            self.sequence_list = self._process_anno(root)

            with open(cache_file, 'w') as f:
                json.dump(self.sequence_list, f)

        # Filter the sequences based on min_length and max_target_area in the first frame
        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and
                              get_target_to_image_ratio(x) < max_target_area]

    def get_name(self):
        return 'imagenetvid'

    def get_num_sequences(self):
        return len(self.sequence_list)

    def get_sequence_info(self, seq_id):
        # Per-frame bbox (Nx4, [x, y, w, h]), validity (positive w and h) and visibility.
        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])
        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)
        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()
        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}

    def _get_frame(self, sequence, frame_id):
        # Frame paths follow the ILSVRC2015 VID training layout:
        # Data/VID/train/<set>/<video>/<frame>.JPEG, with frames offset by the
        # tracklet's start_frame.
        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])
        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])
        frame_number = frame_id + sequence['start_frame']
        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,
                                  '{:06d}.JPEG'.format(frame_number))
        return self.image_loader(frame_path)

    def get_frames(self, seq_id, frame_ids, anno=None):
        # Load the requested frames plus their per-frame annotation slices and meta info.
        sequence = self.sequence_list[seq_id]

        frame_list = [self._get_frame(sequence, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        # Create anno dict
        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        # added the class info to the meta info
        object_meta = OrderedDict({'object_class': sequence['class_name'],
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta

    def _process_anno(self, root):
        # Builds individual tracklets
        # Walks the per-frame XML annotations of every video and assembles, for each
        # track id, a contiguous run of boxes starting at the frame where the track
        # first appears and ending at the first frame where it disappears.
        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')

        all_sequences = []
        # NOTE(review): loop variable `set` shadows the builtin within this loop body.
        for set in sorted(os.listdir(base_vid_anno_path)):
            set_id = int(set.split('_')[-1])
            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):

                vid_id = int(vid.split('_')[-1])
                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))

                # Image size is taken from the first frame's annotation.
                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))
                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]

                # One list of <object> elements per frame, in frame order.
                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')
                           for f in anno_files]

                tracklets = {}

                # Find all tracklets along with start frame
                for f_id, all_targets in enumerate(objects):
                    for target in all_targets:
                        tracklet_id = target.find('trackid').text
                        if tracklet_id not in tracklets:
                            tracklets[tracklet_id] = f_id

                for tracklet_id, tracklet_start in tracklets.items():
                    tracklet_anno = []
                    target_visible = []
                    class_name_id = None

                    for f_id in range(tracklet_start, len(objects)):
                        found = False
                        for target in objects[f_id]:
                            if target.find('trackid').text == tracklet_id:
                                # Class name is recorded from the first frame where the track is seen.
                                if not class_name_id:
                                    class_name_id = target.find('name').text
                                x1 = int(target.find('bndbox/xmin').text)
                                y1 = int(target.find('bndbox/ymin').text)
                                x2 = int(target.find('bndbox/xmax').text)
                                y2 = int(target.find('bndbox/ymax').text)

                                # Boxes are stored as [x, y, w, h].
                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])
                                target_visible.append(target.find('occluded').text == '0')

                                found = True
                                break
                        # Tracklet ends at the first frame where the track id is absent.
                        if not found:
                            break

                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,
                                    'start_frame': tracklet_start, 'anno': tracklet_anno,
                                    'target_visible': target_visible, 'image_size': image_size}
                    all_sequences.append(new_sequence)

        return all_sequences
================================================ FILE: artrackv2_mindspore/lib/train/dataset/imagenetvid_lmdb.py ================================================ import os from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader import torch from collections import OrderedDict from lib.train.admin import env_settings from lib.utils.lmdb_utils import decode_img, decode_json def get_target_to_image_ratio(seq): anno = torch.Tensor(seq['anno']) img_sz = torch.Tensor(seq['image_size']) return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt() class ImagenetVID_lmdb(BaseVideoDataset): """ Imagenet VID dataset. Publication: ImageNet Large Scale Visual Recognition Challenge Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei IJCV, 2015 https://arxiv.org/pdf/1409.0575.pdf Download the dataset from http://image-net.org/ """ def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1): """ args: root - path to the imagenet vid dataset. image_loader (default_image_loader) - The function to read the images. If installed, jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used. min_length - Minimum allowed sequence length. max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets which cover complete image. 
""" root = env_settings().imagenet_dir if root is None else root super().__init__("imagenetvid_lmdb", root, image_loader) sequence_list_dict = decode_json(root, "cache.json") self.sequence_list = sequence_list_dict # Filter the sequences based on min_length and max_target_area in the first frame self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and get_target_to_image_ratio(x) < max_target_area] def get_name(self): return 'imagenetvid_lmdb' def get_num_sequences(self): return len(self.sequence_list) def get_sequence_info(self, seq_id): bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno']) valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0) visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte() return {'bbox': bb_anno, 'valid': valid, 'visible': visible} def _get_frame(self, sequence, frame_id): set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id']) vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id']) frame_number = frame_id + sequence['start_frame'] frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name, '{:06d}.JPEG'.format(frame_number)) return decode_img(self.root, frame_path) def get_frames(self, seq_id, frame_ids, anno=None): sequence = self.sequence_list[seq_id] frame_list = [self._get_frame(sequence, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) # Create anno dict anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] # added the class info to the meta info object_meta = OrderedDict({'object_class': sequence['class_name'], 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/lasot.py ================================================ import os import os.path import torch import numpy as np 
import pandas import csv import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader from lib.train.admin import env_settings class Lasot(BaseVideoDataset): """ LaSOT dataset. Publication: LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling CVPR, 2019 https://arxiv.org/pdf/1809.07845.pdf Download the dataset from https://cis.temple.edu/lasot/download.html """ def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): """ args: root - path to the lasot dataset. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the videos with subscripts -1, -3, and -5 from each class will be used for training. split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of vid_ids or split option can be used at a time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().lasot_dir if root is None else root super().__init__('LaSOT', root, image_loader) # Keep a list of all classes self.class_list = [f for f in os.listdir(self.root)] self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)} self.sequence_list = self._build_sequence_list(vid_ids, split) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.seq_per_class = self._build_class_list() def _build_sequence_list(self, vid_ids=None, split=None): if split is not None: if vid_ids is not None: raise ValueError('Cannot set both split_name and vid_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt') else: raise ValueError('Unknown split name.') # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist() sequence_list = pandas.read_csv(file_path, header=None).squeeze("columns").values.tolist() elif vid_ids is not None: sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids] else: raise ValueError('Set either split_name or vid_ids.') return sequence_list def _build_class_list(self): seq_per_class = {} for seq_id, seq_name in enumerate(self.sequence_list): class_name = seq_name.split('-')[0] if class_name in seq_per_class: seq_per_class[class_name].append(seq_id) else: seq_per_class[class_name] = [seq_id] return seq_per_class def get_name(self): return 'lasot' def has_class_info(self): return True def has_occlusion_info(self): return True def get_num_sequences(self): return len(self.sequence_list) def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt = 
pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values return torch.tensor(gt) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "full_occlusion.txt") out_of_view_file = os.path.join(seq_path, "out_of_view.txt") with open(occlusion_file, 'r', newline='') as f: occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) with open(out_of_view_file, 'r') as f: out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) target_visible = ~occlusion & ~out_of_view return target_visible def _get_sequence_path(self, seq_id): seq_name = self.sequence_list[seq_id] class_name = seq_name.split('-')[0] vid_id = seq_name.split('-')[1] return os.path.join(self.root, class_name, class_name + '-' + vid_id) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = self._read_target_visible(seq_path) & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def _get_class(self, seq_path): raw_class = seq_path.split('/')[-2] return raw_class def get_class_name(self, seq_id): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] object_meta = 
OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/lasot_lmdb.py ================================================ import os import os.path import torch import numpy as np import pandas import csv import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader from lib.train.admin import env_settings '''2021.1.16 Lasot for loading lmdb dataset''' from lib.utils.lmdb_utils import * class Lasot_lmdb(BaseVideoDataset): def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): """ args: root - path to the lasot dataset. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the videos with subscripts -1, -3, and -5 from each class will be used for training. split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of vid_ids or split option can be used at a time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().lasot_lmdb_dir if root is None else root super().__init__('LaSOT_lmdb', root, image_loader) self.sequence_list = self._build_sequence_list(vid_ids, split) class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list] self.class_list = [] for ele in class_list: if ele not in self.class_list: self.class_list.append(ele) # Keep a list of all classes self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)} if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.seq_per_class = self._build_class_list() def _build_sequence_list(self, vid_ids=None, split=None): if split is not None: if vid_ids is not None: raise ValueError('Cannot set both split_name and vid_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt') else: raise ValueError('Unknown split name.') sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist() elif vid_ids is not None: sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids] else: raise ValueError('Set either split_name or vid_ids.') return sequence_list def _build_class_list(self): seq_per_class = {} for seq_id, seq_name in enumerate(self.sequence_list): class_name = seq_name.split('-')[0] if class_name in seq_per_class: seq_per_class[class_name].append(seq_id) else: seq_per_class[class_name] = [seq_id] return seq_per_class def get_name(self): return 'lasot_lmdb' def has_class_info(self): return True def has_occlusion_info(self): return True def get_num_sequences(self): return len(self.sequence_list) def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_path): bb_anno_file = 
os.path.join(seq_path, "groundtruth.txt") gt_str_list = decode_str(self.root, bb_anno_file).split('\n')[:-1] # the last line is empty gt_list = [list(map(float, line.split(','))) for line in gt_str_list] gt_arr = np.array(gt_list).astype(np.float32) return torch.tensor(gt_arr) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "full_occlusion.txt") out_of_view_file = os.path.join(seq_path, "out_of_view.txt") occ_list = list(map(int, decode_str(self.root, occlusion_file).split(','))) occlusion = torch.ByteTensor(occ_list) out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(','))) out_of_view = torch.ByteTensor(out_view_list) target_visible = ~occlusion & ~out_of_view return target_visible def _get_sequence_path(self, seq_id): seq_name = self.sequence_list[seq_id] class_name = seq_name.split('-')[0] vid_id = seq_name.split('-')[1] return os.path.join(class_name, class_name + '-' + vid_id) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = self._read_target_visible(seq_path) & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return decode_img(self.root, self._get_frame_path(seq_path, frame_id)) def _get_class(self, seq_path): raw_class = seq_path.split('/')[-2] return raw_class def get_class_name(self, seq_id): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) 
anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/tracking_net.py ================================================ import torch import os import os.path import numpy as np import pandas import random from collections import OrderedDict from lib.train.data import jpeg4py_loader from .base_video_dataset import BaseVideoDataset from lib.train.admin import env_settings def list_sequences(root, set_ids): """ Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name) args: root: Root directory to TrackingNet set_ids: Sets (0-11) which are to be used returns: list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence """ sequence_list = [] for s in set_ids: anno_dir = os.path.join(root, "TRAIN_" + str(s), "anno") sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')] sequence_list += sequences_cur_set return sequence_list class TrackingNet(BaseVideoDataset): """ TrackingNet dataset. Publication: TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild. Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem ECCV, 2018 https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit. """ def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None): """ args: root - The path to the TrackingNet folder, containing the training sets. 
image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the sets (0 - 11) will be used. data_fraction - Fraction of dataset to be used. The complete dataset is used by default """ root = env_settings().trackingnet_dir if root is None else root super().__init__('TrackingNet', root, image_loader) if set_ids is None: set_ids = [i for i in range(12)] self.set_ids = set_ids # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and # video_name for each sequence self.sequence_list = list_sequences(self.root, self.set_ids) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction)) self.seq_to_class_map, self.seq_per_class = self._load_class_info() # we do not have the class_lists for the tracking net self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def _load_class_info(self): ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt') with open(class_map_path, 'r') as f: seq_to_class_map = {seq_class.split('\t')[0]: seq_class.rstrip().split('\t')[1] for seq_class in f} seq_per_class = {} for i, seq in enumerate(self.sequence_list): class_name = seq_to_class_map.get(seq[1], 'Unknown') if class_name not in seq_per_class: seq_per_class[class_name] = [i] else: seq_per_class[class_name].append(i) return seq_to_class_map, seq_per_class def get_name(self): return 'trackingnet' def has_class_info(self): return True def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] bb_anno_file = os.path.join(self.root, "TRAIN_" + 
str(set_id), "anno", vid_name + ".txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values return torch.tensor(gt) def get_sequence_info(self, seq_id): bbox = self._read_bb_anno(seq_id) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame(self, seq_id, frame_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] frame_path = os.path.join(self.root, "TRAIN_" + str(set_id), "frames", vid_name, str(frame_id) + ".jpg") return self.image_loader(frame_path) def _get_class(self, seq_id): seq_name = self.sequence_list[seq_id][1] return self.seq_to_class_map[seq_name] def get_class_name(self, seq_id): obj_class = self._get_class(seq_id) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): frame_list = [self._get_frame(seq_id, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] obj_class = self._get_class(seq_id) object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/dataset/tracking_net_lmdb.py ================================================ import torch import os import os.path import numpy as np import random from collections import OrderedDict from lib.train.data import jpeg4py_loader from .base_video_dataset import BaseVideoDataset from lib.train.admin import env_settings import json from lib.utils.lmdb_utils import decode_img, decode_str def list_sequences(root): """ Lists all the videos in the input set_ids. 
Returns a list of tuples (set_id, video_name) args: root: Root directory to TrackingNet returns: list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence """ fname = os.path.join(root, "seq_list.json") with open(fname, "r") as f: sequence_list = json.loads(f.read()) return sequence_list class TrackingNet_lmdb(BaseVideoDataset): """ TrackingNet dataset. Publication: TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild. Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem ECCV, 2018 https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit. """ def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None): """ args: root - The path to the TrackingNet folder, containing the training sets. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the sets (0 - 11) will be used. data_fraction - Fraction of dataset to be used. The complete dataset is used by default """ root = env_settings().trackingnet_lmdb_dir if root is None else root super().__init__('TrackingNet_lmdb', root, image_loader) if set_ids is None: set_ids = [i for i in range(12)] self.set_ids = set_ids # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and # video_name for each sequence self.sequence_list = list_sequences(self.root) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction)) self.seq_to_class_map, self.seq_per_class = self._load_class_info() # we do not have the class_lists for the tracking net self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def _load_class_info(self): ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt') with open(class_map_path, 'r') as f: seq_to_class_map = {seq_class.split('\t')[0]: seq_class.rstrip().split('\t')[1] for seq_class in f} seq_per_class = {} for i, seq in enumerate(self.sequence_list): class_name = seq_to_class_map.get(seq[1], 'Unknown') if class_name not in seq_per_class: seq_per_class[class_name] = [i] else: seq_per_class[class_name].append(i) return seq_to_class_map, seq_per_class def get_name(self): return 'trackingnet_lmdb' def has_class_info(self): return True def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] gt_str_list = decode_str(os.path.join(self.root, "TRAIN_%d_lmdb" % set_id), os.path.join("anno", vid_name + ".txt")).split('\n')[:-1] gt_list = [list(map(float, line.split(','))) for line in gt_str_list] gt_arr = np.array(gt_list).astype(np.float32) return torch.tensor(gt_arr) def get_sequence_info(self, seq_id): bbox = self._read_bb_anno(seq_id) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame(self, seq_id, frame_id): set_id = self.sequence_list[seq_id][0] vid_name = self.sequence_list[seq_id][1] return decode_img(os.path.join(self.root, 
"TRAIN_%d_lmdb" % set_id), os.path.join("frames", vid_name, str(frame_id) + ".jpg")) def _get_class(self, seq_id): seq_name = self.sequence_list[seq_id][1] return self.seq_to_class_map[seq_name] def get_class_name(self, seq_id): obj_class = self._get_class(seq_id) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): frame_list = [self._get_frame(seq_id, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] obj_class = self._get_class(seq_id) object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: artrackv2_mindspore/lib/train/run_training.py ================================================ import os import sys import argparse import importlib import cv2 as cv import torch.backends.cudnn import torch.distributed as dist import torch import random import numpy as np torch.backends.cudnn.benchmark = False import _init_paths import lib.train.admin.settings as ws_settings def init_seeds(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.set_num_threads(4) cv.setNumThreads(1) cv.ocl.setUseOpenCL(False) def run_training(script_name, config_name, cudnn_benchmark=True, local_rank=-1, save_dir=None, base_seed=None, use_lmdb=False, script_name_prv=None, config_name_prv=None, use_wandb=False, distill=None, script_teacher=None, config_teacher=None): """Run the train script. args: script_name: Name of emperiment in the "experiments/" folder. config_name: Name of the yaml file in the "experiments/". cudnn_benchmark: Use cudnn benchmark or not (default is True). 
""" if save_dir is None: print("save_dir dir is not given. Use the default dir instead.") # This is needed to avoid strange crashes related to opencv torch.set_num_threads(4) cv.setNumThreads(4) torch.backends.cudnn.benchmark = cudnn_benchmark print('script_name: {}.py config_name: {}.yaml'.format(script_name, config_name)) '''2021.1.5 set seed for different process''' if base_seed is not None: if local_rank != -1: init_seeds(base_seed + local_rank) else: init_seeds(base_seed) settings = ws_settings.Settings() settings.script_name = script_name settings.config_name = config_name settings.project_path = 'train/{}/{}'.format(script_name, config_name) if script_name_prv is not None and config_name_prv is not None: settings.project_path_prv = 'train/{}/{}'.format(script_name_prv, config_name_prv) settings.local_rank = local_rank settings.save_dir = os.path.abspath(save_dir) settings.use_lmdb = use_lmdb prj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) settings.cfg_file = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_name, config_name)) settings.use_wandb = use_wandb if distill: settings.distill = distill settings.script_teacher = script_teacher settings.config_teacher = config_teacher if script_teacher is not None and config_teacher is not None: settings.project_path_teacher = 'train/{}/{}'.format(script_teacher, config_teacher) settings.cfg_file_teacher = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_teacher, config_teacher)) expr_module = importlib.import_module('lib.train.train_script_distill') else: expr_module = importlib.import_module('lib.train.train_script') expr_func = getattr(expr_module, 'run') expr_func(settings) def main(): parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.') parser.add_argument('--script', type=str, required=True, help='Name of the train script.') parser.add_argument('--config', type=str, required=True, help="Name of the config file.") 
parser.add_argument('--cudnn_benchmark', type=bool, default=False, help='Set cudnn benchmark on (1) or off (0) (default is on).') parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training') parser.add_argument('--save_dir', type=str, help='the directory to save checkpoints and logs') parser.add_argument('--seed', type=int, default=42, help='seed for random numbers') parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0) # whether datasets are in lmdb format parser.add_argument('--script_prv', type=str, default=None, help='Name of the train script of previous model.') parser.add_argument('--config_prv', type=str, default=None, help="Name of the config file of previous model.") parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0) # whether to use wandb # for knowledge distillation parser.add_argument('--distill', type=int, choices=[0, 1], default=0) # whether to use knowledge distillation parser.add_argument('--script_teacher', type=str, help='teacher script name') parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name') args = parser.parse_args() if args.local_rank != -1: dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) else: torch.cuda.set_device(0) run_training(args.script, args.config, cudnn_benchmark=args.cudnn_benchmark, local_rank=args.local_rank, save_dir=args.save_dir, base_seed=args.seed, use_lmdb=args.use_lmdb, script_name_prv=args.script_prv, config_name_prv=args.config_prv, use_wandb=args.use_wandb, distill=args.distill, script_teacher=args.script_teacher, config_teacher=args.config_teacher) if __name__ == '__main__': main() ================================================ FILE: artrackv2_mindspore/lib/train/train_script.py ================================================ import os # loss function related from lib.utils.box_ops import giou_loss from torch.nn.functional import l1_loss from torch.nn import 
BCEWithLogitsLoss
# train pipeline related
from lib.train.trainers import LTRTrainer
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.data import sampler, opencv_loader, processing, LTRLoader, sequence_sampler
# distributed training related
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DataParallel as DP
# some more advanced functions
from .base_functions import *
# network related
from lib.models.ostrack import build_ostrack
# forward propagation related
from lib.train.actors import OSTrackActor
# for import modules
import importlib

from ..utils.focal_loss import FocalLoss


def names2datasets(name_list: list, settings, image_loader):
    """Instantiate the datasets named in ``name_list``.

    args:
        name_list - list of dataset name strings; each must be one of the
            names asserted below.
        settings - training settings; ``settings.use_lmdb`` selects the
            lmdb-backed dataset variants, ``settings.env`` supplies roots.
        image_loader - image loading function forwarded to each dataset.
    returns:
        list of constructed dataset objects, in ``name_list`` order.
    """
    assert isinstance(name_list, list)
    datasets = []
    #settings.use_lmdb = True
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        if name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
    return datasets


def slt_collate(batch):
    """Collate a batch into a dict mapping each key to a plain per-sample list.

    Unlike the default collate, nothing is stacked into tensors; sequences of
    different lengths are kept as-is.
    """
    ret = {}
    for k in batch[0].keys():
        here_list = []
        for ex in batch:
            here_list.append(ex[k])
        ret[k] = here_list
    return ret


class SLTLoader(torch.utils.data.dataloader.DataLoader):
    """
    Data loader. Combines a dataset and a sampler, and provides
    single- or multi-process iterators over the dataset.
    """

    # mirrors DataLoader's private guard flag
    __initialized = False

    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,
                 timeout=0, worker_init_fn=None):
        # Default to the list-of-lists collate above when none is supplied.
        if collate_fn is None:
            collate_fn = slt_collate

        # NOTE(review): arguments are passed positionally; this matches the
        # positional order of an older torch DataLoader signature — confirm
        # against the installed torch version.
        super(SLTLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
                                        num_workers, collate_fn, pin_memory, drop_last,
                                        timeout, worker_init_fn)

        self.name = name                      # loader name used as the stats key
        self.training = training              # True for the training loader
        self.epoch_interval = epoch_interval  # run this loader every N epochs
        self.stack_dim = stack_dim


def run(settings):
    """Build datasets, network, actor, optimizer and trainer, then train.

    args:
        settings - training settings produced by the launcher (contains
            cfg_file, script_name, config_name, local_rank, save_dir, ...).
    """
    # NOTE(review): description string mentions STARK but this script builds
    # OSTrack/ARTrack models — looks copy-pasted; the string is only cosmetic.
    settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2'

    # update the default configs with config file
    if not os.path.exists(settings.cfg_file):
        raise ValueError("%s doesn't exist." % settings.cfg_file)
    config_module = importlib.import_module("lib.config.%s.config" % settings.script_name)
    cfg = config_module.cfg
    config_module.update_config_from_file(settings.cfg_file)
    if settings.local_rank in [-1, 0]:
        print("New configuration is shown below.")
        for key in cfg.keys():
            print("%s configuration:" % key, cfg[key])
            print('\n')

    # update settings based on cfg
    update_settings(settings, cfg)

    # Record the training log
    log_dir = os.path.join(settings.save_dir, 'logs')
    if settings.local_rank in [-1, 0]:
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
    settings.log_file = os.path.join(log_dir, "%s-%s.log" % (settings.script_name, settings.config_name))

    # Build dataloaders (sequence-level sampling for SLT-style training)
    dataset_train = sequence_sampler.SequenceSampler(
        datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
        p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,
        samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,
        max_gap=cfg.DATA.MAX_GAP,
        max_interval=cfg.DATA.MAX_INTERVAL,
        num_search_frames=cfg.DATA.SEARCH.NUMBER,
        num_template_frames=cfg.DATA.TEMPLATE.NUMBER,
        frame_sample_mode='random_interval',
        prob=cfg.DATA.INTERVAL_PROB)

    loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE,
                             num_workers=cfg.TRAIN.NUM_WORKER, shuffle=False, drop_last=True, pin_memory=True)

    if "RepVGG" in cfg.MODEL.BACKBONE.TYPE or "swin" in cfg.MODEL.BACKBONE.TYPE or "LightTrack" in cfg.MODEL.BACKBONE.TYPE:
        cfg.ckpt_dir = settings.save_dir

    bins = cfg.MODEL.BINS
    search_size = cfg.DATA.SEARCH.SIZE

    # Create network
    if settings.script_name == "ostrack":
        net = build_ostrack(cfg)
    else:
        raise ValueError("illegal script name")

    # wrap networks to distributed one
    net.cuda()
    import torch.distributed as dist
    # NOTE(review): dist.get_rank() requires an initialized process group;
    # the single-process path (local_rank == -1) may crash here — confirm.
    rank = dist.get_rank()
    device_id = rank % torch.cuda.device_count()
    net = net.to(device_id)
    if settings.local_rank != -1:
        print(f"Start running basic DDP example on rank {rank}.")
        # net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)  # add syncBN converter
        net = DDP(net, find_unused_parameters=True, broadcast_buffers=False)
        print(f"Finish running basic DDP example on rank {rank}.")
        # net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True, broadcast_buffers=False)
        #net = DP(net)
        settings.device = torch.device("cuda:%d" % settings.local_rank)
    else:
        settings.device = torch.device("cuda:0")
    settings.deep_sup = getattr(cfg.TRAIN, "DEEP_SUPERVISION", False)
    settings.distill = getattr(cfg.TRAIN, "DISTILL", False)
    settings.distill_loss_type = getattr(cfg.TRAIN, "DISTILL_LOSS_TYPE", "KL")

    # Loss functions and Actors
    if settings.script_name == "ostrack":
        focal_loss = FocalLoss()
        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss, 'cls': BCEWithLogitsLoss()}
        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'cls': 2.0}
        actor = OSTrackActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings,
                             cfg=cfg, bins=bins, search_size=search_size)
    else:
        raise ValueError("illegal script name")

    # if cfg.TRAIN.DEEP_SUPERVISION:
    #     raise ValueError("Deep supervision is not supported now.")

    # Optimizer, parameters, and learning rates
    optimizer, lr_scheduler =
get_optimizer_scheduler(net, cfg)
    use_amp = getattr(cfg.TRAIN, "AMP", False)
    trainer = LTRTrainer(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp)

    # train process
    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True)


================================================
FILE: artrackv2_mindspore/lib/train/train_script_distill.py
================================================
import os
# loss function related
from lib.utils.box_ops import giou_loss
from torch.nn.functional import l1_loss
from torch.nn import BCEWithLogitsLoss
# train pipeline related
from lib.train.trainers import LTRTrainer
# distributed training related
from torch.nn.parallel import DistributedDataParallel as DDP
# some more advanced functions
from .base_functions import *
# network related
from lib.models.stark import build_starks, build_starkst
from lib.models.stark import build_stark_lightning_x_trt
# forward propagation related
from lib.train.actors import STARKLightningXtrtdistillActor
# for import modules
import importlib


def build_network(script_name, cfg):
    """Build the STARK variant selected by ``script_name`` from config ``cfg``.

    Used for both the student and the teacher network in distillation.
    raises:
        ValueError - if ``script_name`` is not a known STARK variant.
    """
    # Create network
    if script_name == "stark_s":
        net = build_starks(cfg)
    elif script_name == "stark_st1" or script_name == "stark_st2":
        net = build_starkst(cfg)
    elif script_name == "stark_lightning_X_trt":
        net = build_stark_lightning_x_trt(cfg, phase="train")
    else:
        raise ValueError("illegal script name")
    return net


def run(settings):
    """Knowledge-distillation training entry: build student + teacher, then train.

    args:
        settings - training settings from the launcher; in addition to the
            usual fields this uses ``cfg_file_teacher`` and ``script_teacher``.
    """
    settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2'

    # update the default configs with config file
    if not os.path.exists(settings.cfg_file):
        raise ValueError("%s doesn't exist." % settings.cfg_file)
    config_module = importlib.import_module("lib.config.%s.config" % settings.script_name)
    cfg = config_module.cfg
    config_module.update_config_from_file(settings.cfg_file)
    if settings.local_rank in [-1, 0]:
        print("New configuration is shown below.")
        for key in cfg.keys():
            print("%s configuration:" % key, cfg[key])
            print('\n')

    # update the default teacher configs with teacher config file
    if not os.path.exists(settings.cfg_file_teacher):
        raise ValueError("%s doesn't exist." % settings.cfg_file_teacher)
    config_module_teacher = importlib.import_module("lib.config.%s.config" % settings.script_teacher)
    cfg_teacher = config_module_teacher.cfg
    config_module_teacher.update_config_from_file(settings.cfg_file_teacher)
    if settings.local_rank in [-1, 0]:
        print("New teacher configuration is shown below.")
        for key in cfg_teacher.keys():
            print("%s configuration:" % key, cfg_teacher[key])
            print('\n')

    # update settings based on cfg
    update_settings(settings, cfg)

    # Record the training log
    log_dir = os.path.join(settings.save_dir, 'logs')
    if settings.local_rank in [-1, 0]:
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
    settings.log_file = os.path.join(log_dir, "%s-%s.log" % (settings.script_name, settings.config_name))

    # Build dataloaders
    loader_train, loader_val = build_dataloaders(cfg, settings)

    if "RepVGG" in cfg.MODEL.BACKBONE.TYPE or "swin" in cfg.MODEL.BACKBONE.TYPE:
        cfg.ckpt_dir = settings.save_dir

    """turn on the distillation mode"""
    cfg.TRAIN.DISTILL = True
    cfg_teacher.TRAIN.DISTILL = True
    net = build_network(settings.script_name, cfg)
    net_teacher = build_network(settings.script_teacher, cfg_teacher)

    # wrap networks to distributed one; the teacher stays frozen in eval mode
    net.cuda()
    net_teacher.cuda()
    net_teacher.eval()
    if settings.local_rank != -1:
        net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True)
        net_teacher = DDP(net_teacher, device_ids=[settings.local_rank], find_unused_parameters=True)
        settings.device = torch.device("cuda:%d" % settings.local_rank)
    else:
settings.device = torch.device("cuda:0")
    # settings.deep_sup = getattr(cfg.TRAIN, "DEEP_SUPERVISION", False)
    # settings.distill = getattr(cfg.TRAIN, "DISTILL", False)
    settings.distill_loss_type = getattr(cfg.TRAIN, "DISTILL_LOSS_TYPE", "L1")

    # Loss functions and Actors
    if settings.script_name == "stark_lightning_X_trt":
        objective = {'giou': giou_loss, 'l1': l1_loss}
        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT}
        actor = STARKLightningXtrtdistillActor(net=net, objective=objective, loss_weight=loss_weight,
                                               settings=settings, net_teacher=net_teacher)
    else:
        raise ValueError("illegal script name")

    # Optimizer, parameters, and learning rates
    optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg)
    use_amp = getattr(cfg.TRAIN, "AMP", False)
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp)

    # train process (distill=True makes the trainer load the teacher checkpoint)
    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True, distill=True)


================================================
FILE: artrackv2_mindspore/lib/train/trainers/__init__.py
================================================
from .base_trainer import BaseTrainer
from .ltr_trainer import LTRTrainer


================================================
FILE: artrackv2_mindspore/lib/train/trainers/base_trainer.py
================================================
import os
import glob
import torch
import traceback
from lib.train.admin import multigpu
from torch.utils.data.distributed import DistributedSampler


class BaseTrainer:
    """Base trainer class. Contains functions for training and saving/loading checkpoints.
    Trainer classes should inherit from this one and overload the train_epoch function."""

    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g. [train_loader, val_loader].
                In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g. Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
        """
        self.actor = actor
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.loaders = loaders

        self.update_settings(settings)

        self.epoch = 0     # last completed epoch; training resumes at epoch + 1
        self.stats = {}

        # Prefer the device chosen by the launcher; otherwise pick cuda:0/cpu.
        self.device = getattr(settings, 'device', None)
        if self.device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() and settings.use_gpu else "cpu")

        self.actor.to(self.device)
        self.settings = settings

    def update_settings(self, settings=None):
        """Updates the trainer settings. Must be called to update internal settings."""
        if settings is not None:
            self.settings = settings

        if self.settings.env.workspace_dir is not None:
            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)
            '''2021.1.4 New function: specify checkpoint dir'''
            if self.settings.save_dir is None:
                self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')
            else:
                self._checkpoint_dir = os.path.join(self.settings.save_dir, 'checkpoints')
            print("checkpoints will be saved to %s" % self._checkpoint_dir)

            # Only rank 0 (or single-process) creates the directory.
            if self.settings.local_rank in [-1, 0]:
                if not os.path.exists(self._checkpoint_dir):
                    print("Training with multiple GPUs. checkpoints directory doesn't exist. "
                          "Create checkpoints directory")
                    os.makedirs(self._checkpoint_dir)
        else:
            self._checkpoint_dir = None

    def train(self, max_epochs, load_latest=False, fail_safe=True, load_previous_ckpt=False, distill=False):
        """Do training for the given number of epochs.
        args:
            max_epochs - Max number of training epochs,
            load_latest - Bool indicating whether to resume from latest epoch.
            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.
            load_previous_ckpt - Bool; warm-start weights from settings.project_path_prv.
            distill - Bool; additionally load the teacher weights from settings.project_path_teacher.
        """
        epoch = -1
        # NOTE(review): num_tries is 1, so the fail-safe "restart" loop only
        # runs once; a crash is logged but training is not actually retried.
        num_tries = 1
        for i in range(num_tries):
            try:
                if load_latest:
                    self.load_checkpoint()
                if load_previous_ckpt:
                    directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_prv)
                    self.load_state_dict(directory)
                if distill:
                    directory_teacher = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_teacher)
                    self.load_state_dict(directory_teacher, distill=True)
                for epoch in range(self.epoch+1, max_epochs+1):
                    self.epoch = epoch

                    self.train_epoch()

                    if self.lr_scheduler is not None:
                        if self.settings.scheduler_type != 'cosine':
                            self.lr_scheduler.step()
                        else:
                            self.lr_scheduler.step(epoch - 1)

                    # only save the last 10 checkpoints
                    # NOTE(review): `epoch % 1 == 0` is always true, so this
                    # condition saves EVERY epoch despite the comment above.
                    save_every_epoch = getattr(self.settings, "save_every_epoch", False)
                    save_epochs = [79, 159, 239]
                    if epoch > (max_epochs - 1) or save_every_epoch or epoch % 1 == 0 or epoch in save_epochs or epoch > (max_epochs - 5):
                        # if epoch > (max_epochs - 10) or save_every_epoch or epoch % 100 == 0:
                        if self._checkpoint_dir:
                            if self.settings.local_rank in [-1, 0]:
                                self.save_checkpoint()
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            except:
                print('Training crashed at epoch {}'.format(epoch))
                if fail_safe:
                    self.epoch -= 1
                    load_latest = True
                    print('Traceback for the error!')
                    print(traceback.format_exc())
                    print('Restarting training from last epoch ...')
                else:
                    raise

        print('Finished training!')

    def train_epoch(self):
        # Subclasses must implement one epoch of training/validation.
        raise NotImplementedError

    def save_checkpoint(self):
        """Saves a checkpoint of the network and other variables."""
        # Unwrap DDP/DP so the raw module's state_dict is stored.
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__
        state = {
            'epoch': self.epoch,
            'actor_type': actor_type,
            'net_type': net_type,
            'net': net.state_dict(),
            'net_info': getattr(net, 'info', None),
            'constructor': getattr(net, 'constructor', None),
            'optimizer': self.optimizer.state_dict(),
            'stats': self.stats,
            'settings': self.settings
        }

        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)
        print(directory)
        if not os.path.exists(directory):
            print("directory doesn't exist. creating...")
            os.makedirs(directory)

        # First save as a tmp file
        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)
        torch.save(state, tmp_file_path)

        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)

        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure
        os.rename(tmp_file_path, file_path)

    def load_checkpoint(self, checkpoint=None, fields=None, ignore_fields=None, load_constructor=False):
        """Loads a network checkpoint file.

        Can be called in three different ways:
            load_checkpoint():
                Loads the latest epoch from the workspace. Use this to continue training.
            load_checkpoint(epoch_num):
                Loads the network at the given epoch number (int).
            load_checkpoint(path_to_checkpoint):
                Loads the file from the given absolute path (str).
        """
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__

        if checkpoint is None:
            # Load most recent checkpoint
            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,
                                                                             self.settings.project_path, net_type)))
            if checkpoint_list:
                checkpoint_path = checkpoint_list[-1]
            else:
                print('No matching checkpoint file found')
                return
        elif isinstance(checkpoint, int):
            # Checkpoint is the epoch number
            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,
                                                                 net_type, checkpoint)
        elif isinstance(checkpoint, str):
            # checkpoint is the path
            if os.path.isdir(checkpoint):
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError

        # Load network
        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')

        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'

        if fields is None:
            fields = checkpoint_dict.keys()
        if ignore_fields is None:
            ignore_fields = ['settings']

        # Never load the scheduler. It exists in older checkpoints.
        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])

        # Load all fields
        for key in fields:
            if key in ignore_fields:
                continue
            if key == 'net':
                net.load_state_dict(checkpoint_dict[key])
            elif key == 'optimizer':
                self.optimizer.load_state_dict(checkpoint_dict[key])
            else:
                # e.g. 'epoch' and 'stats' land directly on the trainer
                setattr(self, key, checkpoint_dict[key])

        # Set the net info
        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
            net.constructor = checkpoint_dict['constructor']
        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
            net.info = checkpoint_dict['net_info']

        # Update the epoch in lr scheduler
        if 'epoch' in fields:
            self.lr_scheduler.last_epoch = self.epoch
            # 2021.1.10 Update the epoch in data_samplers
            for loader in self.loaders:
                if isinstance(loader.sampler, DistributedSampler):
                    loader.sampler.set_epoch(self.epoch)

        return True

    def load_state_dict(self, checkpoint=None, distill=False):
        """Load ONLY the network weights (non-strict) from a checkpoint.

        args:
            checkpoint - directory containing '*_ep*.pth.tar' files (latest is
                used) or a direct path to a checkpoint file. Must be a str.
            distill - if True, load into the teacher network
                (``self.actor.net_teacher``) instead of the student.
        """
        if distill:
            net = self.actor.net_teacher.module if multigpu.is_multi_gpu(self.actor.net_teacher) \
                else self.actor.net_teacher
        else:
            net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        net_type = type(net).__name__

        if isinstance(checkpoint, str):
            # checkpoint is the path
            if os.path.isdir(checkpoint):
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError

        # Load network
        print("Loading pretrained model from ", checkpoint_path)
        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')

        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'

        missing_k, unexpected_k = net.load_state_dict(checkpoint_dict["net"], strict=False)
        print("previous checkpoint is loaded.")
        print("missing keys: ", missing_k)
        print("unexpected keys:", unexpected_k)

        return True


================================================
FILE: artrackv2_mindspore/lib/train/trainers/ltr_trainer.py
================================================
import os
import datetime
from collections import OrderedDict

from torch.nn.utils import clip_grad_norm_

#from lib.train.data.wandb_logger import WandbWriter
from lib.train.trainers import BaseTrainer
from lib.train.admin import AverageMeter, StatValue
from memory_profiler import profile
#from lib.train.admin import TensorboardWriter
import torch
import time
import numpy as np
from torch.utils.data.distributed import DistributedSampler
from torch.cuda.amp import autocast
from torch.cuda.amp import GradScaler
from lib.utils.misc import get_world_size


class LTRTrainer(BaseTrainer):
    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):
        """
        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g. [train_loader, val_loader].
In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g. Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
        """
        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)

        self._set_default_settings()

        # Initialize statistics variables (one entry per loader, filled lazily)
        self.stats = OrderedDict({loader.name: None for loader in self.loaders})

        # Initialize tensorboard and wandb
        #self.wandb_writer = None
        #if settings.local_rank in [-1, 0]:
        #    tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)
        #    if not os.path.exists(tensorboard_writer_dir):
        #        os.makedirs(tensorboard_writer_dir)
        #    self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])
        #    if settings.use_wandb:
        #        world_size = get_world_size()
        #        cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1)
        #        interval = (world_size * settings.batchsize)  # * interval
        #        self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)

        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)
        print("move_data", self.move_data_to_gpu)
        self.settings = settings
        self.use_amp = use_amp
        if use_amp:
            self.scaler = GradScaler()

    def _set_default_settings(self):
        """Fill any missing trainer settings with defaults; init the mIoU history."""
        # Dict of all default values
        default = {'print_interval': 10,
                   'print_stats': None,
                   'description': ''}

        for param, default_value in default.items():
            if getattr(self.settings, param, None) is None:
                setattr(self.settings, param, default_value)
        # running history of per-iteration mean IoU (for mIoU10/mIoU100 stats)
        self.miou_list = []

    def cycle_dataset(self, loader):
        """Do a cycle of training or validation.

        Sequence-level training: first roll out the tracker without gradients
        (actor.explore) to collect per-sequence inputs and baseline IoUs, then
        replay the sequences in small slices with gradients enabled and a
        single optimizer step per batch.
        """
        torch.autograd.set_detect_anomaly(True)
        self.actor.train(loader.training)
        torch.set_grad_enabled(loader.training)

        self._init_timing()

        for i, data in enumerate(loader, 1):
            # Exploration rollout runs in eval mode with gradients disabled.
            self.actor.eval()
            self.data_read_done_time = time.time()
            with torch.no_grad():
                explore_result = self.actor.explore(data)
            if explore_result == None:
                print("this time i skip")
                continue
            # get inputs
            #print(data)
            self.data_to_gpu_time = time.time()

            data['epoch'] = self.epoch
            data['settings'] = self.settings

            stats = {}
            reward_record = []
            miou_record = []
            e_miou_record = []

            num_seq = len(data['num_frames'])

            # Calculate reward tensor
            #reward_tensor = torch.zeros(explore_result['baseline_iou'].size())
            baseline_iou = explore_result['baseline_iou']
            #explore_iou = explore_result['explore_iou']
            for seq_idx in range(num_seq):
                num_frames = data['num_frames'][seq_idx] - 1
                b_miou = torch.mean(baseline_iou[:num_frames, seq_idx])
                # e_miou = torch.mean(explore_iou[:num_frames, seq_idx])
                miou_record.append(b_miou.item())
                # e_miou_record.append(e_miou.item())
                # NOTE(review): b_reward is computed but unused now that the
                # explore/baseline reward gap below is commented out.
                b_reward = b_miou.item()
                # e_reward = e_miou.item()
                # iou_gap = e_reward - b_reward
                # reward_record.append(iou_gap)
                # reward_tensor[:num_frames, seq_idx] = iou_gap

            # Training mode: accumulate gradients over slices of bs_backward
            # sequences, then take one optimizer step for the whole batch.
            cursor = 0
            bs_backward = 1
            #print(self.actor.net.module.box_head.decoder.layers[2].mlpx.fc1.weight)
            self.optimizer.zero_grad()
            self.actor.train()
            # Build the occlusion/visibility mask for forward and reversed
            # sequence passes (hence num_seq*2 below).
            cover = torch.Tensor(data['visible_ratio']).permute(1, 0)
            reverse_cover = torch.flip(cover, dims=[0])
            cover = cover[1:, :]
            reverse_cover = reverse_cover[1:, :]
            cover_real = torch.cat((cover, reverse_cover), dim=1)
            while cursor < num_seq*2:
                #print("now is ", cursor , "and all is ", num_seq)
                model_inputs = {}
                model_inputs['slt_loss_weight'] = 15
                #if cursor < num_seq:
                #    model_inputs['template_images'] = explore_result['template_images'][cursor:cursor + bs_backward].cuda()
                #else:
                #    model_inputs['template_images'] = explore_result['template_images_reverse'][cursor - num_seq:cursor - num_seq + bs_backward].cuda()
                model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda()
                model_inputs['template_images_z0'] = explore_result['template_images_z0'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['dz_feat_update'] = explore_result['dz_feat_update'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['target_in_search'] = explore_result['target_in_search'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['cover'] = cover_real[:, cursor:cursor + bs_backward].cuda()
                model_inputs['epoch'] = self.epoch
                #print("this is cursor")
                #print(explore_result['pre_seq'].shape)
                #print(explore_result['x_feat'].squeeze(1).shape)
                #model_inputs['action_tensor'] = explore_result['action_tensor'][:, cursor:cursor + bs_backward].cuda()
                #model_inputs['reward_tensor'] = reward_tensor[:, cursor:cursor + bs_backward].cuda()

                loss, stats_cur = self.actor.compute_sequence_losses(model_inputs)
                #for name, param in self.actor.net.named_parameters():
                #    shape, c = (param.grad.shape, param.grad.sum()) if param.grad is not None else (None, None)
                #    print(f'{name}: {param.shape} \n\t grad: {shape} \n\t {c}')
                #print("i make this!")
                loss.backward()
                #print("i made that?")
                # Average the slice statistics over the number of sequences.
                for key, val in stats_cur.items():
                    if key in stats:
                        stats[key] += val*(bs_backward / num_seq)
                    else:
                        stats[key] = val*(bs_backward / num_seq)
                cursor += bs_backward

            # Single clipped update for all accumulated gradients.
            grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100)
            stats['grad_norm'] = grad_norm
            #print(self.actor.net.module.backbone.blocks[8].mlp.fc1.weight)
            self.optimizer.step()

            miou = np.mean(miou_record)
            self.miou_list.append(miou)
            #stats['reward'] = np.mean(reward_record)
            #stats['e_mIoU'] = np.mean(e_miou_record)
            stats['mIoU'] = miou
            stats['mIoU10'] = np.mean(self.miou_list[-10:])
            stats['mIoU100'] = np.mean(self.miou_list[-100:])

            batch_size = num_seq * np.max(data['num_frames'])
            self._update_stats(stats, batch_size, loader)
            self._print_stats(i, loader, batch_size)
            torch.cuda.empty_cache()

            # # forward pass
            # if not self.use_amp:
            #     loss, stats = self.actor(data)
            # else:
            #     with autocast():
            #         loss, stats = self.actor(data)
            #
            # # backward pass and update weights
            # if loader.training:
            #     self.optimizer.zero_grad()
            #     if not self.use_amp:
            #         loss.backward()
            #         if self.settings.grad_clip_norm > 0:
            #             torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)
            #         self.optimizer.step()
            #     else:
            #         self.scaler.scale(loss).backward()
            #         self.scaler.step(self.optimizer)
            #         self.scaler.update()

            # update statistics
            # batch_size = data['template_images'].shape[loader.stack_dim]
            # self._update_stats(stats, batch_size, loader)

            # print statistics
            # self._print_stats(i, loader, batch_size)

            # update wandb status
            #if self.wandb_writer is not None and i % self.settings.print_interval == 0:
            #    if self.settings.local_rank in [-1, 0]:
            #        self.wandb_writer.write_log(self.stats, self.epoch)

        # calculate ETA after every epoch
        # epoch_time = self.prev_time - self.start_time
        # print("Epoch Time: " + str(datetime.timedelta(seconds=epoch_time)))
        # print("Avg Data Time: %.5f" % (self.avg_date_time / self.num_frames * batch_size))
        # print("Avg GPU Trans Time: %.5f" % (self.avg_gpu_trans_time / self.num_frames * batch_size))
        # print("Avg Forward Time: %.5f" % (self.avg_forward_time / self.num_frames * batch_size))

    def train_epoch(self):
        """Do one epoch for each loader."""
        for loader in self.loaders:
            if self.epoch % loader.epoch_interval == 0:
                # 2021.1.10 Set epoch
                if isinstance(loader.sampler, DistributedSampler):
                    loader.sampler.set_epoch(self.epoch)
                self.cycle_dataset(loader)

        self._stats_new_epoch()
        #if self.settings.local_rank in [-1, 0]:
        #    self._write_tensorboard()

    def _init_timing(self):
        # Reset frame counter and the running timers used by _print_stats.
        self.num_frames = 0
        self.start_time = time.time()
        self.prev_time = self.start_time
        self.avg_date_time = 0
        self.avg_gpu_trans_time = 0
        self.avg_forward_time = 0

    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
        """Fold the iteration's stats into the per-loader AverageMeters."""
        # Initialize stats if not initialized yet
        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:
            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})

        # add lr state
        if loader.training:
            lr_list = self.lr_scheduler.get_last_lr()
            for i, lr in enumerate(lr_list):
                var_name = 'LearningRate/group{}'.format(i)
                if var_name not in self.stats[loader.name].keys():
                    self.stats[loader.name][var_name] = StatValue()
                self.stats[loader.name][var_name].update(lr)

        for name, val in new_stats.items():
            if name not in self.stats[loader.name].keys():
                self.stats[loader.name][name] = AverageMeter()
            self.stats[loader.name][name].update(val, batch_size)

    def _print_stats(self, i, loader, batch_size):
        """Print (and append to the log file) throughput/timing and stat averages."""
        self.num_frames += batch_size
        current_time = time.time()
        batch_fps = batch_size / (current_time - self.prev_time)
        average_fps = self.num_frames / (current_time - self.start_time)
        prev_frame_time_backup = self.prev_time
        self.prev_time = current_time

        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)
        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)
        self.avg_forward_time += current_time - self.data_to_gpu_time

        if i % self.settings.print_interval == 0 or i == loader.__len__():
            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())
            print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps)
            # 2021.12.14 add data time print
            print_str += 'DataTime: %.3f (%.3f) , ' % (self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size)
            print_str += 'ForwardTime: %.3f , ' % (self.avg_forward_time / self.num_frames * batch_size)
            print_str += 'TotalTime: %.3f , ' % ((current_time - self.start_time) / self.num_frames * batch_size)
            # print_str += 'DataTime: %.3f (%.3f) , ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time)
            # print_str += 'ForwardTime: %.3f , ' % (current_time - self.data_to_gpu_time)
            # print_str += 'TotalTime: %.3f , ' % (current_time - prev_frame_time_backup)

            for name, val in self.stats[loader.name].items():
                if
(self.settings.print_stats is None or name in self.settings.print_stats): if hasattr(val, 'avg'): print_str += '%s: %.5f , ' % (name, val.avg) # else: # print_str += '%s: %r , ' % (name, val) print(print_str[:-5]) log_str = print_str[:-5] + '\n' with open(self.settings.log_file, 'a') as f: f.write(log_str) def _stats_new_epoch(self): # Record learning rate for loader in self.loaders: if loader.training: try: lr_list = self.lr_scheduler.get_last_lr() except: lr_list = self.lr_scheduler._get_lr(self.epoch) for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for loader_stats in self.stats.values(): if loader_stats is None: continue for stat_value in loader_stats.values(): if hasattr(stat_value, 'new_epoch'): stat_value.new_epoch() #def _write_tensorboard(self): # if self.epoch == 1: # self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description) # self.tensorboard_writer.write_epoch(self.stats, self.epoch) ================================================ FILE: artrackv2_mindspore/lib/utils/__init__.py ================================================ # from .tensor import TensorDict, TensorList ================================================ FILE: artrackv2_mindspore/lib/utils/box_ops.py ================================================ import numpy as np def box_xywh_to_cxywh(x): x1, y1, w, h = x.unbind(-1) b = [x1+0.5*w, y1+0.5*h, w, h] return torch.stack(b, dim=-1) def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) def box_xywh_to_xyxy(x): x1, y1, w, h = x.unbind(-1) b = [x1, y1, x1 + w, y1 + h] return torch.stack(b, dim=-1) def box_xyxy_to_xywh(x): x1, y1, x2, y2 = x.unbind(-1) b = [x1, y1, x2 - x1, y2 - y1] return torch.stack(b, dim=-1) def box_xyxy_to_cxcywh(x): 
x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) # modified from torchvision to also return the union '''Note that this function only supports shape (N,4)''' def box_iou(boxes1, boxes2): """ :param boxes1: (N, 4) (x1,y1,x2,y2) :param boxes2: (N, 4) (x1,y1,x2,y2) :return: """ area1 = box_area(boxes1) # (N,) area2 = box_area(boxes2) # (N,) lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # (N,2) rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # (N,2) wh = (rb - lt).clamp(min=0) # (N,2) inter = wh[:, 0] * wh[:, 1] # (N,) union = area1 + area2 - inter iou = inter / union return iou, union '''Note that this implementation is different from DETR's''' def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format boxes1: (N, 4) boxes2: (N, 4) """ # degenerate boxes gives inf / nan results # so do an early check # try: #assert (boxes1[:, 2:] >= boxes1[:, :2]).all() # assert (boxes2[:, 2:] >= boxes2[:, :2]).all() iou, union = box_iou(boxes1, boxes2) # (N,) lt = torch.min(boxes1[:, :2], boxes2[:, :2]) rb = torch.max(boxes1[:, 2:], boxes2[:, 2:]) wh = (rb - lt).clamp(min=0) # (N,2) area = wh[:, 0] * wh[:, 1] # (N,) return iou - (area - union) / area, iou def giou_loss(boxes1, boxes2): """ :param boxes1: (N, 4) (x1,y1,x2,y2) :param boxes2: (N, 4) (x1,y1,x2,y2) :return: """ giou, iou = generalized_box_iou(boxes1, boxes2) return (1 - giou).mean(), iou def clip_box(box: list, H, W, margin=0): x1, y1, w, h = box x2, y2 = x1 + w, y1 + h x1 = min(max(0, x1), W-margin) x2 = min(max(margin, x2), W) y1 = min(max(0, y1), H-margin) y2 = min(max(margin, y2), H) w = max(margin, x2-x1) h = max(margin, y2-y1) return [x1, y1, w, h] ================================================ FILE: artrackv2_mindspore/lib/utils/ce_utils.py ================================================ def generate_bbox_mask(bbox_mask, bbox): b, h, w = bbox_mask.shape for i in 
range(b): bbox_i = bbox[i].cpu().tolist() bbox_mask[i, int(bbox_i[1]):int(bbox_i[1] + bbox_i[3] - 1), int(bbox_i[0]):int(bbox_i[0] + bbox_i[2] - 1)] = 1 return bbox_mask def generate_mask_cond(cfg, bs, device, gt_bbox): template_size = cfg.DATA.TEMPLATE.SIZE stride = cfg.MODEL.BACKBONE.STRIDE template_feat_size = template_size // stride if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL': box_mask_z = None elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_POINT': if template_feat_size == 8: index = slice(3, 4) elif template_feat_size == 12: index = slice(5, 6) elif template_feat_size == 7: index = slice(3, 4) elif template_feat_size == 14: index = slice(6, 7) else: raise NotImplementedError box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device) box_mask_z[:, index, index] = 1 box_mask_z = box_mask_z.flatten(1).to(torch.bool) elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC': # use fixed 4x4 region, 3:5 for 8x8 # use fixed 4x4 region 5:6 for 12x12 if template_feat_size == 8: index = slice(3, 5) elif template_feat_size == 12: index = slice(5, 7) elif template_feat_size == 7: index = slice(3, 4) else: raise NotImplementedError box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device) box_mask_z[:, index, index] = 1 box_mask_z = box_mask_z.flatten(1).to(torch.bool) elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX': box_mask_z = torch.zeros([bs, template_size, template_size], device=device) # box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:]) # (batch, 1, 128, 128) box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to( torch.float) # (batch, 1, 128, 128) # box_mask_z_vis = box_mask_z.cpu().numpy() box_mask_z = F.interpolate(box_mask_z, scale_factor=1. 
/ cfg.MODEL.BACKBONE.STRIDE, mode='bilinear', align_corners=False) box_mask_z = box_mask_z.flatten(1).to(torch.bool) # box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy() # gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy() else: raise NotImplementedError return box_mask_z def adjust_keep_rate(epoch, warmup_epochs, total_epochs, ITERS_PER_EPOCH, base_keep_rate=0.5, max_keep_rate=1, iters=-1): if epoch < warmup_epochs: return 1 if epoch >= total_epochs: return base_keep_rate if iters == -1: iters = epoch * ITERS_PER_EPOCH total_iters = ITERS_PER_EPOCH * (total_epochs - warmup_epochs) iters = iters - ITERS_PER_EPOCH * warmup_epochs keep_rate = base_keep_rate + (max_keep_rate - base_keep_rate) \ * (math.cos(iters / total_iters * math.pi) + 1) * 0.5 return keep_rate ================================================ FILE: artrackv2_mindspore/lib/utils/focal_loss.py ================================================ from abc import ABC import torch import torch.nn as nn import torch.nn.functional as F class FocalLoss(nn.Module, ABC): def __init__(self, alpha=2, beta=4): super(FocalLoss, self).__init__() self.alpha = alpha self.beta = beta def forward(self, prediction, target): positive_index = target.eq(1).float() negative_index = target.lt(1).float() negative_weights = torch.pow(1 - target, self.beta) # clamp min value is set to 1e-12 to maintain the numerical stability prediction = torch.clamp(prediction, 1e-12) positive_loss = torch.log(prediction) * torch.pow(1 - prediction, self.alpha) * positive_index negative_loss = torch.log(1 - prediction) * torch.pow(prediction, self.alpha) * negative_weights * negative_index num_positive = positive_index.float().sum() positive_loss = positive_loss.sum() negative_loss = negative_loss.sum() if num_positive == 0: loss = -negative_loss else: loss = -(positive_loss + negative_loss) / num_positive return loss class LBHinge(nn.Module): """Loss that uses a 
class CenterNetHeatMap(object):
    """Helpers for splatting CenterNet-style gaussian score maps."""

    @staticmethod
    def generate_score_map(fmap, gt_class, gt_wh, centers_int, min_overlap):
        """Draw one gaussian per box onto its class channel of `fmap` (in place)."""
        radius = CenterNetHeatMap.get_gaussian_radius(gt_wh, min_overlap)
        radius = torch.clamp_min(radius, 0)
        radius = radius.type(torch.int).cpu().numpy()
        for idx in range(gt_class.shape[0]):
            CenterNetHeatMap.draw_gaussian(fmap[gt_class[idx]], centers_int[idx], radius[idx])

    @staticmethod
    def get_gaussian_radius(box_size, min_overlap):
        """
        copyed from CornerNet
        box_size (w, h), it could be a torch.Tensor, numpy.ndarray, list or tuple
        notice: we are using a bug-version, please refer to fix bug version in CornerNet
        """
        w, h = box_size[..., 0], box_size[..., 1]

        # Three quadratic cases from the CornerNet derivation; the /2 (instead
        # of /(2a)) is the documented "bug version" and is kept deliberately.
        b1 = h + w
        c1 = w * h * (1 - min_overlap) / (1 + min_overlap)
        r1 = (b1 + torch.sqrt(b1 ** 2 - 4 * c1)) / 2

        b2 = 2 * (h + w)
        c2 = (1 - min_overlap) * w * h
        r2 = (b2 + torch.sqrt(b2 ** 2 - 16 * c2)) / 2

        a3 = 4 * min_overlap
        b3 = -2 * min_overlap * (h + w)
        c3 = (min_overlap - 1) * w * h
        r3 = (b3 + torch.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2

        return torch.min(r1, torch.min(r2, r3))

    @staticmethod
    def gaussian2D(radius, sigma=1):
        """Return a (2*m+1) x (2*n+1) numpy gaussian kernel for radius (m, n)."""
        m, n = radius
        y, x = np.ogrid[-m: m + 1, -n: n + 1]
        kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
        # Zero the numerically-negligible tail.
        kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
        return kernel

    @staticmethod
    def draw_gaussian(fmap, center, radius, k=1):
        """Element-wise max of `fmap` and a gaussian centered at `center` (in place)."""
        diameter = 2 * radius + 1
        kernel = torch.Tensor(CenterNetHeatMap.gaussian2D((radius, radius), sigma=diameter / 6))
        x, y = int(center[0]), int(center[1])
        height, width = fmap.shape[:2]
        # Portion of the kernel that actually falls inside the map.
        left, right = min(x, radius), min(width - x, radius + 1)
        top, bottom = min(y, radius), min(height - y, radius + 1)
        roi = fmap[y - top: y + bottom, x - left: x + right]
        sub_kernel = kernel[radius - top: radius + bottom, radius - left: radius + right]
        if min(sub_kernel.shape) > 0 and min(roi.shape) > 0:
            fmap[y - top: y + bottom, x - left: x + right] = torch.max(roi, sub_kernel * k)
strides_expanded).int() * strides_expanded).float() + \ strides_expanded / 2 # M x N x 2 dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs() dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs() return (dist_x <= strides_expanded[:, :, 0] * range) & \ (dist_y <= strides_expanded[:, :, 0] * range) def get_pred(score_map_ctr, size_map, offset_map, feat_size): max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True) idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1) size = size_map.flatten(2).gather(dim=2, index=idx).squeeze(-1) offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1) return size * feat_size, offset ================================================ FILE: artrackv2_mindspore/lib/utils/image.py ================================================ ================================================ FILE: artrackv2_mindspore/lib/utils/lmdb_utils.py ================================================ import lmdb import numpy as np import cv2 import json LMDB_ENVS = dict() LMDB_HANDLES = dict() LMDB_FILELISTS = dict() def get_lmdb_handle(name): global LMDB_HANDLES, LMDB_FILELISTS item = LMDB_HANDLES.get(name, None) if item is None: env = lmdb.open(name, readonly=True, lock=False, readahead=False, meminit=False) LMDB_ENVS[name] = env item = env.begin(write=False) LMDB_HANDLES[name] = item return item def decode_img(lmdb_fname, key_name): handle = get_lmdb_handle(lmdb_fname) binfile = handle.get(key_name.encode()) if binfile is None: print("Illegal data detected. 
def merge_template_search(inp_list, return_search=False, return_template=False):
    """NOTICE: search region related features must be in the last place

    Concatenate per-image feature dicts into one sequence dict: "feat" and
    "pos" along dim 0, "mask" along dim 1.  Optionally also expose the raw
    search (last element, *_x keys) and template (first element, *_z keys).
    """
    merged = {"feat": torch.cat([item["feat"] for item in inp_list], dim=0),
              "mask": torch.cat([item["mask"] for item in inp_list], dim=1),
              "pos": torch.cat([item["pos"] for item in inp_list], dim=0)}
    if return_search:
        search = inp_list[-1]
        merged.update({"feat_x": search["feat"],
                       "mask_x": search["mask"],
                       "pos_x": search["pos"]})
    if return_template:
        template = inp_list[0]
        merged.update({"feat_z": template["feat"],
                       "mask_z": template["mask"],
                       "pos_z": template["pos"]})
    return merged
""" if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = torch.tensor([tensor.numel()], device="cuda") size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) if local_size != max_size: padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return 
data_list def reduce_dict(input_dict, average=True): """ Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction. """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.all_reduce(values) if average: values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ':' + str(len(str(len(iterable)))) + 'd' if torch.cuda.is_available(): log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', 
'{meters}', 'time: {time}', 'data: {data}', 'max mem: {memory:.0f}' ]) else: log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}' ]) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format( header, total_time_str, total_time / len(iterable))) def get_sha(): cwd = os.path.dirname(os.path.abspath(__file__)) def _run(command): return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() sha = 'N/A' diff = "clean" branch = 'N/A' try: sha = _run(['git', 'rev-parse', 'HEAD']) subprocess.check_output(['git', 'diff'], cwd=cwd) diff = _run(['git', 'diff-index', 'HEAD']) diff = "has uncommited changes" if diff else "clean" branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) except Exception: pass message = f"sha: {sha}, status: {diff}, branch: {branch}" return message def collate_fn(batch): batch = list(zip(*batch)) batch[0] = nested_tensor_from_tensor_list(batch[0]) return tuple(batch) def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] # get the first one for sublist in the_list[1:]: # [h,w,3] for index, item in enumerate(sublist): # index: 0,1,2 maxes[index] = max(maxes[index], item) # compare current max with the other 
elements in the whole return maxes class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): # type: (Device) -> NestedTensor # noqa cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: assert mask is not None cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): # TODO make this more general if tensor_list[0].ndim == 3: if torchvision._is_tracing(): # nested_tensor_from_tensor_list() does not export well to ONNX # call _onnx_nested_tensor_from_tensor_list() instead return _onnx_nested_tensor_from_tensor_list(tensor_list) # TODO make it support different-sized images max_size = _max_by_axis([list(img.shape) for img in tensor_list]) # [[3,h1,w1], [3,h2,w2], [3,h3,w3], ...] # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) batch_shape = [len(tensor_list)] + max_size # () b, c, h, w = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # copy valid regions of the images to the largest padded base. m[: img.shape[1], :img.shape[2]] = False else: raise ValueError('not supported') return NestedTensor(tensor, mask) # _onnx_nested_tensor_from_tensor_list() is an implementation of # nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
@torch.jit.unused def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: max_size = [] for i in range(tensor_list[0].dim()): max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) max_size.append(max_size_i) max_size = tuple(max_size) # work around for # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # m[: img.shape[1], :img.shape[2]] = False # which is not yet supported in onnx padded_imgs = [] padded_masks = [] for img in tensor_list: padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) padded_imgs.append(padded_img) m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) padded_masks.append(padded_mask.to(torch.bool)) tensor = torch.stack(padded_imgs) mask = torch.stack(padded_masks) return NestedTensor(tensor, mask=mask) def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = 
int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0) @torch.no_grad() def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" if target.numel() == 0: return [torch.zeros([], device=output.device)] maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor """ Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. 
""" if float(torchvision.__version__[:3]) < 0.7: if input.numel() > 0: return torch.nn.functional.interpolate( input, size, scale_factor, mode, align_corners ) output_shape = _output_size(2, input, size, scale_factor) output_shape = list(input.shape[:-2]) + list(output_shape) return _new_empty_tensor(input, output_shape) else: return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) ================================================ FILE: artrackv2_mindspore/lib/utils/tensor.py ================================================ import functools import torch import copy from collections import OrderedDict class TensorDict(OrderedDict): """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.""" def concat(self, other): """Concatenates two dicts without copying internal data.""" return TensorDict(self, **other) def copy(self): return TensorDict(super(TensorDict, self).copy()) def __deepcopy__(self, memodict={}): return TensorDict(copy.deepcopy(list(self), memodict)) def __getattr__(self, name): if not hasattr(torch.Tensor, name): raise AttributeError('\'TensorDict\' object has not attribute \'{}\''.format(name)) def apply_attr(*args, **kwargs): return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()}) return apply_attr def attribute(self, attr: str, *args): return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()}) def apply(self, fn, *args, **kwargs): return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()}) @staticmethod def _iterable(a): return isinstance(a, (TensorDict, list)) class TensorList(list): """Container mainly used for lists of torch tensors. 
Extends lists with pytorch functionality.""" def __init__(self, list_of_tensors = None): if list_of_tensors is None: list_of_tensors = list() super(TensorList, self).__init__(list_of_tensors) def __deepcopy__(self, memodict={}): return TensorList(copy.deepcopy(list(self), memodict)) def __getitem__(self, item): if isinstance(item, int): return super(TensorList, self).__getitem__(item) elif isinstance(item, (tuple, list)): return TensorList([super(TensorList, self).__getitem__(i) for i in item]) else: return TensorList(super(TensorList, self).__getitem__(item)) def __add__(self, other): if TensorList._iterable(other): return TensorList([e1 + e2 for e1, e2 in zip(self, other)]) return TensorList([e + other for e in self]) def __radd__(self, other): if TensorList._iterable(other): return TensorList([e2 + e1 for e1, e2 in zip(self, other)]) return TensorList([other + e for e in self]) def __iadd__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] += e2 else: for i in range(len(self)): self[i] += other return self def __sub__(self, other): if TensorList._iterable(other): return TensorList([e1 - e2 for e1, e2 in zip(self, other)]) return TensorList([e - other for e in self]) def __rsub__(self, other): if TensorList._iterable(other): return TensorList([e2 - e1 for e1, e2 in zip(self, other)]) return TensorList([other - e for e in self]) def __isub__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] -= e2 else: for i in range(len(self)): self[i] -= other return self def __mul__(self, other): if TensorList._iterable(other): return TensorList([e1 * e2 for e1, e2 in zip(self, other)]) return TensorList([e * other for e in self]) def __rmul__(self, other): if TensorList._iterable(other): return TensorList([e2 * e1 for e1, e2 in zip(self, other)]) return TensorList([other * e for e in self]) def __imul__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] *= e2 else: for i in 
range(len(self)): self[i] *= other return self def __truediv__(self, other): if TensorList._iterable(other): return TensorList([e1 / e2 for e1, e2 in zip(self, other)]) return TensorList([e / other for e in self]) def __rtruediv__(self, other): if TensorList._iterable(other): return TensorList([e2 / e1 for e1, e2 in zip(self, other)]) return TensorList([other / e for e in self]) def __itruediv__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] /= e2 else: for i in range(len(self)): self[i] /= other return self def __matmul__(self, other): if TensorList._iterable(other): return TensorList([e1 @ e2 for e1, e2 in zip(self, other)]) return TensorList([e @ other for e in self]) def __rmatmul__(self, other): if TensorList._iterable(other): return TensorList([e2 @ e1 for e1, e2 in zip(self, other)]) return TensorList([other @ e for e in self]) def __imatmul__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] @= e2 else: for i in range(len(self)): self[i] @= other return self def __mod__(self, other): if TensorList._iterable(other): return TensorList([e1 % e2 for e1, e2 in zip(self, other)]) return TensorList([e % other for e in self]) def __rmod__(self, other): if TensorList._iterable(other): return TensorList([e2 % e1 for e1, e2 in zip(self, other)]) return TensorList([other % e for e in self]) def __pos__(self): return TensorList([+e for e in self]) def __neg__(self): return TensorList([-e for e in self]) def __le__(self, other): if TensorList._iterable(other): return TensorList([e1 <= e2 for e1, e2 in zip(self, other)]) return TensorList([e <= other for e in self]) def __ge__(self, other): if TensorList._iterable(other): return TensorList([e1 >= e2 for e1, e2 in zip(self, other)]) return TensorList([e >= other for e in self]) def concat(self, other): return TensorList(super(TensorList, self).__add__(other)) def copy(self): return TensorList(super(TensorList, self).copy()) def unroll(self): if not 
any(isinstance(t, TensorList) for t in self): return self new_list = TensorList() for t in self: if isinstance(t, TensorList): new_list.extend(t.unroll()) else: new_list.append(t) return new_list def list(self): return list(self) def attribute(self, attr: str, *args): return TensorList([getattr(e, attr, *args) for e in self]) def apply(self, fn): return TensorList([fn(e) for e in self]) def __getattr__(self, name): if not hasattr(torch.Tensor, name): raise AttributeError('\'TensorList\' object has not attribute \'{}\''.format(name)) def apply_attr(*args, **kwargs): return TensorList([getattr(e, name)(*args, **kwargs) for e in self]) return apply_attr @staticmethod def _iterable(a): return isinstance(a, (TensorList, list)) def tensor_operation(op): def islist(a): return isinstance(a, TensorList) @functools.wraps(op) def oplist(*args, **kwargs): if len(args) == 0: raise ValueError('Must be at least one argument without keyword (i.e. operand).') if len(args) == 1: if islist(args[0]): return TensorList([op(a, **kwargs) for a in args[0]]) else: # Multiple operands, assume max two if islist(args[0]) and islist(args[1]): return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])]) if islist(args[0]): return TensorList([op(a, *args[1:], **kwargs) for a in args[0]]) if islist(args[1]): return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]]) # None of the operands are lists return op(*args, **kwargs) return oplist ================================================ FILE: artrackv2_mindspore/lib/utils/variable_hook.py ================================================ import torch from bytecode import Bytecode, Instr class get_local(object): cache = {} is_activate = False def __init__(self, varname): self.varname = varname def __call__(self, func): if not type(self).is_activate: return func type(self).cache[func.__qualname__] = [] c = Bytecode.from_code(func.__code__) extra_code = [ Instr('STORE_FAST', '_res'), Instr('LOAD_FAST', self.varname), 
Instr('STORE_FAST', '_value'), Instr('LOAD_FAST', '_res'), Instr('LOAD_FAST', '_value'), Instr('BUILD_TUPLE', 2), Instr('STORE_FAST', '_result_tuple'), Instr('LOAD_FAST', '_result_tuple'), ] c[-1:-1] = extra_code func.__code__ = c.to_code() def wrapper(*args, **kwargs): res, values = func(*args, **kwargs) if isinstance(values, torch.Tensor): type(self).cache[func.__qualname__].append(values.detach().cpu().numpy()) elif isinstance(values, list): # list of Tensor type(self).cache[func.__qualname__].append([value.detach().cpu().numpy() for value in values]) else: raise NotImplementedError return res return wrapper @classmethod def clear(cls): for key in cls.cache.keys(): cls.cache[key] = [] @classmethod def activate(cls): cls.is_activate = True ================================================ FILE: artrackv2_mindspore/lib/vis/__init__.py ================================================ ================================================ FILE: artrackv2_mindspore/lib/vis/plotting.py ================================================ import matplotlib.pyplot as plt import numpy as np import torch import cv2 def draw_figure(fig): fig.canvas.draw() fig.canvas.flush_events() plt.pause(0.001) def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None): """Display a 2D tensor. args: fig_num: Figure number. title: Title of figure. """ a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim == 3: a_np = np.transpose(a_np, (1, 2, 0)) if ax is None: fig = plt.figure(fig_num) plt.tight_layout() plt.cla() plt.imshow(a_np, vmin=range[0], vmax=range[1]) plt.axis('off') plt.axis('equal') if title is not None: plt.title(title) draw_figure(fig) else: ax.cla() ax.imshow(a_np, vmin=range[0], vmax=range[1]) ax.set_axis_off() ax.axis('equal') if title is not None: ax.set_title(title) draw_figure(plt.gcf()) def plot_graph(a: torch.Tensor, fig_num = None, title = None): """Plot graph. Data is a 1D tensor. args: fig_num: Figure number. title: Title of figure. 
""" a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim > 1: raise ValueError fig = plt.figure(fig_num) # plt.tight_layout() plt.cla() plt.plot(a_np) if title is not None: plt.title(title) draw_figure(fig) def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None): im_np = im.clone().cpu().squeeze().numpy() im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8)) boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int) # Draw proposals for i_ in range(boxes.shape[0]): if disp_ids is None or disp_ids[i_]: bb = boxes[i_, :] disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256) cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), disp_color, 1) if iou_pred is not None: text_pos = (bb[0], bb[1] - 5) cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False) im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float() return im_tensor def _pascal_color_map(N=256, normalized=False): """ Python implementation of the color map function for the PASCAL VOC data set. Official Matlab version can be found in the PASCAL VOC devkit http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit """ def bitget(byteval, idx): return (byteval & (1 << idx)) != 0 dtype = 'float32' if normalized else 'uint8' cmap = np.zeros((N, 3), dtype=dtype) for i in range(N): r = g = b = 0 c = i for j in range(8): r = r | (bitget(c, 0) << 7 - j) g = g | (bitget(c, 1) << 7 - j) b = b | (bitget(c, 2) << 7 - j) c = c >> 3 cmap[i] = np.array([r, g, b]) cmap = cmap / 255 if normalized else cmap return cmap def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None): """ Overlay mask over image. Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py This function allows you to overlay a mask over an image with some transparency. # Arguments im: Numpy Array. 
Array with the image. The shape must be (H, W, 3) and the pixels must be represented as `np.uint8` data type. ann: Numpy Array. Array with the mask. The shape must be (H, W) and the values must be intergers alpha: Float. Proportion of alpha to apply at the overlaid mask. colors: Numpy Array. Optional custom colormap. It must have shape (N, 3) being N the maximum number of colors to represent. contour_thickness: Integer. Thickness of each object index contour draw over the overlay. This function requires to have installed the package `opencv-python`. # Returns Numpy Array: Image of the overlay with shape (H, W, 3) and data type `np.uint8`. """ im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int) if im.shape[:-1] != ann.shape: raise ValueError('First two dimensions of `im` and `ann` must match') if im.shape[-1] != 3: raise ValueError('im must have three channels at the 3 dimension') colors = colors or _pascal_color_map() colors = np.asarray(colors, dtype=np.uint8) mask = colors[ann] fg = im * alpha + (1 - alpha) * mask img = im.copy() img[ann > 0] = fg[ann > 0] if contour_thickness: # pragma: no cover import cv2 for obj_id in np.unique(ann[ann > 0]): contours = cv2.findContours((ann == obj_id).astype( np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(), contour_thickness) return img ================================================ FILE: artrackv2_mindspore/lib/vis/utils.py ================================================ import torch import numpy as np def numpy_to_torch(a: np.ndarray): return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0) ================================================ FILE: artrackv2_mindspore/tracking/_init_paths.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path as osp import sys def add_path(path): if path not in sys.path: 
sys.path.insert(0, path) this_dir = osp.dirname(__file__) prj_path = osp.join(this_dir, '..') add_path(prj_path) ================================================ FILE: artrackv2_mindspore/tracking/analysis_results.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", "%matplotlib inline\n", "import os\n", "import sys\n", "import matplotlib.pyplot as plt\n", "plt.rcParams['figure.figsize'] = [14, 8]\n", "\n", "sys.path.append('/home/yebotao/OSTrack')\n", "from lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results, print_results_per_video\n", "from lib.test.evaluation import get_dataset, trackerlist" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "dataset_name = 'lasot'\n", "\n", "trackers = []\n", "trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_256_mae_ce_32x4_ep300', dataset_name=dataset_name,\n", " run_ids=None, display_name='OSTrack256'))\n", "trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name,\n", " run_ids=None, display_name='OSTrack384'))\n", "\n", "dataset = get_dataset(dataset_name)\n", "# plot_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec'),\n", "# skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05, exclude_invalid_frames=False)\n", "print_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'))\n", "# print_results_per_video(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'),\n", "# per_video=True, force_evaluation=True)\n", "# print_per_sequence_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'))" ], 
"metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: artrackv2_mindspore/tracking/analysis_results.py ================================================ import _init_paths import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [8, 8] from lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results from lib.test.evaluation import get_dataset, trackerlist trackers = [] #dataset_name = 'lasot_extension_subset' dataset_name = 'lasot' """stark""" # trackers.extend(trackerlist(name='stark_s', parameter_name='baseline', dataset_name=dataset_name, # run_ids=None, display_name='STARK-S50')) # trackers.extend(trackerlist(name='stark_st', parameter_name='baseline', dataset_name=dataset_name, # run_ids=None, display_name='STARK-ST50')) # trackers.extend(trackerlist(name='stark_st', parameter_name='baseline_R101', dataset_name=dataset_name, # run_ids=None, display_name='STARK-ST101')) """TransT""" # trackers.extend(trackerlist(name='TransT_N2', parameter_name=None, dataset_name=None, # run_ids=None, display_name='TransT_N2', result_only=True)) # trackers.extend(trackerlist(name='TransT_N4', parameter_name=None, dataset_name=None, # run_ids=None, display_name='TransT_N4', result_only=True)) """pytracking""" # trackers.extend(trackerlist('atom', 'default', None, range(0,5), 'ATOM')) # trackers.extend(trackerlist('dimp', 'dimp18', None, range(0,5), 'DiMP18')) # trackers.extend(trackerlist('dimp', 'dimp50', None, range(0,5), 'DiMP50')) # trackers.extend(trackerlist('dimp', 'prdimp18', None, range(0,5), 
'PrDiMP18')) # trackers.extend(trackerlist('dimp', 'prdimp50', None, range(0,5), 'PrDiMP50')) """ostrack""" trackers.extend(trackerlist(name='ostrack', parameter_name='finetune_384', dataset_name=dataset_name, run_ids=None, display_name='OSTrack256')) #trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name, #. run_ids=None, display_name='OSTrack384')) dataset = get_dataset(dataset_name) # dataset = get_dataset('otb', 'nfs', 'uav', 'tc128ce') # plot_results(trackers, dataset, 'OTB2015', merge_results=True, plot_types=('success', 'norm_prec'), # skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05) print_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'norm_prec', 'prec')) # print_results(trackers, dataset, 'UNO', merge_results=True, plot_types=('success', 'prec')) ================================================ FILE: artrackv2_mindspore/tracking/analysis_results_ITP.py ================================================ import _init_paths import argparse from lib.test.analysis.plot_results import print_results from lib.test.evaluation import get_dataset, trackerlist def parse_args(): """ args for evaluation. 
""" parser = argparse.ArgumentParser(description='Parse args for training') # for train parser.add_argument('--script', type=str, help='training script name') parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name') args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() trackers = [] trackers.extend(trackerlist(args.script, args.config, "None", None, args.config)) dataset = get_dataset('lasot') print_results(trackers, dataset, 'LaSOT', merge_results=True, plot_types=('success', 'prec', 'norm_prec')) ================================================ FILE: artrackv2_mindspore/tracking/convert_transt.py ================================================ import _init_paths import os from lib.test.evaluation import get_dataset import shutil trackers = [] # dataset_name = 'uav' dataset_name = 'nfs' root_dir = "/data/sda/v-yanbi/iccv21/STARK_Latest/Stark" base_dir = os.path.join(root_dir, "test/tracking_results/TransT_N2") dataset = get_dataset(dataset_name) for x in dataset: seq_name = x.name file_name = "%s.txt" % (seq_name.replace("nfs_", "")) file_path = os.path.join(base_dir, file_name) file_path_new = os.path.join(base_dir, "%s.txt" % seq_name) if os.path.exists(file_path): shutil.move(file_path, file_path_new) ================================================ FILE: artrackv2_mindspore/tracking/create_default_local_file.py ================================================ import argparse import os import _init_paths from lib.train.admin import create_default_local_file_ITP_train from lib.test.evaluation import create_default_local_file_ITP_test def parse_args(): parser = argparse.ArgumentParser(description='Create default local file on ITP or PAI') parser.add_argument("--workspace_dir", type=str, required=True) # workspace dir parser.add_argument("--data_dir", type=str, required=True) parser.add_argument("--save_dir", type=str, required=True) args = parser.parse_args() return args if __name__ == 
if __name__ == "__main__":
    args = parse_args()
    workspace_dir = os.path.realpath(args.workspace_dir)
    data_dir = os.path.realpath(args.data_dir)
    save_dir = os.path.realpath(args.save_dir)
    create_default_local_file_ITP_train(workspace_dir, data_dir)
    create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir)


# ================================================
# FILE: artrackv2_mindspore/tracking/download_pytracking_results.py
# ================================================
import os
import sys
import gdown
import re
import shutil
import argparse
import tempfile
import _init_paths
from lib.test.evaluation.environment import env_settings

# Google-Drive file ids for the published PyTracking result archives,
# keyed by tracker name, then "<param>_<run>.zip".
pytracking_results_link_dict = {
    "dimp": {
        "prdimp50_003.zip": "1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc",
        "prdimp50_002.zip": "1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz",
        "prdimp50_001.zip": "17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS",
        "prdimp50_000.zip": "1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6",
        "prdimp18_004.zip": "1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM",
        "prdimp18_003.zip": "1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO",
        "prdimp18_002.zip": "17lMllYhygCqgE81DoHX4BZar3xc3auzM",
        "prdimp18_001.zip": "1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G",
        "prdimp18_000.zip": "1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z",
        "prdimp50_004.zip": "1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO",
        "dimp50_004.zip": "1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa",
        "dimp50_000.zip": "1LCgf5sg453Z4bY37A_W5mbXeG68U1fET",
        "dimp18_000.zip": "17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g",
        "dimp18_001.zip": "1AsiliVgISyDTouYOQYVOXA0srj3YskhJ",
        "dimp18_002.zip": "1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y",
        "dimp50_001.zip": "1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K",
        "dimp18_004.zip": "1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg",
        "dimp18_003.zip": "1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8",
        "dimp50_003.zip": "1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy",
        "dimp50_002.zip": "1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4",
    },
    "atom": {
        "default_004.zip": "1BapnQh_8iRM44DXj862eOZV4q8zQLdmT",
        "default_003.zip": "1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5",
        "default_000.zip": "1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5",
        "default_002.zip": "1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC",
        "default_001.zip": "1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt",
    },
    "kys": {
        "default_004.zip": "1QdfkA3d4MzKwdDiBOM1ZhDJWk9NmALxD",
        "default_000.zip": "1SCs79_ePTc8zxPDzRAgAmbbRlnmE89SN",
        "default_003.zip": "1TCzq38QW4YiMrgU5VR6NAEefJ85gwzfT",
        "default_002.zip": "1_9u1ybCFxHu0yJmW5ZzDR4-isJMEUsDf",
        "default_001.zip": "1utJhdosNj6vlI75dfzUxGM3Vy8OjWslT",
    },
}


def _download_file(file_id, path):
    """Download one Google-Drive file (by id) to `path` via gdown."""
    link = 'https://drive.google.com/uc?id=' + file_id
    gdown.download(link, path, quiet=True)


def download_results(download_path, trackers='pytracking'):
    """ Script to automatically download tracker results for PyTracking.
    args:
        download_path - Directory where the zipped results are downloaded
        trackers - Tracker results which are to be downloaded. If set to 'pytracking', results for all pytracking
                   based trackers will be downloaded. If set to 'external', results for available external trackers
                   will be downloaded. If set to 'all', all available results are downloaded. If set to a name of a
                   tracker (e.g. atom), all results for that tracker are downloaded. Otherwise, it can be set to a
                   dict, where the keys are the names of the trackers for which results are downloaded. The value can
                   be set to either 'all', in which case all available results for the tracker are downloaded. Else
                   the value should be a list of parameter file names.
    """
    # FIX: garbled ''…'' quoting in the original message printed no quotes.
    print("Using download path '{}'".format(download_path))

    os.makedirs(download_path, exist_ok=True)

    # FIX: `external_results_link_dict` is not defined anywhere in this module
    # (the merge code below is commented out), so the 'all'/'external'
    # branches raised NameError. Fall back to an empty mapping when absent.
    external_links = globals().get('external_results_link_dict', {})

    if isinstance(trackers, str):
        if trackers == 'all':
            all_trackers = list(pytracking_results_link_dict.keys()) + list(external_links.keys())
            trackers = {k: 'all' for k in all_trackers}
        elif trackers == 'pytracking':
            trackers = {k: 'all' for k in pytracking_results_link_dict.keys()}
        elif trackers == 'external':
            trackers = {k: 'all' for k in external_links.keys()}
        elif trackers in pytracking_results_link_dict or trackers in external_links:
            trackers = {trackers: 'all'}
        else:
            raise Exception("tracker_list must be set to 'all', a tracker name, or be a dict")
    elif isinstance(trackers, dict):
        pass
    else:
        raise Exception("tracker_list must be set to 'all', or be a dict")

    common_link_dict = pytracking_results_link_dict
    # for k, v in external_results_link_dict.items():
    #     common_link_dict[k] = v

    for trk, runfiles in trackers.items():
        trk_path = os.path.join(download_path, trk)
        if not os.path.exists(trk_path):
            os.makedirs(trk_path)

        if runfiles == 'all':
            for params, fileid in common_link_dict[trk].items():
                print('Downloading: {}/{}'.format(trk, params))
                _download_file(fileid, os.path.join(trk_path, params))
        elif isinstance(runfiles, (list, tuple)):
            for p in runfiles:
                for params, fileid in common_link_dict[trk].items():
                    # Match "<param>.zip" or "<param>_NNN.zip".
                    if re.match(r'{}(|_(\d\d\d)).zip'.format(p), params) is not None:
                        print('Downloading: {}/{}'.format(trk, params))
                        _download_file(fileid, os.path.join(trk_path, params))
        else:
            raise Exception("tracker_list values must either be set to 'all', or be a list of param names")


def unpack_tracking_results(download_path, output_path=None):
    """ Unpacks zipped benchmark results. The directory 'download_path' should have the following structure
    - root
        - tracker1
            - param1.zip
            - param2.zip
            .
            .
        - tracker2
            - param1.zip
            - param2.zip
            .
            .
    args:
        download_path - Path to the directory where the zipped results are stored
        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path
                      by default
    """
    if output_path is None:
        output_path = env_settings().results_path

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    trackers = os.listdir(download_path)

    for t in trackers:
        runfiles = os.listdir(os.path.join(download_path, t))

        for r in runfiles:
            save_path = os.path.join(output_path, t)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            # "param_000.zip" -> unpack into "<output>/<tracker>/param_000/".
            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')


def main():
    parser = argparse.ArgumentParser(description='Download and unpack zipped results')
    parser.add_argument('--tracker', type=str, default='pytracking',
                        help='Name of tracker results to download, or "pytracking" (downloads results for PyTracking'
                             ' based trackers, or "external" (downloads results for external trackers) or "all"')
    parser.add_argument('--output_path', type=str, default=None,
                        help='Path to the directory where the results will be unpacked.')
    parser.add_argument('--temp_download_path', type=str, default=None,
                        help='Temporary path used for downloading the Zip files.')
    # NOTE(review): argparse `type=bool` treats any non-empty string
    # (including "False") as True; kept as-is to preserve the CLI contract.
    parser.add_argument('--download', type=bool, default=True,
                        help='Whether to download results or unpack existing downloaded files.')

    args = parser.parse_args()

    download_path = args.temp_download_path
    if download_path is None:
        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())

    if args.download:
        download_results(download_path, args.tracker)

    unpack_tracking_results(download_path, args.output_path)


if __name__ == '__main__':
    main()


# ================================================
# FILE: artrackv2_mindspore/tracking/pre_read_datasets.py
# ================================================
import _init_paths
import multiprocessing as mp
import argparse
import os
from lib.utils.lmdb_utils import decode_str
import time
import json
parse_args(): """ args for training. """ parser = argparse.ArgumentParser(description='Parse args for training') parser.add_argument('--data_dir', type=str, help='directory where lmdb data is located') parser.add_argument('--dataset_str', type=str, help="which datasets to use") args = parser.parse_args() return args def get_trknet_dict(trknet_dir): with open(os.path.join(trknet_dir, "seq_list.json"), "r") as f: seq_list = json.loads(f.read()) res_dict = {} set_idx_pre = -1 for set_idx, seq_name in seq_list: if set_idx != set_idx_pre: res_dict[set_idx] = "anno/%s.txt" % seq_name set_idx_pre = set_idx return res_dict def target(lmdb_dir, key_name): _ = decode_str(lmdb_dir, key_name) if __name__ == "__main__": args = parse_args() data_dir = args.data_dir dataset_str = args.dataset_str key_dict = {"got10k_lmdb": "train/list.txt", "lasot_lmdb": "LaSOTBenchmark.json", "coco_lmdb": "annotations/instances_train2017.json", "vid_lmdb": "cache.json"} print("Ready to pre load datasets") start = time.time() ps = [] datasets = [] if 'g' in dataset_str: datasets.append("got10k_lmdb") if 'l' in dataset_str: datasets.append("lasot_lmdb") if 'c' in dataset_str: datasets.append("coco_lmdb") if 'v' in dataset_str: datasets.append("vid_lmdb") for dataset in datasets: lmdb_dir = os.path.join(data_dir, dataset) p = mp.Process(target=target, args=(lmdb_dir, key_dict[dataset])) print("add %s %s to job queue" % (lmdb_dir, key_dict[dataset])) ps.append(p) # deal with trackingnet if 't' in dataset_str: trknet_dict = get_trknet_dict(os.path.join(data_dir, "trackingnet_lmdb")) for set_idx, seq_path in trknet_dict.items(): lmdb_dir = os.path.join(data_dir, "trackingnet_lmdb", "TRAIN_%d_lmdb" % set_idx) p = mp.Process(target=target, args=(lmdb_dir, seq_path)) print("add %s %s to job queue" % (lmdb_dir, seq_path)) ps.append(p) for p in ps: p.start() for p in ps: p.join() print("Pre read over") end = time.time() hour = (end - start) / 3600 print("it takes %.2f hours to pre-read data" % hour) 
# ================================================
# FILE: artrackv2_mindspore/tracking/profile_model.py
# ================================================
import os
import sys

prj_path = os.path.join(os.path.dirname(__file__), '..')
if prj_path not in sys.path:
    sys.path.append(prj_path)

import argparse
import torch
from lib.utils.misc import NestedTensor
from thop import profile
from thop.utils import clever_format
import time
import importlib


def parse_args():
    """ args for training.
    """
    parser = argparse.ArgumentParser(description='Parse args for training')
    # for train
    parser.add_argument('--script', type=str, default='ostrack', choices=['ostrack'],
                        help='training script name')
    parser.add_argument('--config', type=str, default='2stage_256_got', help='yaml configure file name')
    args = parser.parse_args()

    return args


def evaluate_vit(model, template, search, seq_input, stage):
    '''Speed Test'''
    #macs1, params1 = profile(model, inputs=(template, search),
    #                         custom_ops=None, verbose=False)
    #macs, params = clever_format([macs1, params1], "%.3f")
    #print('overall macs is ', macs)
    #print('overall params is ', params)
    T_w = 500   # warm-up iterations
    T_t = 1000  # timed iterations
    print("testing speed ...")
    torch.cuda.synchronize()
    with torch.no_grad():
        # overall
        for i in range(T_w):
            _ = model(template, search, seq_input=seq_input, stage=stage)
        # FIX: synchronize after warm-up so GPU work still queued from the
        # warm-up loop is not billed to the timed section below.
        torch.cuda.synchronize()
        start = time.time()
        for i in range(T_t):
            _ = model(template, search, seq_input=seq_input, stage=stage)
        torch.cuda.synchronize()
        end = time.time()
        avg_lat = (end - start) / T_t
        print("The average overall latency is %.2f ms" % (avg_lat * 1000))
        print("FPS is %.2f fps" % (1. / avg_lat))
        # for i in range(T_w):
        #     _ = model(template, search)
        # start = time.time()
        # for i in range(T_t):
        #     _ = model(template, search)
        # end = time.time()
        # avg_lat = (end - start) / T_t
        # print("The average backbone latency is %.2f ms" % (avg_lat * 1000))


def evaluate_vit_separate(model, template, search):
    '''Speed Test'''
    # NOTE(review): this path times CPU wall-clock without cuda.synchronize();
    # numbers are only indicative if the model runs asynchronously on GPU.
    T_w = 50
    T_t = 1000
    print("testing speed ...")
    z = model.forward_backbone(template, image_type='template')
    x = model.forward_backbone(search, image_type='search')
    with torch.no_grad():
        # overall
        for i in range(T_w):
            _ = model.forward_backbone(search, image_type='search')
            _ = model.forward_cat(z, x)
        start = time.time()
        for i in range(T_t):
            _ = model.forward_backbone(search, image_type='search')
            _ = model.forward_cat(z, x)
        end = time.time()
        avg_lat = (end - start) / T_t
        print("The average overall latency is %.2f ms" % (avg_lat * 1000))


def get_data(bs, sz):
    """Build a random NestedTensor batch: image patch + boolean attention mask."""
    img_patch = torch.randn(bs, 3, sz, sz)
    att_mask = torch.rand(bs, sz, sz) > 0.5
    return NestedTensor(img_patch, att_mask)


if __name__ == "__main__":
    device = "cuda:1"
    torch.cuda.set_device(device)
    # Compute the Flops and Params of our STARK-S model
    args = parse_args()
    '''update cfg'''
    yaml_fname = 'experiments/%s/%s.yaml' % (args.script, args.config)
    config_module = importlib.import_module('lib.config.%s.config' % args.script)
    cfg = config_module.cfg
    config_module.update_config_from_file(yaml_fname)
    print(cfg)
    '''set some values'''
    bs = 1
    z_sz = cfg.TEST.TEMPLATE_SIZE
    x_sz = cfg.TEST.SEARCH_SIZE
    print(x_sz)
    print(z_sz)

    if args.script == "ostrack":
        model_module = importlib.import_module('lib.models')
        model_constructor = model_module.build_ostrack
        model = model_constructor(cfg, training=False)
        # get the template and search
        template = torch.randn(bs, 2, 3, z_sz, z_sz)
        search = torch.randn(bs, 3, x_sz, x_sz)
        # transfer to device
        model = model.to(device)
        model = model.eval()
        template = template.to(device)
        search = search.to(device)
        merge_layer = cfg.MODEL.BACKBONE.MERGE_LAYER
torch.Tensor([[1]]) #seq_input = torch.Tensor([[1,2,3,4,5]]) #seq_input = torch.Tensor([[5,6,7,8,9,10,11,12,13,14,15,16,17,1,2,3]]) seq_input = torch.Tensor([[5,6,7,8,9,10,11,12,13,14,15,16,1,2,3,4,17,18,19,20]]).to(device).repeat(bs, 1) stage = "doit" #stage = None if merge_layer <= 0: evaluate_vit(model, template, search, seq_input, stage) else: evaluate_vit_separate(model, template, search) else: raise NotImplementedError ================================================ FILE: artrackv2_mindspore/tracking/test.py ================================================ import os import sys import argparse import mindspore as ms from mindspore import context import mindspore prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) from lib.test.evaluation import get_dataset from lib.test.evaluation.running import run_dataset from lib.test.evaluation.tracker import Tracker def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0, num_gpus=8): """Run tracker on sequence or dataset. args: tracker_name: Name of tracking method. tracker_param: Name of parameter file. run_id: The run id. dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot). sequence: Sequence number or name. debug: Debug level. threads: Number of threads. 
""" dataset = get_dataset(dataset_name) if sequence is not None: dataset = [dataset[sequence]] trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)] run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus) def main(): context.set_context(device_target="GPU") mindspore.run_check() parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.') parser.add_argument('tracker_name', type=str, help='Name of tracking method.') parser.add_argument('tracker_param', type=str, help='Name of config file.') parser.add_argument('--runid', type=int, default=None, help='The run id.') parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).') parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.') parser.add_argument('--debug', type=int, default=0, help='Debug level.') parser.add_argument('--threads', type=int, default=0, help='Number of threads.') parser.add_argument('--num_gpus', type=int, default=8) args = parser.parse_args() try: seq_name = int(args.sequence) except: seq_name = args.sequence run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug, args.threads, num_gpus=args.num_gpus) if __name__ == '__main__': main() ================================================ FILE: artrackv2_mindspore/tracking/test_exp.py ================================================ import os import sys import argparse prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) from lib.test.evaluation import get_dataset from lib.test.evaluation.running import run_dataset from lib.test.evaluation.tracker import Tracker def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0, num_gpus=8): """Run tracker on sequence or dataset. args: tracker_name: Name of tracking method. 
tracker_param: Name of parameter file. run_id: The run id. dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot). sequence: Sequence number or name. debug: Debug level. threads: Number of threads. """ dataset = get_dataset(*dataset_name) if sequence is not None: dataset = [dataset[sequence]] trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)] run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus) def main(): parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.') parser.add_argument('tracker_name', type=str, help='Name of tracking method.') parser.add_argument('tracker_param', type=str, help='Name of config file.') parser.add_argument('--runid', type=int, default=None, help='The run id.') parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).') parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.') parser.add_argument('--debug', type=int, default=0, help='Debug level.') parser.add_argument('--threads', type=int, default=0, help='Number of threads.') parser.add_argument('--num_gpus', type=int, default=8) args = parser.parse_args() try: seq_name = int(args.sequence) except: seq_name = args.sequence args.dataset_name = ['trackingnet', 'got10k_test', 'lasot'] run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug, args.threads, num_gpus=args.num_gpus) if __name__ == '__main__': main() ================================================ FILE: artrackv2_mindspore/tracking/train.py ================================================ import os import argparse import random import torch def parse_args(): """ args for training. 
""" parser = argparse.ArgumentParser(description='Parse args for training') # for train parser.add_argument('--script', type=str, help='training script name') parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name') parser.add_argument('--save_dir', type=str, help='root directory to save checkpoints, logs, and tensorboard') parser.add_argument('--mode', type=str, choices=["single", "multiple", "multi_node"], default="multiple", help="train on single gpu or multiple gpus") parser.add_argument('--nproc_per_node', type=int, help="number of GPUs per node") # specify when mode is multiple parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0) # whether datasets are in lmdb format parser.add_argument('--script_prv', type=str, help='training script name') parser.add_argument('--config_prv', type=str, default='baseline', help='yaml configure file name') parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0) # whether to use wandb # for knowledge distillation parser.add_argument('--distill', type=int, choices=[0, 1], default=0) # whether to use knowledge distillation parser.add_argument('--script_teacher', type=str, help='teacher script name') parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name') # for multiple machines parser.add_argument('--rank', type=int, help='Rank of the current process.') parser.add_argument('--world-size', type=int, help='Number of processes participating in the job.') parser.add_argument('--ip', type=str, default='127.0.0.1', help='IP of the current rank 0.') parser.add_argument('--port', type=int, default='20000', help='Port of the current rank 0.') args = parser.parse_args() return args def main(): torch.set_num_threads(8) args = parse_args() if args.mode == "single": train_cmd = "python lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d " \ "--script_prv %s --config_prv %s --distill %d --script_teacher %s 
--config_teacher %s --use_wandb %d"\ % (args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.distill, args.script_teacher, args.config_teacher, args.use_wandb) elif args.mode == "multiple": train_cmd = "python -m torch.distributed.launch --nproc_per_node %d --master_port %d lib/train/run_training.py " \ "--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d " \ "--distill %d --script_teacher %s --config_teacher %s" \ % (args.nproc_per_node, random.randint(10000, 50000), args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher) elif args.mode == "multi_node": train_cmd = "python -m torch.distributed.launch --nproc_per_node %d --master_addr %s --master_port %d --nnodes %d --node_rank %d lib/train/run_training.py " \ "--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d " \ "--distill %d --script_teacher %s --config_teacher %s" \ % (args.nproc_per_node, args.ip, args.port, args.world_size, args.rank, args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher) else: raise ValueError("mode should be 'single' or 'multiple'.") os.system(train_cmd) if __name__ == "__main__": main() ================================================ FILE: artrackv2_mindspore/tracking/video_demo.py ================================================ import os import sys import argparse prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) from lib.test.evaluation import Tracker def run_video(tracker_name, tracker_param, videofile, optional_box=None, debug=None, save_results=False): """Run the tracker on your webcam. args: tracker_name: Name of tracking method. tracker_param: Name of parameter file. 
debug: Debug level. """ tracker = Tracker(tracker_name, tracker_param, "video") tracker.run_video(videofilepath=videofile, optional_box=optional_box, debug=debug, save_results=save_results) def main(): parser = argparse.ArgumentParser(description='Run the tracker on your webcam.') parser.add_argument('tracker_name', type=str, help='Name of tracking method.') parser.add_argument('tracker_param', type=str, help='Name of parameter file.') parser.add_argument('videofile', type=str, help='path to a video file.') parser.add_argument('--optional_box', type=float, default=None, nargs="+", help='optional_box with format x y w h.') parser.add_argument('--debug', type=int, default=0, help='Debug level.') parser.add_argument('--save_results', dest='save_results', action='store_true', help='Save bounding boxes') parser.set_defaults(save_results=False) args = parser.parse_args() run_video(args.tracker_name, args.tracker_param, args.videofile, args.optional_box, args.debug, args.save_results) if __name__ == '__main__': main() ================================================ FILE: artrackv2_mindspore/tracking/vis_results.py ================================================ import os import sys import time import torch import numpy as np import cv2 as cv from tqdm import tqdm from lib.vis.visdom_cus import Visdom env_path = os.path.join(os.path.dirname(__file__), '../lib') if env_path not in sys.path: sys.path.append(env_path) from lib.test.evaluation import trackerlist, get_dataset from lib.test.utils.load_text import load_text class VisResults(object): def __init__(self): self._init_visdom() def vis_dataset(self, dataset, trackers, skip_missing_seq=False, seq_list=[]): for seq_id, seq in enumerate(tqdm(dataset)): # Load anno seq_name = seq.name if seq_list: if seq_name not in seq_list: continue anno_bb = torch.tensor(seq.ground_truth_rect) target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None all_pred_boxes = [] for 
trk_id, trk in enumerate(trackers): # Load results base_results_path = '{}/{}'.format(trk.results_dir, seq.name) results_path = '{}.txt'.format(base_results_path) if os.path.isfile(results_path): pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64)) all_pred_boxes.append(pred_bb) else: if skip_missing_seq: break else: raise Exception('Result not found. {}'.format(results_path)) frame_list = seq.frames for i in range(len(anno_bb)): data = [] frame = frame_list[i] im = cv.imread(frame) im = cv.cvtColor(im, cv.COLOR_BGR2RGB) # im = torch.from_numpy(im).float().permute(2, 0, 1) # im = im.numpy() data.append(im) gt_box = anno_bb[i] data.append(gt_box) for tracker_result in all_pred_boxes: data.append(tracker_result[i]) while self.pause_mode: if self.step: self.step = False break if self.next_seq: self.next_seq = False break self.update_boxes(data, seq_name + '-' + str(i).zfill(3)) # self.update_seg_result(im, frame) def update_boxes(self, data, caption): caption = 'Green: GT, Red: stark_s, Yellow: stark_motion _' + caption self.visdom.register(data, 'Tracking', 1, 'Tracking', caption=caption) def update_seg_result(self, frame_img, frame_path): seg_mask_path = os.path.join(os.path.dirname(frame_path), 'seg_mask', os.path.basename(frame_path).replace('jpg', 'png')) seg_mask = cv.imread(seg_mask_path) alpha = 0.5 out_img = (alpha * frame_img) + ((1 - alpha) * seg_mask) if max(out_img.shape) > 480: resize_factor = 480.0 / float(max(out_img.shape)) out_img = cv.resize(out_img, None, fx=resize_factor, fy=resize_factor) out_img = torch.from_numpy(out_img).float().permute(2, 0, 1) self.visdom.register(out_img, 'image', 1, 'Segmentation Result') def _init_visdom(self, visdom_info=None): visdom_info = {} if visdom_info is None else visdom_info self.pause_mode = False self.step = False self.next_seq = False try: self.visdom = Visdom(1, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'}, visdom_info=visdom_info, env='vis_results') # 
Show help help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \ 'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \ 'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \ 'block list.' self.visdom.register(help_text, 'text', 1, 'Help') except: time.sleep(0.5) print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n' '!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!') def _visdom_ui_handler(self, data): if data['event_type'] == 'KeyPress': if data['key'] == ' ': self.pause_mode = not self.pause_mode elif data['key'] == 'n': self.next_seq = True elif data['key'] == 'ArrowRight' and self.pause_mode: self.step = True if __name__ == '__main__': viser = VisResults() dataset_name = 'lasot' trackers = [] # trackers.extend(trackerlist('defor_stark_s', 'baseline_got10k_only', None, 'defor_stark')) # trackers.extend(trackerlist('stark_s', 'baseline_got10k_only', None, 'stark')) # trackers.extend(trackerlist('dimp', 'dimp50', dataset_name, None, 'DiMP50')) # trackers.extend(trackerlist('sa', 'attn_direct', None, 'SA')) # ori # trackers.extend(trackerlist('sa', 'attn1', None, 'SA')) # extrat conv to conver backbone feature from 1024 to 32 # trackers.extend(trackerlist('sa', 'attn_segaddlabel', None, 'SA')) # add label to seg mask # trackers.extend(trackerlist('stark_s', 'baseline_got10k_only_encoder_only_ep500', None, 'STARK-S-Encoder-EP500')) # trackers.extend(trackerlist('stark_motion', 'baseline_got10k_only_ep100_002', None, 'STARK-motion-EP100')) # trackers.extend( # trackerlist('stark_motion', 'baseline_got10k_only_offset_ep100_001', None, 'STARK-motion-offset-EP100')) # trackers.extend(trackerlist('stark_s', 'baseline', dataset_name, None, 'stark_s')) # trackers.extend(trackerlist('stark_st', 'baseline', dataset_name, None, 'stark_st')) # trackers.extend(trackerlist('stark_mem', 
'baseline_roi', dataset_name, None, 'baseline')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 17, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 32, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 36, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, None, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 6, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 55, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 57, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_upsample_syncbn', dataset_name, None, 'baseline_roi_upsample_syncbn')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset, 9, 'update_template1')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ciou', dataset_name, None, 'update_template')) # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ciou', dataset_name, 1, 'update_template')) trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep500', dataset_name, None, 'baseline_roi_ep500')) trackers.extend(trackerlist('vit_tracker', 'cait_small_224_ep300', dataset_name, None, 'cait_small_224_ep300')) trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_new_32x4_ep300', dataset_name, None, 'cait_small_224_fcos_new_32x4_ep300')) trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_cn_32x4_ep300_test_cn', dataset_name, None, 'cait_small_224_fcos_cn_32x4_ep300_test_cn')) # trackers.extend(trackerlist('vit_tracker', 'cait_small_224_vfloss_64x2_ep100', dataset_name, None, 'cait_small_224_vfloss_64x2_ep100')) # trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_cn_32x4_ep300_retest', dataset_name, None, 'retest')) dataset = get_dataset(dataset_name) # 
trackers.extend(trackerlist('dimp', 'dimp50', None, 'DiMP50')) # trackers.extend(trackerlist('sa', 'attn_direct', None, 'SA')) # dataset = get_dataset('vot') # 'GOT-10k_Train_001350' viser.vis_dataset(dataset, trackers, seq_list=[]) # viser.vis_dataset(dataset, trackers, seq_list=['GOT-10k_Train_007446']) ================================================ FILE: experiments/artrack/artrack_256_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 50 EPOCH: 240 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 192 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 240 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrack/artrack_256_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3.0 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 
MODEL: BINS: 400 RANGE: 2 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 50 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 96 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 120 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrack/artrack_384_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 48 EPOCH: 240 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 192 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 240 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: experiments/artrack/artrack_large_384_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 1 STD: - 0.229 - 0.224 - 
0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 60000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 PRETRAIN_FILE: "mae_pretrain_vit_large.pth" BACKBONE: TYPE: vit_large_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 1024 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 15 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 96 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 120 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: experiments/artrack_seq/artrack_seq_256_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 36 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 60 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 999 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 60 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 
TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrack_seq/artrack_seq_256_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 36 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 30 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 999 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 30 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrack_seq/artrack_seq_384_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 32 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX 
NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 60 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 999 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 60 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: experiments/artrack_seq/artrack_seq_large_384_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 15 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_large.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_large_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 1024 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 60 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000004 LR_DROP_EPOCH: 999 NUM_WORKER: 4 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 60 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: experiments/artrackv2/artrackv2_256_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 
NUMBER: 2 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - COCO17 - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 - 1 SAMPLE_PER_EPOCH: 76800 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 EXTENSION: 3 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 32 EPOCH: 300 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 240 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 118 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrackv2/artrackv2_256_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 NUMBER: 2 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 76800 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 EXTENSION: 3 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 32 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 96 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 118 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 
================================================ FILE: experiments/artrackv2/artrackv2_large_384_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 1 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 NUMBER: 2 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 57600 VAL: DATASETS_NAME: - GOT10K_votval DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 EXTENSION: 6 PRETRAIN_FILE: "mae_pretrain_vit_large.pth" EXTRA_MERGER: False RETURN_INTER: False BACKBONE: TYPE: vit_large_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 1024 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 12 EPOCH: 100 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.000008 LR_DROP_EPOCH: 80 NUM_WORKER: 8 OPTIMIZER: ADAMW PRINT_INTERVAL: 10 SCHEDULER: TYPE: step DECAY_RATE: 0.1 VAL_EPOCH_INTERVAL: 20 WEIGHT_DECAY: 0.0001 AMP: False TEST: EPOCH: 100 SEARCH_FACTOR: 5.0 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: experiments/artrackv2_seq/artrackv2_seq_256_full.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 24 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 NUMBER: 2 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET DATASETS_RATIO: - 1 - 1 - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 EXTENSION: 3 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False DECODER: TYPE: "mask" MASK_RATIO: 0.75 EMBEDDIM: 512 DEPTH: 8 NUMHEADS: 16 MLPRATIO: 
4 BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 40 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 999 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.05 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 40 SEARCH_FACTOR: 4.0 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrackv2_seq/artrackv2_seq_256_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: CENTER_JITTER: 3 FACTOR: 4.0 SCALE_JITTER: 0.25 SIZE: 256 NUMBER: 24 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 128 NUMBER: 2 TRAIN: DATASETS_NAME: - GOT10K_train_full DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 400 RANGE: 2 EXTENSION: 3 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_base.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False DECODER: TYPE: "mask" MASK_RATIO: 0.75 EMBEDDIM: 512 DEPTH: 8 NUMHEADS: 16 MLPRATIO: 4 BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 768 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 30 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 999 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.05 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 30 SEARCH_FACTOR: 3.95 SEARCH_SIZE: 256 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 128 ================================================ FILE: experiments/artrackv2_seq/artrackv2_seq_large_384_got.yaml ================================================ DATA: MAX_SAMPLE_INTERVAL: 200 MEAN: - 0.485 - 0.456 - 0.406 SEARCH: 
CENTER_JITTER: 4.5 FACTOR: 5.0 SCALE_JITTER: 0.5 SIZE: 384 NUMBER: 12 STD: - 0.229 - 0.224 - 0.225 TEMPLATE: CENTER_JITTER: 0 FACTOR: 2.0 SCALE_JITTER: 0 SIZE: 192 NUMBER: 2 TRAIN: DATASETS_NAME: - LASOT - GOT10K_vottrain - TRACKINGNET - SAV DATASETS_RATIO: - 1 - 1 - 1 - 4 SAMPLE_PER_EPOCH: 1000 VAL: DATASETS_NAME: - GOT10K_official_val DATASETS_RATIO: - 1 SAMPLE_PER_EPOCH: 10000 MODEL: BINS: 600 RANGE: 2 EXTENSION: 6 PRENUM: 7 PRETRAIN_FILE: "mae_pretrain_vit_large.pth" PRETRAIN_PTH: "" EXTRA_MERGER: False RETURN_INTER: False DECODER: TYPE: "mask" MASK_RATIO: 0.75 EMBEDDIM: 512 DEPTH: 8 NUMHEADS: 16 MLPRATIO: 4 BACKBONE: TYPE: vit_base_patch16_224 STRIDE: 16 HEAD: TYPE: PIX NUM_CHANNELS: 1024 TRAIN: BACKBONE_MULTIPLIER: 0.1 DROP_PATH_RATE: 0.1 BATCH_SIZE: 8 EPOCH: 120 GIOU_WEIGHT: 2.0 L1_WEIGHT: 0.0 GRAD_CLIP_NORM: 0.1 LR: 0.00008 LR_DROP_EPOCH: 60 NUM_WORKER: 6 OPTIMIZER: ADAMW PRINT_INTERVAL: 1 SCHEDULER: TYPE: step DECAY_RATE: 0.05 VAL_EPOCH_INTERVAL: 10 WEIGHT_DECAY: 0.05 AMP: False TEST: EPOCH: 30 SEARCH_FACTOR: 4.55 SEARCH_SIZE: 384 TEMPLATE_FACTOR: 2.0 TEMPLATE_SIZE: 192 ================================================ FILE: external/AR/README.md ================================================ # Alpha-Refine ## Introduction Alpha-Refine is the winner of the VOT Real-Time Challenge 2020, which has great ability to predict high-quality masks. In this work, we combine the STARK tracker with Alpha-Refine to test on the VOT2020 benchmark. ## Installation After the environment has been installed according to the README.md of STARK, you only need to install a few more packages as shown below. * Install ninja-build for Precise ROI pooling ```bash sudo apt-get install ninja-build ``` In case of issues, we refer to https://github.com/vacancy/PreciseRoIPooling. * Install the Precise ROI pooling ``` cd ltr/external git clone https://github.com/vacancy/PreciseRoIPooling.git cd ../..
``` * Add the project path to environment variables ``` export PYTHONPATH=:$PYTHONPATH ``` * Setup the environment Create the default environment setting files. ```bash # Environment settings for pytracking. Saved at pytracking/evaluation/local.py python -c "from pytracking.evaluation.environment import create_default_local_file; create_default_local_file()" # Environment settings for ltr. Saved at ltr/admin/local.py python -c "from ltr.admin.environment import create_default_local_file; create_default_local_file()" ``` You can modify these files to set the paths to datasets, results paths etc. * Download the pre-trained Alpha-Refine network Download the network for [Alpha-Refine](https://drive.google.com/open?id=1qOQRfaRMbQ2nmgX1NFjoQHfXOAn609QM) and put it under the ltr/checkpoints/ltr/ARcm_seg/ARcm_coco_seg_only_mask_384 dir. ================================================ FILE: external/AR/__init__.py ================================================ ================================================ FILE: external/AR/ltr/README.md ================================================ # LTR A general PyTorch based framework for learning tracking representations. ## Table of Contents * [Quick Start](#quick-start) * [Overview](#overview) * [Trackers](#trackers) * [PrDiMP](#PrDiMP) * [DiMP](#DiMP) * [ATOM](#ATOM) * [Training your own networks](#training-your-own-networks) ## Quick Start The installation script will automatically generate a local configuration file "admin/local.py". In case the file was not generated, run ```admin.environment.create_default_local_file()``` to generate it. Next, set the paths to the training workspace, i.e. the directory where the checkpoints will be saved. Also set the paths to the datasets you want to use. If all the dependencies have been correctly installed, you can train a network using the run_training.py script in the correct conda environment. 
```bash conda activate pytracking python run_training.py train_module train_name ``` Here, ```train_module``` is the sub-module inside ```train_settings``` and ```train_name``` is the name of the train setting file to be used. For example, you can train using the included default ATOM settings by running: ```bash python run_training bbreg atom_default ``` ## Overview The framework consists of the following sub-modules. - [actors](actors): Contains the actor classes for different trainings. The actor class is responsible for passing the input data through the network can calculating losses. - [admin](admin): Includes functions for loading networks, tensorboard etc. and also contains environment settings. - [dataset](dataset): Contains integration of a number of training datasets, namely [TrackingNet](https://tracking-net.org/), [GOT-10k](http://got-10k.aitestunion.com/), [LaSOT](https://cis.temple.edu/lasot/), [ImageNet-VID](http://image-net.org/), [DAVIS](https://davischallenge.org), [YouTube-VOS](https://youtube-vos.org), [MS-COCO](http://cocodataset.org/#home), [SBD](http://home.bharathh.info/pubs/codes/SBD), [LVIS](https://www.lvisdataset.org), [ECSSD](http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html), [MSRA10k](https://mmcheng.net/msra10k), and [HKU-IS](https://sites.google.com/site/ligb86/hkuis). Additionally, it includes modules to generate synthetic videos from image datasets. - [data_specs](data_specs): Information about train/val splits of different datasets. - [data](data): Contains functions for processing data, e.g. loading images, data augmentations, sampling frames from videos. - [external](external): External libraries needed for training. Added as submodules. - [models](models): Contains different layers and network definitions. - [trainers](trainers): The main class which runs the training. - [train_settings](train_settings): Contains settings files, specifying the training of a network. 
## Trackers
The framework currently contains the training code for the following trackers.

### PrDiMP
The following setting files can be used to train the DiMP networks, or to know the exact training details.
 - [dimp.prdimp18](train_settings/dimp/prdimp18.py): The default settings used for training the PrDiMP model with ResNet-18 backbone.
 - [dimp.prdimp50](train_settings/dimp/prdimp50.py): The default settings used for training the PrDiMP model with ResNet-50 backbone.
 - [dimp.super_dimp](train_settings/dimp/super_dimp.py): Combines the bounding-box regressor of PrDiMP with the standard DiMP classifier and better training and inference settings.

### DiMP
The following setting files can be used to train the DiMP networks, or to know the exact training details.
 - [dimp.dimp18](train_settings/dimp/dimp18.py): The default settings used for training the DiMP model with ResNet-18 backbone.
 - [dimp.dimp50](train_settings/dimp/dimp50.py): The default settings used for training the DiMP model with ResNet-50 backbone.

### ATOM
The following setting file can be used to train the ATOM network, or to know the exact training details.
 - [bbreg.atom](train_settings/bbreg/atom_paper.py): The settings used in the paper for training the network in ATOM.
 - [bbreg.atom](train_settings/bbreg/atom.py): Newer settings used for training the network in ATOM, also utilizing the GOT10k dataset.
 - [bbreg.atom](train_settings/bbreg/atom_prob_ml.py): Settings for ATOM with the probabilistic bounding box regression proposed in [this paper](https://arxiv.org/abs/1909.12297).
 - [bbreg.atom](train_settings/bbreg/atom_paper.py): The baseline ATOM* setting evaluated in [this paper](https://arxiv.org/abs/1909.12297).

## Training your own networks
To train a custom network using the toolkit, the following components need to be specified in the train settings. For reference, see [atom.py](train_settings/bbreg/atom.py).
 - Datasets: The datasets to be used for training.
A number of standard tracking datasets are already available in ```dataset``` module. - Processing: This function should perform the necessary post-processing of the data, e.g. cropping of target region, data augmentations etc. - Sampler: Determines how the frames are sampled from a video sequence to form the batches. - Network: The network module to be trained. - Objective: The training objective. - Actor: The trainer passes the training batch to the actor who is responsible for passing the data through the network correctly, and calculating the training loss. - Optimizer: Optimizer to be used, e.g. Adam. - Trainer: The main class which runs the epochs and saves checkpoints. ================================================ FILE: external/AR/ltr/__init__.py ================================================ from .admin.loading import load_network from .admin.model_constructor import model_constructor from .admin.multigpu import MultiGPU ================================================ FILE: external/AR/ltr/actors/__init__.py ================================================ from .base_actor import BaseActor from .bbreg import AtomActor from .tracking import DiMPActor ================================================ FILE: external/AR/ltr/actors/base_actor.py ================================================ from pytracking import TensorDict class BaseActor: """ Base class for actor. The actor class handles the passing of the data through the network and calculation the loss""" def __init__(self, net, objective): """ args: net - The network to train objective - The loss function """ self.net = net self.objective = objective def __call__(self, data: TensorDict): """ Called in each training iteration. Should pass in input data through the network, calculate the loss, and return the training stats for the input data args: data - A TensorDict containing all the necessary data blocks. 
returns: loss - loss for the input data stats - a dict containing detailed losses """ raise NotImplementedError def to(self, device): """ Move the network to device args: device - device to use. 'cpu' or 'cuda' """ self.net.to(device) def train(self, mode=True): """ Set whether the network is in train mode. args: mode (True) - Bool specifying whether in training mode. """ self.net.train(mode) def eval(self): """ Set network to eval mode""" self.train(False) ================================================ FILE: external/AR/ltr/actors/bbreg.py ================================================ from . import BaseActor class AtomActor(BaseActor): """ Actor for training the IoU-Net in ATOM""" def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals' and 'proposal_iou'. returns: loss - the training loss states - dict containing detailed losses """ # Run network to obtain IoU prediction for each proposal in 'test_proposals' iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals']) iou_pred = iou_pred.view(-1, iou_pred.shape[2]) iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2]) # Compute loss loss = self.objective(iou_pred, iou_gt) # Return training stats stats = {'Loss/total': loss.item(), 'Loss/iou': loss.item()} return loss, stats class AtomBBKLActor(BaseActor): """ Actor for training the IoU-Net in ATOM with BBKL""" def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_density', and 'gt_density'. 
returns: loss - the training loss states - dict containing detailed losses """ # Run network to obtain IoU prediction for each proposal in 'test_proposals' bb_scores = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals']) bb_scores = bb_scores.view(-1, bb_scores.shape[2]) proposal_density = data['proposal_density'].view(-1, data['proposal_density'].shape[2]) gt_density = data['gt_density'].view(-1, data['gt_density'].shape[2]) # Compute loss loss = self.objective(bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1) # Return training stats stats = {'Loss/total': loss.item(), 'Loss/bb_ce': loss.item()} return loss, stats ================================================ FILE: external/AR/ltr/actors/tracking.py ================================================ from . import BaseActor import torch class DiMPActor(BaseActor): """Actor for training the DiMP network.""" def __init__(self, net, objective, loss_weight=None): super().__init__(net, objective) if loss_weight is None: loss_weight = {'iou': 1.0, 'test_clf': 1.0} self.loss_weight = loss_weight def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_iou' and 'test_label'. 
returns: loss - the training loss stats - dict containing detailed losses """ # Run network target_scores, iou_pred = self.net(train_imgs=data['train_images'], test_imgs=data['test_images'], train_bb=data['train_anno'], test_proposals=data['test_proposals']) # Classification losses for the different optimization iterations clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores] # Loss of the final filter clf_loss_test = clf_losses_test[-1] loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test # Compute loss for ATOM IoUNet loss_iou = self.loss_weight['iou'] * self.objective['iou'](iou_pred, data['proposal_iou']) # Loss for the initial filter iteration loss_test_init_clf = 0 if 'test_init_clf' in self.loss_weight.keys(): loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0] # Loss for the intermediate filter iterations loss_test_iter_clf = 0 if 'test_iter_clf' in self.loss_weight.keys(): test_iter_weights = self.loss_weight['test_iter_clf'] if isinstance(test_iter_weights, list): loss_test_iter_clf = sum([a*b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])]) else: loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1]) # Total loss loss = loss_iou + loss_target_classifier + loss_test_init_clf + loss_test_iter_clf # Log stats stats = {'Loss/total': loss.item(), 'Loss/iou': loss_iou.item(), 'Loss/target_clf': loss_target_classifier.item()} if 'test_init_clf' in self.loss_weight.keys(): stats['Loss/test_init_clf'] = loss_test_init_clf.item() if 'test_iter_clf' in self.loss_weight.keys(): stats['Loss/test_iter_clf'] = loss_test_iter_clf.item() stats['ClfTrain/test_loss'] = clf_loss_test.item() if len(clf_losses_test) > 0: stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item() if len(clf_losses_test) > 2: stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2) return loss, stats 
class KLDiMPActor(BaseActor): """Actor for training the DiMP network.""" def __init__(self, net, objective, loss_weight=None): super().__init__(net, objective) if loss_weight is None: loss_weight = {'bb_ce': 1.0} self.loss_weight = loss_weight def __call__(self, data): """ args: data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_proposals', 'proposal_iou' and 'test_label'. returns: loss - the training loss stats - dict containing detailed losses """ # Run network target_scores, bb_scores = self.net(train_imgs=data['train_images'], test_imgs=data['test_images'], train_bb=data['train_anno'], test_proposals=data['test_proposals']) # Reshape bb reg variables is_valid = data['test_anno'][:, :, 0] < 99999.0 bb_scores = bb_scores[is_valid, :] proposal_density = data['proposal_density'][is_valid, :] gt_density = data['gt_density'][is_valid, :] # Compute loss bb_ce = self.objective['bb_ce'](bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1) loss_bb_ce = self.loss_weight['bb_ce'] * bb_ce # If standard DiMP classifier is used loss_target_classifier = 0 loss_test_init_clf = 0 loss_test_iter_clf = 0 if 'test_clf' in self.loss_weight.keys(): # Classification losses for the different optimization iterations clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores] # Loss of the final filter clf_loss_test = clf_losses_test[-1] loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test # Loss for the initial filter iteration if 'test_init_clf' in self.loss_weight.keys(): loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0] # Loss for the intermediate filter iterations if 'test_iter_clf' in self.loss_weight.keys(): test_iter_weights = self.loss_weight['test_iter_clf'] if isinstance(test_iter_weights, list): loss_test_iter_clf = sum([a * b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])]) else: loss_test_iter_clf 
= (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1]) # If PrDiMP classifier is used loss_clf_ce = 0 loss_clf_ce_init = 0 loss_clf_ce_iter = 0 if 'clf_ce' in self.loss_weight.keys(): # Classification losses for the different optimization iterations clf_ce_losses = [self.objective['clf_ce'](s, data['test_label_density'], grid_dim=(-2,-1)) for s in target_scores] # Loss of the final filter clf_ce = clf_ce_losses[-1] loss_clf_ce = self.loss_weight['clf_ce'] * clf_ce # Loss for the initial filter iteration if 'clf_ce_init' in self.loss_weight.keys(): loss_clf_ce_init = self.loss_weight['clf_ce_init'] * clf_ce_losses[0] # Loss for the intermediate filter iterations if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2: test_iter_weights = self.loss_weight['clf_ce_iter'] if isinstance(test_iter_weights, list): loss_clf_ce_iter = sum([a * b for a, b in zip(test_iter_weights, clf_ce_losses[1:-1])]) else: loss_clf_ce_iter = (test_iter_weights / (len(clf_ce_losses) - 2)) * sum(clf_ce_losses[1:-1]) # Total loss loss = loss_bb_ce + loss_clf_ce + loss_clf_ce_init + loss_clf_ce_iter + \ loss_target_classifier + loss_test_init_clf + loss_test_iter_clf if torch.isinf(loss) or torch.isnan(loss): raise Exception('ERROR: Loss was nan or inf!!!') # Log stats stats = {'Loss/total': loss.item(), 'Loss/bb_ce': bb_ce.item(), 'Loss/loss_bb_ce': loss_bb_ce.item()} if 'test_clf' in self.loss_weight.keys(): stats['Loss/target_clf'] = loss_target_classifier.item() if 'test_init_clf' in self.loss_weight.keys(): stats['Loss/test_init_clf'] = loss_test_init_clf.item() if 'test_iter_clf' in self.loss_weight.keys(): stats['Loss/test_iter_clf'] = loss_test_iter_clf.item() if 'clf_ce' in self.loss_weight.keys(): stats['Loss/clf_ce'] = loss_clf_ce.item() if 'clf_ce_init' in self.loss_weight.keys(): stats['Loss/clf_ce_init'] = loss_clf_ce_init.item() if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2: stats['Loss/clf_ce_iter'] = 
loss_clf_ce_iter.item() if 'test_clf' in self.loss_weight.keys(): stats['ClfTrain/test_loss'] = clf_loss_test.item() if len(clf_losses_test) > 0: stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item() if len(clf_losses_test) > 2: stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2) if 'clf_ce' in self.loss_weight.keys(): stats['ClfTrain/clf_ce'] = clf_ce.item() if len(clf_ce_losses) > 0: stats['ClfTrain/clf_ce_init'] = clf_ce_losses[0].item() if len(clf_ce_losses) > 2: stats['ClfTrain/clf_ce_iter'] = sum(clf_ce_losses[1:-1]).item() / (len(clf_ce_losses) - 2) return loss, stats ================================================ FILE: external/AR/ltr/admin/__init__.py ================================================ ================================================ FILE: external/AR/ltr/admin/environment.py ================================================ import importlib import os from collections import OrderedDict def create_default_local_file(): path = os.path.join(os.path.dirname(__file__), 'local.py') empty_str = '\'\'' default_settings = OrderedDict({ 'workspace_dir': empty_str, 'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'', 'lasot_dir': empty_str, 'got10k_dir': empty_str, 'trackingnet_dir': empty_str, 'coco_dir': empty_str, 'lvis_dir': empty_str, 'sbd_dir': empty_str, 'imagenet_dir': empty_str, 'imagenetdet_dir': empty_str, 'ecssd_dir': empty_str, 'hkuis_dir': empty_str, 'msra10k_dir': empty_str, 'davis_dir': empty_str, 'youtubevos_dir': empty_str}) comment = {'workspace_dir': 'Base directory for saving network checkpoints.', 'tensorboard_dir': 'Directory for tensorboard files.'} with open(path, 'w') as f: f.write('class EnvironmentSettings:\n') f.write(' def __init__(self):\n') for attr, attr_val in default_settings.items(): comment_str = None if attr in comment: comment_str = comment[attr] if comment_str is None: f.write(' self.{} = {}\n'.format(attr, attr_val)) else: f.write(' self.{} = {} # 
{}\n'.format(attr, attr_val, comment_str)) def env_settings(): env_module_name = 'ltr.admin.local' try: env_module = importlib.import_module(env_module_name) return env_module.EnvironmentSettings() except: env_file = os.path.join(os.path.dirname(__file__), 'local.py') create_default_local_file() raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file)) ================================================ FILE: external/AR/ltr/admin/loading.py ================================================ import torch import os import sys from pathlib import Path import importlib import inspect from ltr.admin import settings as ws_settings def load_trained_network(workspace_dir, network_path, checkpoint=None): """OUTDATED. Use load_pretrained instead!""" checkpoint_dir = os.path.join(workspace_dir, 'checkpoints') directory = '{}/{}'.format(checkpoint_dir, network_path) net, _ = load_network(directory, checkpoint) return net def load_pretrained(module, name, checkpoint=None, **kwargs): """Load a network trained using the LTR framework. This is useful when you want to initialize your new network with a previously trained model. args: module - Name of the train script module. I.e. the name of the folder in ltr/train_scripts. name - The name of the train_script. checkpoint - You can supply the checkpoint number or the full path to the checkpoint file (see load_network). **kwargs - These are passed to load_network (see that function). """ settings = ws_settings.Settings() network_dir = os.path.join(settings.env.workspace_dir, 'checkpoints', 'ltr', module, name) return load_network(network_dir=network_dir, checkpoint=checkpoint, **kwargs) def load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs): """Loads a network checkpoint file. Can be called in two different ways: load_checkpoint(network_dir): Loads the checkpoint file given by the path. 
If checkpoint_dir is a directory, it tries to find the latest checkpoint in that directory. load_checkpoint(network_dir, checkpoint=epoch_num): Loads the network at the given epoch number (int). The extra keyword arguments are supplied to the network constructor to replace saved ones. """ if network_dir is not None: net_path = Path(network_dir) else: net_path = None if net_path.is_file(): checkpoint = str(net_path) if checkpoint is None: # Load most recent checkpoint checkpoint_list = sorted(net_path.glob('*.pth.tar')) if checkpoint_list: checkpoint_path = checkpoint_list[-1] else: raise Exception('No matching checkpoint file found') elif isinstance(checkpoint, int): # Checkpoint is the epoch number checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint))) if not checkpoint_list or len(checkpoint_list) == 0: raise Exception('No matching checkpoint file found') if len(checkpoint_list) > 1: raise Exception('Multiple matching checkpoint files found') else: checkpoint_path = checkpoint_list[0] elif isinstance(checkpoint, str): # Checkpoint is the path checkpoint_path = os.path.expanduser(checkpoint) else: raise TypeError # Load network checkpoint_dict = torch_load_legacy(checkpoint_path) # Construct network model if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None: net_constr = checkpoint_dict['constructor'] if constructor_fun_name is not None: net_constr.fun_name = constructor_fun_name if constructor_module is not None: net_constr.fun_module = constructor_module # Legacy networks before refactoring if net_constr.fun_module.startswith('dlframework.'): net_constr.fun_module = net_constr.fun_module[len('dlframework.'):] net_fun = getattr(importlib.import_module(net_constr.fun_module), net_constr.fun_name) net_fun_args = list(inspect.signature(net_fun).parameters.keys()) for arg, val in kwargs.items(): if arg in net_fun_args: net_constr.kwds[arg] = val else: print('WARNING: Keyword argument "{}" not found when loading 
network. It was ignored.'.format(arg)) net = net_constr.get() else: raise RuntimeError('No constructor for the given network.') net.load_state_dict(checkpoint_dict['net']) net.constructor = checkpoint_dict['constructor'] if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None: net.info = checkpoint_dict['net_info'] return net, checkpoint_dict def load_weights(net, path, strict=True): checkpoint_dict = torch.load(path) weight_dict = checkpoint_dict['net'] net.load_state_dict(weight_dict, strict=strict) return net def torch_load_legacy(path): """Load network with legacy environment.""" # Setup legacy env (for older networks) _setup_legacy_env() # Load network checkpoint_dict = torch.load(path, map_location='cpu') # Cleanup legacy _cleanup_legacy_env() return checkpoint_dict def _setup_legacy_env(): importlib.import_module('ltr') sys.modules['dlframework'] = sys.modules['ltr'] sys.modules['dlframework.common'] = sys.modules['ltr'] importlib.import_module('ltr.admin') sys.modules['dlframework.common.utils'] = sys.modules['ltr.admin'] for m in ('model_constructor', 'stats', 'settings', 'local'): importlib.import_module('ltr.admin.' + m) sys.modules['dlframework.common.utils.' + m] = sys.modules['ltr.admin.' + m] def _cleanup_legacy_env(): del_modules = [] for m in sys.modules.keys(): if m.startswith('dlframework'): del_modules.append(m) for m in del_modules: del sys.modules[m] ================================================ FILE: external/AR/ltr/admin/model_constructor.py ================================================ from functools import wraps import importlib def model_constructor(f): """ Wraps the function 'f' which returns the network. An extra field 'constructor' is added to the network returned by 'f'. This field contains an instance of the 'NetConstructor' class, which contains the information needed to re-construct the network, such as the name of the function 'f', the function arguments etc. 
Thus, the network can be easily constructed from a saved checkpoint by calling NetConstructor.get() function. """ @wraps(f) def f_wrapper(*args, **kwds): net_constr = NetConstructor(f.__name__, f.__module__, args, kwds) output = f(*args, **kwds) if isinstance(output, (tuple, list)): # Assume first argument is the network output[0].constructor = net_constr else: output.constructor = net_constr return output return f_wrapper class NetConstructor: """ Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network function. The class object can then be stored along with the network weights to re-construct the network.""" def __init__(self, fun_name, fun_module, args, kwds): """ args: fun_name - The function which returns the network fun_module - the module which contains the network function args - arguments which are passed to the network function kwds - arguments which are passed to the network function """ self.fun_name = fun_name self.fun_module = fun_module self.args = args self.kwds = kwds def get(self): """ Rebuild the network by calling the network function with the correct arguments. 
""" net_module = importlib.import_module(self.fun_module) net_fun = getattr(net_module, self.fun_name) return net_fun(*self.args, **self.kwds) ================================================ FILE: external/AR/ltr/admin/multigpu.py ================================================ import torch.nn as nn def is_multi_gpu(net): return isinstance(net, (MultiGPU, nn.DataParallel)) class MultiGPU(nn.DataParallel): """Wraps a network to allow simple multi-GPU training.""" def __getattr__(self, item): try: return super().__getattr__(item) except: pass return getattr(self.module, item) ================================================ FILE: external/AR/ltr/admin/settings.py ================================================ from ltr.admin.environment import env_settings class Settings: """ Training settings, e.g. the paths to datasets and networks.""" def __init__(self): self.set_default() def set_default(self): self.env = env_settings() self.use_gpu = True ================================================ FILE: external/AR/ltr/admin/stats.py ================================================ class StatValue: def __init__(self): self.clear() def reset(self): self.val = 0 def clear(self): self.reset() self.history = [] def update(self, val): self.val = val self.history.append(self.val) class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.clear() self.has_new_data = False def reset(self): self.avg = 0 self.val = 0 self.sum = 0 self.count = 0 def clear(self): self.reset() self.history = [] def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def new_epoch(self): if self.count > 0: self.history.append(self.avg) self.reset() self.has_new_data = True else: self.has_new_data = False def topk_accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" single_input = not isinstance(topk, (tuple, list)) if single_input: topk = (topk,) maxk 
= max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0] res.append(correct_k * 100.0 / batch_size) if single_input: return res[0] return res ================================================ FILE: external/AR/ltr/admin/tensorboard.py ================================================ import os from collections import OrderedDict try: from torch.utils.tensorboard import SummaryWriter except: print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.') from tensorboardX import SummaryWriter class TensorboardWriter: def __init__(self, directory, loader_names): self.directory = directory self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names}) def write_info(self, module_name, script_name, description): tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info')) tb_info_writer.add_text('Modulet_name', module_name) tb_info_writer.add_text('Script_name', script_name) tb_info_writer.add_text('Description', description) tb_info_writer.close() def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1): for loader_name, loader_stats in stats.items(): if loader_stats is None: continue for var_name, val in loader_stats.items(): if hasattr(val, 'history') and getattr(val, 'has_new_data', True): self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch) ================================================ FILE: external/AR/ltr/data/__init__.py ================================================ from .loader import LTRLoader ================================================ FILE: external/AR/ltr/data/bounding_box_utils.py ================================================ import torch def rect_to_rel(bb, sz_norm=None): """Convert standard rectangular parametrization of the bounding box [x, y, w, h] to 
relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate. args: bb - N x 4 tensor of boxes. sz_norm - [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given. """ c = bb[...,:2] + 0.5 * bb[...,2:] if sz_norm is None: c_rel = c / bb[...,2:] else: c_rel = c / sz_norm sz_rel = torch.log(bb[...,2:]) return torch.cat((c_rel, sz_rel), dim=-1) def rel_to_rect(bb, sz_norm=None): """Inverts the effect of rect_to_rel. See above.""" sz = torch.exp(bb[...,2:]) if sz_norm is None: c = bb[...,:2] * sz else: c = bb[...,:2] * sz_norm tl = c - 0.5 * sz return torch.cat((tl, sz), dim=-1) def masks_to_bboxes(mask, fmt='c'): """ Convert a mask tensor to one or more bounding boxes. Note: This function is a bit new, make sure it does what it says. /Andreas :param mask: Tensor of masks, shape = (..., H, W) :param fmt: bbox layout. 'c' => "center + size" or (x_center, y_center, width, height) 't' => "top left + size" or (x_left, y_top, width, height) 'v' => "vertices" or (x_left, y_top, x_right, y_bottom) :return: tensor containing a batch of bounding boxes, shape = (..., 4) """ batch_shape = mask.shape[:-2] mask = mask.reshape((-1, *mask.shape[-2:])) bboxes = [] for m in mask: mx = m.sum(dim=-2).nonzero() my = m.sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bboxes.append(bb) bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device) bboxes = bboxes.reshape(batch_shape + (4,)) if fmt == 'v': return bboxes x1 = bboxes[..., :2] s = bboxes[..., 2:] - x1 + 1 if fmt == 'c': return torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': return torch.cat((x1, s), dim=-1) raise ValueError("Undefined bounding box layout '%s'" % fmt) def masks_to_bboxes_multi(mask, ids, fmt='c'): assert mask.dim() == 2 bboxes = [] for id in ids: mx = (mask == id).sum(dim=-2).nonzero() my = (mask == id).float().sum(dim=-1).nonzero() bb = [mx.min(), my.min(), mx.max(), 
my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0] bb = torch.tensor(bb, dtype=torch.float32, device=mask.device) x1 = bb[:2] s = bb[2:] - x1 + 1 if fmt == 'v': pass elif fmt == 'c': bb = torch.cat((x1 + 0.5 * s, s), dim=-1) elif fmt == 't': bb = torch.cat((x1, s), dim=-1) else: raise ValueError("Undefined bounding box layout '%s'" % fmt) bboxes.append(bb) return bboxes ================================================ FILE: external/AR/ltr/data/image_loader.py ================================================ import jpeg4py import cv2 as cv from PIL import Image import numpy as np davis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8) davis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0], [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]] def default_image_loader(path): """The default image loader, reads the image from the given path. 
It first tries to use the jpeg4py_loader, but reverts to the opencv_loader if the former is not available.""" if default_image_loader.use_jpeg4py is None: # Try using jpeg4py im = jpeg4py_loader(path) if im is None: default_image_loader.use_jpeg4py = False print('Using opencv_loader instead.') else: default_image_loader.use_jpeg4py = True return im if default_image_loader.use_jpeg4py: return jpeg4py_loader(path) return opencv_loader(path) default_image_loader.use_jpeg4py = None def jpeg4py_loader(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_loader(path): """ Read image using opencv's imread function and returns it in rgb format""" try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def jpeg4py_loader_w_failsafe(path): """ Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py""" try: return jpeg4py.JPEG(path).decode() except: try: im = cv.imread(path, cv.IMREAD_COLOR) # convert to rgb and return return cv.cvtColor(im, cv.COLOR_BGR2RGB) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def opencv_seg_loader(path): """ Read segmentation annotation using opencv's imread function""" try: return cv.imread(path) except Exception as e: print('ERROR: Could not read image "{}"'.format(path)) print(e) return None def imread_indexed(filename): """ Load indexed image with given filename. Used to read segmentation annotations.""" im = Image.open(filename) annotation = np.atleast_3d(im)[...,0] return annotation def imwrite_indexed(filename, array, color_palette=None): """ Save indexed image as png. 
Used to save segmentation annotation.""" if color_palette is None: color_palette = davis_palette if np.atleast_3d(array).shape[2] != 1: raise Exception("Saving indexed PNGs requires 2D array.") im = Image.fromarray(array) im.putpalette(color_palette.ravel()) im.save(filename, format='PNG') ================================================ FILE: external/AR/ltr/data/loader.py ================================================ import torch import torch.utils.data.dataloader import importlib import collections from torch._six import string_classes, int_classes from pytracking import TensorDict, TensorList def _check_use_shared_memory(): if hasattr(torch.utils.data.dataloader, '_use_shared_memory'): return getattr(torch.utils.data.dataloader, '_use_shared_memory') collate_lib = importlib.import_module('torch.utils.data._utils.collate') if hasattr(collate_lib, '_use_shared_memory'): return getattr(collate_lib, '_use_shared_memory') return torch.utils.data.get_worker_info() is not None def ltr_collate(batch): """Puts each data field into a tensor with outer dimension batch size""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return 
torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) def ltr_collate_stack1(batch): """Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _check_use_shared_memory(): # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 1, out=out) # if batch[0].dim() < 4: # return torch.stack(batch, 0, out=out) # return torch.cat(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None: raise TypeError(error_msg.format(elem.dtype)) return torch.stack([torch.from_numpy(b) for b in batch], 1) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], TensorDict): return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}) elif isinstance(batch[0], collections.Mapping): return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], TensorList): transposed = zip(*batch) return TensorList([ltr_collate_stack1(samples) for samples in transposed]) elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [ltr_collate_stack1(samples) for samples in transposed] elif batch[0] is None: return batch raise TypeError((error_msg.format(type(batch[0])))) class 
LTRLoader(torch.utils.data.dataloader.DataLoader): """ Data loader. Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset. Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to select along which dimension the data should be stacked to form a batch. Arguments: dataset (Dataset): dataset from which to load the data. batch_size (int, optional): how many samples per batch to load (default: 1). shuffle (bool, optional): set to ``True`` to have the data reshuffled at every epoch (default: False). sampler (Sampler, optional): defines the strategy to draw samples from the dataset. If specified, ``shuffle`` must be False. batch_sampler (Sampler, optional): like sampler, but returns a batch of indices at a time. Mutually exclusive with batch_size, shuffle, sampler, and drop_last. num_workers (int, optional): how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0) collate_fn (callable, optional): merges a list of samples to form a mini-batch. stack_dim (int): Dimension along which to stack to form the batch. (default: 0) pin_memory (bool, optional): If ``True``, the data loader will copy tensors into CUDA pinned memory before returning them. drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: False) timeout (numeric, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. (default: 0) worker_init_fn (callable, optional): If not None, this will be called on each worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading. (default: None) .. 
    note:: By default, each worker will have its PyTorch seed set to
          ``base_seed + worker_id``, where ``base_seed`` is a long generated
          by main process using its RNG. However, seeds for other libraries
          may be duplicated upon initializing workers (e.g., NumPy), causing
          each worker to return identical random numbers. (See
          :ref:`dataloader-workers-random-seed` section in FAQ.) You may
          use ``torch.initial_seed()`` to access the PyTorch seed for each
          worker in :attr:`worker_init_fn`, and use it to set other seeds
          before data loading.

    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be
                 an unpicklable object, e.g., a lambda function.
    """

    __initialized = False

    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,
                 timeout=0, worker_init_fn=None):
        # Select the collate function from the requested stacking dimension
        # unless the caller supplied one explicitly.
        if collate_fn is None:
            if stack_dim == 0:
                collate_fn = ltr_collate
            elif stack_dim == 1:
                collate_fn = ltr_collate_stack1
            else:
                raise ValueError('Stack dim no supported. Must be 0 or 1.')

        # NOTE(review): arguments are forwarded positionally, so this relies
        # on the positional parameter order of torch's DataLoader.__init__
        # (dataset, batch_size, shuffle, sampler, batch_sampler, num_workers,
        # collate_fn, pin_memory, drop_last, timeout, worker_init_fn) --
        # verify against the installed torch version.
        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
                                        num_workers, collate_fn, pin_memory, drop_last,
                                        timeout, worker_init_fn)

        # Extra bookkeeping used by the training framework (not by DataLoader).
        self.name = name
        self.training = training
        self.epoch_interval = epoch_interval
        self.stack_dim = stack_dim



================================================
FILE: external/AR/ltr/data/processing.py
================================================
import torch
import torchvision.transforms as transforms
from pytracking import TensorDict
import ltr.data.processing_utils as prutils


def stack_tensors(x):
    # Stack a list/tuple of tensors into a single tensor; pass everything
    # else through unchanged.
    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):
        return torch.stack(x)
    return x


class BaseProcessing:
    """ Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it
     through the network.
    For example, it can be used to crop a search region around the object, apply various data augmentations, etc."""

    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):
        """
        args:
            transform       - The set of transformations to be applied on the images. Used only if train_transform or
                                test_transform is None.
            train_transform - The set of transformations to be applied on the train images. If None, the 'transform'
                                argument is used instead.
            test_transform  - The set of transformations to be applied on the test images. If None, the 'transform'
                                argument is used instead.
            joint_transform - The set of transformations to be applied 'jointly' on the train and test images. For
                                example, it can be used to convert both test and train images to grayscale.
        """
        # NOTE(review): the default `transforms.ToTensor()` is instantiated
        # once at import time and shared across instances; harmless here
        # since ToTensor is stateless.
        self.transform = {'train': transform if train_transform is None else train_transform,
                          'test': transform if test_transform is None else test_transform,
                          'joint': joint_transform}

    def __call__(self, data: TensorDict):
        # Subclasses implement the actual per-sample processing.
        raise NotImplementedError


class ATOMProcessing(BaseProcessing):
    """ The processing class used for training ATOM. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,
                 proposal_params, mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal jitter of the box size ...
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        # ... and a uniform center offset proportional to the jittered target size.
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        # Back to [x, y, w, h] format.
        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box.
                The IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')

        if proposal_method == 'default':
            # Rejection-sample one proposal at a time with a minimum-IoU constraint.
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            # Sample all proposals at once from a Gaussian mixture around the box.
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))

        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou'
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            # new_roll=False re-uses the random draw from the train pass so
            # train and test frames get the same joint augmentation.
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            # Crop image region centered at jittered_anno box
            crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                        self.search_area_factor, self.output_sz)

            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])

        data['test_proposals'] = list(frame2_proposals)
        data['proposal_iou'] = list(gt_iou)

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            # In pair mode, unwrap the single-element lists.
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        return data


class KLBBregProcessing(BaseProcessing):
    """ Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning
    introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565].
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,
                 mode='pair', *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            mode - Either 'pair' or 'sequence'.
                   If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Sample proposal boxes from a GMM around `box`, together with their
        proposal density and the ground-truth density used for ML/KL learning.
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params[
                                                                             'boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get(
                                                                             'add_mean_box', False))

        return proposals, proposal_density, gt_density

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density'
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            # Crop image region centered at jittered_anno box
            # NOTE(review): unlike ATOMProcessing, this unpacks THREE values
            # from jittered_center_crop -- verify which prutils variant is
            # actually imported.
            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                           self.search_area_factor, self.output_sz)

            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])

        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        return data


class ATOMwKLProcessing(BaseProcessing):
    """Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing."""

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,
                 proposal_params, mode='pair', *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.proposal_params = proposal_params
        self.mode = mode

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Sample GMM proposals plus their densities, and additionally the
        IoU of each proposal with the input box.
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         self.proposal_params['gt_sigma'],
                                                                         self.proposal_params['boxes_per_frame'])

        iou = prutils.iou_gen(proposals, box.view(1, 4))
        return proposals, proposal_density, gt_density, iou

    def __call__(self, data: TensorDict):
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            # Crop image region centered at jittered_anno box
            # NOTE(review): only TWO values are unpacked here (cf. the
            # three-value unpack in KLBBregProcessing) -- confirm against the
            # jittered_center_crop signature actually in use.
            crops, boxes = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                        self.search_area_factor, self.output_sz)

            # Apply transforms
            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        proposals, proposal_density, gt_density, proposal_iou = zip(
            *[self._generate_proposals(a) for a in data['test_anno']])

        data['test_proposals'] = proposals
        data['proposal_density'] = proposal_density
        data['gt_density'] = gt_density
        data['proposal_iou'] = proposal_iou

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        return data


class DiMPProcessing(BaseProcessing):
    """ The processing class used for training DiMP. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise.
    Next, a square region (called search region ) centered at the jittered target center, and of area
    search_area_factor^2 times the area of the jittered box is cropped from the image. The reason for jittering the
    target box is to avoid learning the bias that the target is always at the center of the search region. The search
    region is then resized to a fixed size given by the argument output_sz. A Gaussian label centered at the target is
    generated for each image. These label functions are used for computing the loss of the predicted classification
    model on the test images. A set of proposals are also generated for the test images by jittering the ground truth
    box. These proposals are used to train the bounding box estimating branch.
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None, label_function_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change

        self.proposal_params = proposal_params
        self.label_function_params = label_function_params

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        # Log-normal size jitter, then a uniform center offset scaled by the
        # jittered target size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generates proposals by adding noise to the input box
        args:
            box - input box
        returns:
            torch.Tensor - Array of shape (num_proposals, 4) containing proposals
            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box.
                The IoU is mapped to [-1, 1]
        """
        # Generate proposals
        num_proposals = self.proposal_params['boxes_per_frame']
        proposal_method = self.proposal_params.get('proposal_method', 'default')

        if proposal_method == 'default':
            # Rejection-sample proposals one at a time with a min-IoU constraint.
            proposals = torch.zeros((num_proposals, 4))
            gt_iou = torch.zeros(num_proposals)
            for i in range(num_proposals):
                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],
                                                                 sigma_factor=self.proposal_params['sigma_factor'])
        elif proposal_method == 'gmm':
            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                     num_samples=num_proposals)
            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))
        else:
            raise ValueError('Unknown proposal method.')

        # Map to [-1, 1]
        gt_iou = gt_iou * 2 - 1
        return proposals, gt_iou

    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))

        return gauss_label

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno'
        returns:
            TensorDict - output data block with following fields:
                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou',
                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)
        """
        # Joint transforms are shared between train and test frames
        # (new_roll=False re-uses the train-frame random draw).
        if self.transform['joint'] is not None:
            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])
            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)

        for s in ['train', 'test']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],
                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,
                                                     max_scale_change=self.max_scale_change)

            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)

        # Generate proposals
        if self.proposal_params:
            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])

            data['test_proposals'] = list(frame2_proposals)
            data['proposal_iou'] = list(gt_iou)

        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        # Generate label functions
        if self.label_function_params is not None:
            data['train_label'] = self._generate_label_function(data['train_anno'])
            data['test_label'] = self._generate_label_function(data['test_anno'])

        return data


class KLDiMPProcessing(BaseProcessing):
    """ The processing class used for training PrDiMP that additionally supports the probabilistic classifier and
    bounding box regressor. See DiMPProcessing for details.
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',
                 max_scale_change=None, mode='pair', proposal_params=None,
                 label_function_params=None, label_density_params=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
            label_function_params - Arguments for the label generation process. See _generate_label_function for details.
            label_density_params - Arguments for the label density generation process. See _generate_label_function for details.
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.crop_type = crop_type
        self.mode = mode
        self.max_scale_change = max_scale_change

        self.proposal_params = proposal_params
        self.label_function_params = label_function_params
        self.label_density_params = label_density_params

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'train' or 'test' indicating train or test data
        returns:
            torch.Tensor - jittered box
        """
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def _generate_proposals(self, box):
        """ Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.
        This is used for ML and KL based regression learning of the bounding box regressor.
        args:
            box - input bounding box
        """
        # Generate proposals
        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],
                                                                         gt_sigma=self.proposal_params['gt_sigma'],
                                                                         num_samples=self.proposal_params['boxes_per_frame'],
                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))

        return proposals, proposal_density, gt_density

    def _generate_label_function(self, target_bb):
        """ Generates the gaussian label function centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],
                                                      self.label_function_params['kernel_sz'],
                                                      self.label_function_params['feature_sz'], self.output_sz,
                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))

        return gauss_label

    def _generate_label_density(self, target_bb):
        """ Generates the gaussian label density centered at target_bb
        args:
            target_bb - target bounding box (num_images, 4)
        returns:
            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample
        """
        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)
        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_density_params['sigma_factor'],
                                                      self.label_density_params['kernel_sz'],
                                                      feat_sz, self.output_sz,
                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),
                                                      density=True,
                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))

        # Zero out entries below the threshold ...
        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()

        if self.label_density_params.get('normalize', False):
            g_sum = gauss_label.sum(dim=(-2, -1))
            valid = g_sum > 0.01
            # ... normalize each valid label to a probability density, and
            # replace degenerate (near-all-zero) labels with a uniform density.
            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)
            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])
gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0) return gauss_label def __call__(self, data: TensorDict): """ args: data - The input data, should contain the following fields: 'train_images', test_images', 'train_anno', 'test_anno' returns: TensorDict - output data block with following fields: 'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density', 'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional) """ if self.transform['joint'] is not None: data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno']) data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False) for s in ['train', 'test']: assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \ "In pair mode, num train/test frames must be 1" # Add a uniform noise to the center pos jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']] crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'], self.search_area_factor, self.output_sz, mode=self.crop_type, max_scale_change=self.max_scale_change) data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False) # Generate proposals proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']]) data['test_proposals'] = proposals data['proposal_density'] = proposal_density data['gt_density'] = gt_density for s in ['train', 'test']: is_distractor = data.get('is_distractor_{}_frame'.format(s), None) if is_distractor is not None: for is_dist, box in zip(is_distractor, data[s+'_anno']): if is_dist: box[0] = 99999999.9 box[1] = 99999999.9 # Prepare output if self.mode == 'sequence': data = data.apply(stack_tensors) else: data = data.apply(lambda x: x[0] if isinstance(x, list) 
else x) # Generate label functions if self.label_function_params is not None: data['train_label'] = self._generate_label_function(data['train_anno']) data['test_label'] = self._generate_label_function(data['test_anno']) if self.label_density_params is not None: data['train_label_density'] = self._generate_label_density(data['train_anno']) data['test_label_density'] = self._generate_label_density(data['test_anno']) return data ================================================ FILE: external/AR/ltr/data/processing_utils.py ================================================ import torch import math import cv2 as cv import random import torch.nn.functional as F from .bounding_box_utils import rect_to_rel, rel_to_rect def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None): """ Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area args: im - cv image target_bb - target box [x, y, w, h] search_area_factor - Ratio of crop size to target size output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done. 
returns:
        cv image - extracted crop
        float - the factor by which the crop has been resized to make the crop size equal output_size
    """
    x, y, w, h = target_bb.tolist()

    # Crop image: square side proportional to the geometric mean of w and h.
    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)

    if crop_sz < 1:
        raise Exception('Too small bounding box.')

    x1 = round(x + 0.5 * w - crop_sz * 0.5)
    x2 = x1 + crop_sz

    y1 = round(y + 0.5 * h - crop_sz * 0.5)
    y2 = y1 + crop_sz

    # Amount by which the crop extends past each image border.
    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Crop target
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]

    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]

    # Pad: image by border replication, mask with zeros.
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)

    if mask is not None:
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)

    if output_sz is not None:
        resize_factor = output_sz / crop_sz
        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))

        if mask is None:
            return im_crop_padded, resize_factor

        mask_crop_padded = \
            F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]
        return im_crop_padded, resize_factor, mask_crop_padded

    else:
        if mask is None:
            return im_crop_padded, 1.0
        return im_crop_padded, 1.0, mask_crop_padded


def transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,
                            crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box_in - the box for which the co-ordinates are to be transformed
        box_extract - the box about which the image crop has been extracted.
        resize_factor - the ratio between the original image scale and the scale of the image crop
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]

    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]

    # The extracted box's center maps to the center of the crop.
    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor
    box_out_wh = box_in[2:4] * resize_factor

    box_out = torch.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))
    return box_out


def jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2 times
    box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box
    box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    if masks is None:
        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)
                                for f, a in zip(frames, box_extract)]
        frames_crop, resize_factors = zip(*crops_resize_factors)
        masks_crop = None
    else:
        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)
                                for f, a, m in zip(frames, box_extract, masks)]
        frames_crop, resize_factors, masks_crop = zip(*crops_resize_factors)

    crop_sz = torch.Tensor([output_sz, output_sz])

    # find the bb location in the crop
    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz)
                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]

    return frames_crop, box_crop, masks_crop


def sample_target_adaptive(im, target_bb, search_area_factor, output_sz, mode: str = 'replicate',
                           max_scale_change=None, mask=None):
    """ Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains regions
    outside the image, it is shifted so that the it is inside the image. Further, if the crop area exceeds the image
    size, a smaller crop which fits the image is returned instead.

    args:
        im - Input numpy image to crop.
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.
        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
        mask - Optional mask to apply the same crop.

    returns:
        numpy image - Extracted crop.
        torch.Tensor - A bounding box denoting the cropped region in the image.
        numpy mask - Cropped mask returned only if mask is not None.
    """

    if max_scale_change is None:
        max_scale_change = float('inf')
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)
    output_sz = torch.Tensor(output_sz)

    im_h = im.shape[0]
    im_w = im.shape[1]

    bbx, bby, bbw, bbh = target_bb.tolist()

    # Crop image: scale the output size so the crop area matches search_area_factor^2 * box area.
    crop_sz_x, crop_sz_y = (output_sz * (
        target_bb[2:].prod() / output_sz.prod()).sqrt() * search_area_factor).ceil().long().tolist()

    # Get new sample size if forced inside the image
    if mode == 'inside' or mode == 'inside_major':
        # Calculate rescaling factor if outside the image
        rescale_factor = [crop_sz_x / im_w, crop_sz_y / im_h]
        if mode == 'inside':
            rescale_factor = max(rescale_factor)
        elif mode == 'inside_major':
            rescale_factor = min(rescale_factor)
        rescale_factor = min(max(1, rescale_factor), max_scale_change)

        crop_sz_x = math.floor(crop_sz_x / rescale_factor)
        crop_sz_y = math.floor(crop_sz_y / rescale_factor)

    if crop_sz_x < 1 or crop_sz_y < 1:
        raise Exception('Too small bounding box.')

    x1 = round(bbx + 0.5 * bbw - crop_sz_x * 0.5)
    x2 = x1 + crop_sz_x

    y1 = round(bby + 0.5 * bbh - crop_sz_y * 0.5)
    y2 = y1 + crop_sz_y

    # Move box inside image
    shift_x = max(0, -x1) + min(0, im_w - x2)
    x1 += shift_x
    x2 += shift_x

    shift_y = max(0, -y1) + min(0, im_h - y2)
    y1 += shift_y
    y2 += shift_y

    # If the crop still does not fit, center the out-of-image overhang on both sides.
    out_x = (max(0, -x1) + max(0, x2 - im_w)) // 2
    out_y = (max(0, -y1) + max(0, y2 - im_h)) // 2
    shift_x = (-x1 - out_x) * (out_x > 0)
    shift_y = (-y1 - out_y) * (out_y > 0)

    x1 += shift_x
    x2 += shift_x
    y1 += shift_y
    y2 += shift_y

    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Crop target
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]

    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]

    # Pad
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)

    if mask is not None:
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)

    # Resize image
    im_out = cv.resize(im_crop_padded, tuple(output_sz.long().tolist()))

    if mask is not None:
        # flip(0): F.interpolate expects (h, w) while output_sz is (w, h) here.
        mask_out = \
            F.interpolate(mask_crop_padded[None, None], tuple(output_sz.flip(0).long().tolist()), mode='nearest')[0, 0]

    crop_box = torch.Tensor([x1, y1, x2 - x1, y2 - y1])

    if mask is None:
        return im_out, crop_box
    else:
        return im_out, crop_box, mask_out


def crop_and_resize(im, box, crop_bb, output_sz, mask=None):
    # Crop the region crop_bb [x, y, w, h] from im (replicate-padding out-of-image parts),
    # resize to output_sz, and map box into the resized crop's coordinates.
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)

    im_h = im.shape[0]
    im_w = im.shape[1]

    if crop_bb[2] < 1 or crop_bb[3] < 1:
        raise Exception('Too small bounding box.')

    x1 = crop_bb[0]
    x2 = crop_bb[0] + crop_bb[2]

    y1 = crop_bb[1]
    y2 = crop_bb[1] + crop_bb[3]

    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Crop target
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]

    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]

    # Pad
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)

    if mask is not None:
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)

    # Resize image
    im_out = cv.resize(im_crop_padded, output_sz)

    if mask is not None:
        mask_out = F.interpolate(mask_crop_padded[None, None], (output_sz[1], output_sz[0]), mode='nearest')[0, 0]

    # Hack: a single isotropic scale factor is derived from the x-dimension only.
    rescale_factor = output_sz[0] / crop_bb[2]

    # Hack
    if box is not None:
        box_crop = box.clone()
        box_crop[0] -= crop_bb[0]
        box_crop[1] -= crop_bb[1]
        box_crop *= rescale_factor
    else:
        box_crop = None

    if mask is None:
        return im_out, box_crop
    else:
        return im_out, box_crop, mask_out


def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box - the box for which the co-ordinates are to be transformed
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """

    box_out = box.clone()
    box_out[:2] -= crop_box[:2]

    scale_factor = crop_sz / crop_box[2:]

    box_out[:2] *= scale_factor
    box_out[2:] *= scale_factor
    return box_out


def target_image_crop(frames, box_extract, box_gt, search_area_factor, output_sz, mode: str = 'replicate',
                      max_scale_change=None, masks=None):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2 times
    box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it
    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates of
    the box box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized
        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.
               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.
               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.
max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')
        masks - Optional masks to apply the same crop.

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)

    if masks is None:
        frame_crops_boxes = [sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change)
                             for f, a in zip(frames, box_extract)]

        frames_crop, crop_boxes = zip(*frame_crops_boxes)
    else:
        frame_crops_boxes_masks = [
            sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change, mask=m)
            for f, a, m in zip(frames, box_extract, masks)]

        frames_crop, crop_boxes, masks_crop = zip(*frame_crops_boxes_masks)

    crop_sz = torch.Tensor(output_sz)

    # find the bb location in the crop
    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)
                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]

    if masks is None:
        return frames_crop, box_crop
    else:
        return frames_crop, box_crop, masks_crop


def iou(reference, proposals):
    """Compute the IoU between a reference box with multiple proposal boxes.

    args:
        reference - Tensor of shape (1, 4).
        proposals - Tensor of shape (num_proposals, 4)

    returns:
        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.
    """

    # Intersection box
    tl = torch.max(reference[:, :2], proposals[:, :2])
    br = torch.min(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals[:, 2:])
    sz = (br - tl).clamp(0)

    # Area
    intersection = sz.prod(dim=1)
    union = reference[:, 2:].prod(dim=1) + proposals[:, 2:].prod(dim=1) - intersection

    return intersection / union


def rand_uniform(a, b, shape=1):
    """ sample numbers uniformly between a and b.
    args:
        a - lower bound
        b - upper bound
        shape - shape of the output tensor

    returns:
        torch.Tensor - tensor of shape=shape
    """
    return (b - a) * torch.rand(shape) + a


def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates

    args:
        box - input box
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of
                       sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                       sigma_factor element can be either a float, or a tensor of shape (4,) specifying the
                       sigma_factor per co-ordinate

    returns:
        torch.Tensor - the perturbed box
    """

    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = random.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    if not isinstance(c_sigma_factor, torch.Tensor):
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    # Noise scale is relative to the box's geometric mean size.
    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5 * box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = random.gauss(c_x, perturb_factor[0])
        c_y_per = random.gauss(c_y, perturb_factor[1])

        w_per = random.gauss(box[2], perturb_factor[2])
        h_per = random.gauss(box[3], perturb_factor[3])

        if w_per <= 1:
            w_per = box[2] * rand_uniform(0.15, 0.5)

        if h_per <= 1:
            h_per = box[3] * rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()

        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5)

        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor
        perturb_factor *= 0.9

    return box_per, box_iou


def gauss_1d(sz, sigma, center, end_pad=0, density=False):
    # 1-D Gaussian evaluated on an sz(+end_pad)-point grid centered at 0; one row per center.
    k = torch.arange(-(sz - 1) / 2, (sz + 1) / 2 + end_pad).reshape(1, -1)
    gauss = torch.exp(-1.0 / (2 * sigma ** 2) * (k - center.reshape(-1, 1)) ** 2)
    if density:
        gauss /= math.sqrt(2 * math.pi) * sigma
    return gauss


def gauss_2d(sz, sigma, center, end_pad=(0, 0), density=False):
    # Separable 2-D Gaussian: outer product of the two 1-D Gaussians.
    if isinstance(sigma, (float, int)):
        sigma = (sigma, sigma)
    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0], density).reshape(center.shape[0], 1, -1) * \
           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1], density).reshape(center.shape[0], -1, 1)


def gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True, density=False,
                            uni_bias=0):
    """Construct Gaussian label function."""
    if isinstance(kernel_sz, (float, int)):
        kernel_sz = (kernel_sz, kernel_sz)
    if isinstance(feat_sz, (float, int)):
        feat_sz = (feat_sz, feat_sz)
    if isinstance(image_sz, (float, int)):
        image_sz = (image_sz, image_sz)

    image_sz = torch.Tensor(image_sz)
    feat_sz = torch.Tensor(feat_sz)

    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]
    target_center_norm = (target_center - image_sz / 2) / image_sz

    # Half-pixel shift for odd kernel sizes.
    center = feat_sz * target_center_norm + 0.5 * \
             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])

    sigma = sigma_factor * feat_sz.prod().sqrt().item()

    if end_pad_if_even:
        end_pad = (int(kernel_sz[0] % 2 == 0), int(kernel_sz[1] % 2 == 0))
    else:
        end_pad = (0, 0)

    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad, density=density)

    if density:
        # Mix the Gaussian density with a uniform component of weight uni_bias.
        sz = (feat_sz + torch.Tensor(end_pad)).prod()
        label = (1.0 - uni_bias) * gauss_label + uni_bias / sz
    else:
        label = gauss_label + uni_bias
    return label


def gauss_density_centered(x, std):
    """Evaluate the probability density of a Gaussian centered at zero.

    args:
        x - Samples.
std - List of standard deviations
    """
    return torch.exp(-0.5 * (x / std) ** 2) / (math.sqrt(2 * math.pi) * std)


def gmm_density_centered(x, std):
    """Evaluate the probability density of a GMM centered at zero.

    args:
        x - Samples. Assumes dim=-1 is the component dimension and dim=-2 is feature dimension. Rest are sample dimension.
        std - Tensor of standard deviations
    """
    if x.dim() == std.dim() - 1:
        x = x.unsqueeze(-1)
    elif not (x.dim() == std.dim() and x.shape[-1] == 1):
        raise ValueError('Last dimension must be the gmm stds.')
    # Product over feature dims, mean over (equally weighted) mixture components.
    return gauss_density_centered(x, std).prod(-2).mean(-1)


def sample_gmm_centered(std, num_samples=1):
    """Sample from a GMM distribution centered at zero:

    args:
        std - Tensor of standard deviations
        num_samples - number of samples
    """
    num_components = std.shape[-1]
    num_dims = std.numel() // num_components

    std = std.view(1, num_dims, num_components)

    # Sample component ids
    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)
    std_samp = std[0, :, k].t()

    # Sample
    x_centered = std_samp * torch.randn(num_samples, num_dims)
    prob_dens = gmm_density_centered(x_centered, std)

    return x_centered, prob_dens


def sample_gmm(mean, std, num_samples=1):
    """Sample from a GMM distribution:

    args:
        mean - a single mean vector
        std - Tensor of standard deviations
        num_samples - number of samples
    """
    num_dims = mean.numel()
    num_components = std.shape[-1]

    mean = mean.view(1, num_dims)
    std = std.view(1, -1, num_components)

    # Sample component ids
    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)
    std_samp = std[0, :, k].t()

    # Sample
    x_centered = std_samp * torch.randn(num_samples, num_dims)
    x = x_centered + mean
    prob_dens = gmm_density_centered(x_centered, std)

    return x, prob_dens


def sample_box_gmm(mean_box, proposal_sigma, gt_sigma=None, num_samples=1, add_mean_box=False):
    """Sample boxes from a Gaussian mixture model.

    args:
        mean_box - Center (or mean) bounding box
        proposal_sigma - List of standard deviations for each Gaussian
        gt_sigma - Standard deviation of the ground truth distribution
        num_samples - Number of sampled boxes
        add_mean_box - Also add mean box as first element

    returns:
        proposals, proposal density and ground truth density for all samples
    """
    center_std = torch.Tensor([s[0] for s in proposal_sigma])
    sz_std = torch.Tensor([s[1] for s in proposal_sigma])
    std = torch.stack([center_std, center_std, sz_std, sz_std])

    mean_box = mean_box.view(1, 4)
    sz_norm = mean_box[:, 2:].clone()

    # Sample boxes in the size-normalized relative parameterization.
    proposals_rel_centered, proposal_density = sample_gmm_centered(std, num_samples)

    # Add mean and map back
    mean_box_rel = rect_to_rel(mean_box, sz_norm)
    proposals_rel = proposals_rel_centered + mean_box_rel
    proposals = rel_to_rect(proposals_rel, sz_norm)

    if gt_sigma is None or gt_sigma[0] == 0 and gt_sigma[1] == 0:
        gt_density = torch.zeros_like(proposal_density)
    else:
        std_gt = torch.Tensor([gt_sigma[0], gt_sigma[0], gt_sigma[1], gt_sigma[1]]).view(1, 4)
        gt_density = gauss_density_centered(proposals_rel_centered, std_gt).prod(-1)

    if add_mean_box:
        # Sentinel densities (-1 / 1) mark the prepended mean box.
        proposals = torch.cat((mean_box, proposals))
        proposal_density = torch.cat((torch.Tensor([-1]), proposal_density))
        gt_density = torch.cat((torch.Tensor([1]), gt_density))

    return proposals, proposal_density, gt_density


================================================
FILE: external/AR/ltr/data/processing_utils_SE.py
================================================
import torch
import math
import cv2 as cv
import random
import numpy as np


def stack_tensors(x):
    # Stack a list of tensors along a new dim; pass everything else through.
    if isinstance(x, list) and isinstance(x[0], torch.Tensor):
        return torch.stack(x)
    return x


'''Added on 2019.12.23'''
def sample_target_SE(im, target_bb, search_area_factor, output_sz=None, mode=cv.BORDER_REPLICATE):
    """ Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width)

    args:
        im - cv image
        target_bb - target box [x, y, w, h]
search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.

    returns:
        cv image - extracted crop
        float - the factor by which the crop has been resized to make the crop size equal output_size
    """
    x, y, w, h = target_bb.tolist()

    # Crop image: rectangular crop, scaled per-axis (unlike the square sample_target).
    ws = math.ceil(search_area_factor * w)
    hs = math.ceil(search_area_factor * h)

    if ws < 1 or hs < 1:
        raise Exception('Too small bounding box.')

    x1 = round(x + 0.5*w - ws*0.5)
    x2 = x1 + ws

    y1 = round(y + 0.5 * h - hs * 0.5)
    y2 = y1 + hs

    x1_pad = max(0, -x1)
    x2_pad = max(x2-im.shape[1]+1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2-im.shape[0]+1, 0)

    # Crop target
    im_crop = im[y1+y1_pad:y2-y2_pad, x1+x1_pad:x2-x2_pad, :]

    # Pad
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, mode)

    if output_sz is not None:
        # Separate resize factors for height and width.
        w_rsz_f = output_sz / ws
        h_rsz_f = output_sz / hs
        im_crop_padded_rsz = cv.resize(im_crop_padded, (output_sz, output_sz))
        # Keep a channel axis for single-channel images.
        if len(im_crop_padded_rsz.shape)==2:
            im_crop_padded_rsz = im_crop_padded_rsz[...,np.newaxis]
        return im_crop_padded_rsz, h_rsz_f, w_rsz_f
    else:
        return im_crop_padded, 1.0, 1.0


# Map a mask predicted on the search-region crop back onto the original image.
'''把mask映射到原图上'''
def map_mask_back(im, target_bb, search_area_factor, mask, mode=cv.BORDER_REPLICATE):
    """ Paste a mask predicted on the search-region crop back into full-image coordinates.

    The search region is re-derived from target_bb exactly as in sample_target_SE; the mask is
    resized to the crop size, written into a zero canvas padded the same way as the crop, and the
    canvas is then cut back to the original image size.

    args:
        im - cv image (used only for its height/width)
        target_bb - target box [x, y, w, h] that defined the crop
        search_area_factor - Ratio of crop size to target size
        mask - mask predicted on the (resized) crop
        mode - cv2 border mode used when padding the canvas

    returns:
        numpy array - mask of shape (H, W) in original image coordinates
    """
    H,W = (im.shape[0],im.shape[1])
    base = np.zeros((H,W))
    x, y, w, h = target_bb.tolist()

    # Crop image
    ws = math.ceil(search_area_factor * w)
    hs = math.ceil(search_area_factor * h)

    if ws < 1 or hs < 1:
        raise Exception('Too small bounding box.')

    x1 = round(x + 0.5*w - ws*0.5)
    x2 = x1 + ws

    y1 = round(y + 0.5 * h - hs * 0.5)
    y2 = y1 + hs

    x1_pad = max(0, -x1)
    x2_pad = max(x2-im.shape[1]+1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2-im.shape[0]+1, 0)

    # Pad the canvas the same way the crop was padded.
    '''pad base'''
    base_padded = cv.copyMakeBorder(base, y1_pad, y2_pad, x1_pad, x2_pad, mode)
    # Resize the mask back to the crop's native size.
    '''Resize mask'''
    mask_rsz = cv.resize(mask,(ws,hs))
    # Write the mask into the crop's location on the padded canvas.
    '''fill region with mask'''
    base_padded[y1+y1_pad:y2+y1_pad, x1+x1_pad:x2+x1_pad] = mask_rsz.copy()
    # Cut the padded canvas back to the original image extent.
    '''crop base_padded to get final mask'''
    final_mask = base_padded[y1_pad:y1_pad+H,x1_pad:x1_pad+W]
    assert (final_mask.shape == (H,W))
    return final_mask


'''Added on 2019.12.23'''
def transform_image_to_crop_SE(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor_h: float,
                               resize_factor_w: float, crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box_in - the box for which the co-ordinates are to be transformed
        box_extract - the box about which the image crop has been extracted.
        resize_factor_h, resize_factor_w - per-axis ratios between the original image scale and the crop scale
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    box_extract_center = box_extract[0:2] + 0.5*box_extract[2:4]

    box_in_center = box_in[0:2] + 0.5*box_in[2:4]

    # NOTE(review): crop_sz[0] is used for both the x and y centers and for both clamp limits;
    # this is only correct while the crop is square — confirm before supporting non-square crops.
    box_out_xc = (crop_sz[0] -1)/2 + (box_in_center[0] - box_extract_center[0])*resize_factor_w
    box_out_yc = (crop_sz[0] -1)/2 + (box_in_center[1] - box_extract_center[1])*resize_factor_h
    box_out_w = box_in[2] * resize_factor_w
    box_out_h = box_in[3] * resize_factor_h

    # 2019.12.28: clamp so that (x1, y1) is not below 0 and (x2, y2) does not exceed the crop size.
    '''2019.12.28 为了避免出现(x1,y1)小于0,或者(x2,y2)大于256的情况,这里我对它们加上了一些限制'''
    max_sz = crop_sz[0].item()
    box_out_x1 = torch.clamp(box_out_xc - 0.5 * box_out_w,0,max_sz)
    box_out_y1 = torch.clamp(box_out_yc - 0.5 * box_out_h,0,max_sz)
    box_out_x2 = torch.clamp(box_out_xc + 0.5 * box_out_w,0,max_sz)
    box_out_y2 = torch.clamp(box_out_yc + 0.5 * box_out_h,0,max_sz)
    box_out_w_new = box_out_x2 - box_out_x1
    box_out_h_new = box_out_y2 - box_out_y1
    box_out = torch.stack((box_out_x1, box_out_y1, box_out_w_new, box_out_h_new))
    return box_out


def centered_crop(frames, anno, area_factor, output_sz):
    # NOTE(review): sample_target and transform_image_to_crop are neither defined nor imported
    # in this module — calling this function raises NameError; confirm the intended import.
    crops_resize_factors = [sample_target(f, a, area_factor, output_sz)
                            for f, a in zip(frames, anno)]

    frames_crop, resize_factors = zip(*crops_resize_factors)

    crop_sz = torch.Tensor([output_sz, output_sz])

    # find the bb location in the crop
    anno_crop = [transform_image_to_crop(a, a, rf, crop_sz)
                 for a, rf in zip(anno, resize_factors)]

    return frames_crop, anno_crop


'''Added by Bin Yan 2019.12.23, changed on 2020.1.4(add a new args: "get_bbox_coord")'''
def jittered_center_crop_SE(frames, box_extract, box_gt, search_area_factor, output_sz,
                            get_bbox_coord=True, mode=cv.BORDER_REPLICATE):
    """ Crop a patch centered at box_extract. The height and width of cropped region is search_area_factor times that
    of box_extract. The extracted crops are then resized to output_sz.
Further, the co-ordinates of the box box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    # Uses "sample_target_SE" to crop and "transform_image_to_crop_SE" to map the boxes.
    '''call function "sample_target_SE" and function "transform_image_to_crop_SE"'''
    # Get the fixed-size cropped patches.
    '''get cropped patch(fixed size)'''
    crops_resize_factors = [sample_target_SE(f, a, search_area_factor, output_sz, mode=mode)
                            for f, a in zip(frames, box_extract)]

    frames_crop, resize_factors_h, resize_factors_w = zip(*crops_resize_factors)

    if get_bbox_coord:
        crop_sz = torch.Tensor([output_sz, output_sz])

        # find the bb location in the crop
        # Ground-truth coordinates on the cropped patch.
        '''get GT's cooridinate on the cropped patch'''
        box_crop = [transform_image_to_crop_SE(a_gt, a_ex, h_rsf, w_rsf, crop_sz)
                    for a_gt, a_ex, h_rsf, w_rsf in zip(box_gt, box_extract, resize_factors_h, resize_factors_w)]
        return frames_crop, box_crop
    else:
        return frames_crop


def sample_target_nopad(im, target_bb, search_area_factor, output_sz):
    """ Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains regions
    outside the image, it is shifted so that the it is inside the image. Further, if the crop area exceeds the image
    size, a smaller crop which fits the image is returned instead.

    args:
        im - cv image
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.

    returns:
        cv image - extracted crop
        torch.Tensor - a bounding box denoting the cropped region in the image.
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)
    output_sz = torch.Tensor(output_sz)

    im_h = im.shape[0]
    im_w = im.shape[1]

    bbx, bby, bbw, bbh = target_bb.tolist()

    # Crop image
    crop_sz_x, crop_sz_y = (output_sz * (target_bb[2:].prod()/output_sz.prod()).sqrt() * search_area_factor).ceil()

    # Calculate rescaling factor if outside the image
    rescale_factor = max(1, crop_sz_x/im_w, crop_sz_y/im_h)

    crop_sz_x = math.floor(crop_sz_x / rescale_factor)
    crop_sz_y = math.floor(crop_sz_y / rescale_factor)

    if crop_sz_x < 1 or crop_sz_y < 1:
        raise Exception('Too small bounding box.')

    x1 = round(bbx + 0.5*bbw - crop_sz_x*0.5)
    x2 = x1 + crop_sz_x

    y1 = round(bby + 0.5*bbh - crop_sz_y*0.5)
    y2 = y1 + crop_sz_y

    # Move box inside image
    shift_x = max(0, -x1) + min(0, im_w - x2)
    x1 += shift_x
    x2 += shift_x

    shift_y = max(0, -y1) + min(0, im_h - y2)
    y1 += shift_y
    y2 += shift_y

    # Crop and resize image (no padding: crop was shifted/shrunk to fit the image).
    im_crop = im[y1:y2, x1:x2, :]

    im_out = cv.resize(im_crop, tuple(output_sz.long().tolist()))

    crop_box = torch.Tensor([x1, y1, x2-x1, y2-y1])

    return im_out, crop_box


def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box - the box for which the co-ordinates are to be transformed
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """

    box_out = box.clone()
    box_out[:2] -= crop_box[:2]

    scale_factor = crop_sz / crop_box[2:]

    box_out[:2] *= scale_factor
    box_out[2:] *= scale_factor
    return box_out


def jittered_center_crop_nopad(frames, box_extract, box_gt, search_area_factor, output_sz):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2 times
    box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it
    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates
    of the box box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                 image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """
    if isinstance(output_sz, (float, int)):
        output_sz = (output_sz, output_sz)

    frame_crops_boxes = [sample_target_nopad(f, a, search_area_factor, output_sz)
                         for f, a in zip(frames, box_extract)]

    frames_crop, crop_boxes = zip(*frame_crops_boxes)

    crop_sz = torch.Tensor(output_sz)

    # find the bb location in the crop
    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)
                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]

    return frames_crop, box_crop


def iou(reference, proposals):
    """Compute the IoU between a reference box with multiple proposal boxes.

    args:
        reference - Tensor of shape (1, 4).
        proposals - Tensor of shape (num_proposals, 4)

    returns:
        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.
    """

    # Intersection box
    tl = torch.max(reference[:,:2], proposals[:,:2])
    br = torch.min(reference[:,:2] + reference[:,2:], proposals[:,:2] + proposals[:,2:])
    sz = (br - tl).clamp(0)

    # Area
    intersection = sz.prod(dim=1)
    union = reference[:,2:].prod(dim=1) + proposals[:,2:].prod(dim=1) - intersection

    return intersection / union


def rand_uniform(a, b, shape=1):
    """ sample numbers uniformly between a and b.
def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates

    args:
        box - input box; indexing suggests a 4-element Tensor [x, y, w, h]
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of
                        sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                        sigma_factor element can be either a float, or a tensor of shape (4,) specifying the
                        sigma_factor per co-ordinate

    returns:
        torch.Tensor - the perturbed box
        torch.Tensor - IoU between the input box and the returned box (as returned by iou())
    """
    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = random.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    # Broadcast a scalar sigma to one sigma per co-ordinate.
    if not isinstance(c_sigma_factor, torch.Tensor):
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    # Noise std is proportional to the geometric mean of the box dimensions.
    perturb_factor = torch.sqrt(box[2]*box[3])*c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5*box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = random.gauss(c_x, perturb_factor[0])
        c_y_per = random.gauss(c_y, perturb_factor[1])

        w_per = random.gauss(box[2], perturb_factor[2])
        h_per = random.gauss(box[3], perturb_factor[3])

        # A degenerate (<=1 px) size is replaced by a random fraction of the original size.
        if w_per <= 1:
            w_per = box[2]*rand_uniform(0.15, 0.5)

        if h_per <= 1:
            h_per = box[3]*rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([c_x_per - 0.5*w_per, c_y_per - 0.5*h_per, w_per, h_per]).round()

        # Rounding may still collapse a dimension; re-randomize it if so.
        if box_per[2] <= 1:
            box_per[2] = box[2]*rand_uniform(0.15, 0.5)

        if box_per[3] <= 1:
            box_per[3] = box[3]*rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor for the next attempt
        perturb_factor *= 0.9

    # NOTE(review): after 100 failed tries the last candidate is returned even
    # though its IoU is below min_iou — callers should be aware.
    return box_per, box_iou
def gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True):
    """Construct Gaussian label function.

    args:
        target_bb - target boxes; the indexing implies shape (num_images, 4) in [x, y, w, h] format
        sigma_factor - std of the Gaussian relative to sqrt of the feature-map area
        kernel_sz - size of the (correlation) kernel; scalar or 2-tuple
        feat_sz - size of the output label map; scalar or 2-tuple
        image_sz - size of the input image patch; scalar or 2-tuple
        end_pad_if_even - if True, pad the label by one sample at the end for even kernel
                          sizes (keeps the peak aligned after correlation)

    returns:
        the 2-D Gaussian label maps produced by gauss_2d, one per row of target_bb
    """
    # Normalize scalar sizes to 2-tuples.
    if isinstance(kernel_sz, (float, int)):
        kernel_sz = (kernel_sz, kernel_sz)
    if isinstance(feat_sz, (float, int)):
        feat_sz = (feat_sz, feat_sz)
    if isinstance(image_sz, (float, int)):
        image_sz = (image_sz, image_sz)

    image_sz = torch.Tensor(image_sz)
    feat_sz = torch.Tensor(feat_sz)

    # Box center in image co-ordinates, then normalized relative to the image center.
    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]
    target_center_norm = (target_center - image_sz / 2) / image_sz

    # Map the normalized center onto the feature grid; the half-sample offset
    # compensates for odd kernel sizes.
    center = feat_sz * target_center_norm + 0.5 * \
             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])

    sigma = sigma_factor * feat_sz.prod().sqrt().item()

    if end_pad_if_even:
        end_pad = (int(kernel_sz[0]%2 == 0), int(kernel_sz[1]%2 == 0))
    else:
        end_pad = (0, 0)

    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad)
    return gauss_label
Each training sample is a tuple consisting of i) a set of train frames, used to learn the DiMP classification model and obtain the modulation vector for IoU-Net, and ii) a set of test frames on which target classification loss for the predicted DiMP model, and the IoU prediction loss for the IoU-Net is calculated. The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and 'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id] and (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled. If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found. The sampled frames are then passed through the input 'processing' function for the necessary processing- """ def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'): """ args: datasets - List of datasets to be used for training p_datasets - List containing the probabilities by which each dataset will be sampled samples_per_epoch - Number of training samples per epoch max_gap - Maximum gap, in frame numbers, between the train frames and the test frames. num_test_frames - Number of test frames to sample. num_train_frames - Number of train frames to sample. processing - An instance of Processing class which performs the necessary processing of the data. frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a causally, otherwise randomly within the interval. 
""" self.datasets = datasets # If p not provided, sample uniformly from all videos if p_datasets is None: p_datasets = [len(d) for d in self.datasets] # Normalize p_total = sum(p_datasets) self.p_datasets = [x / p_total for x in p_datasets] self.samples_per_epoch = samples_per_epoch self.max_gap = max_gap self.num_test_frames = num_test_frames self.num_train_frames = num_train_frames self.processing = processing self.frame_sample_mode = frame_sample_mode def __len__(self): return self.samples_per_epoch def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None): """ Samples num_ids frames between min_id and max_id for which target is visible args: visible - 1d Tensor indicating whether target is visible for each frame num_ids - number of frames to be samples min_id - Minimum allowed frame number max_id - Maximum allowed frame number returns: list - List of sampled frame numbers. None if not sufficient visible frames could be found. """ if num_ids == 0: return [] if min_id is None or min_id < 0: min_id = 0 if max_id is None or max_id > len(visible): max_id = len(visible) valid_ids = [i for i in range(min_id, max_id) if visible[i]] # No visible ids if len(valid_ids) == 0: return None return random.choices(valid_ids, k=num_ids) def __getitem__(self, index): """ args: index (int): Index (Ignored since we sample randomly) returns: TensorDict - dict containing all the data blocks """ # Select a dataset dataset = random.choices(self.datasets, self.p_datasets)[0] is_video_dataset = dataset.is_video_sequence() # Sample a sequence with enough visible frames enough_visible_frames = False while not enough_visible_frames: # Sample a sequence seq_id = random.randint(0, dataset.get_num_sequences() - 1) # Sample frames seq_info_dict = dataset.get_sequence_info(seq_id) visible = seq_info_dict['visible'] enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * ( self.num_test_frames + self.num_train_frames) and len(visible) >= 20 enough_visible_frames = 
enough_visible_frames or not is_video_dataset if is_video_dataset: train_frame_ids = None test_frame_ids = None gap_increase = 0 if self.frame_sample_mode == 'interval': # Sample frame numbers within interval defined by the first frame while test_frame_ids is None: base_frame_id = self._sample_visible_ids(visible, num_ids=1) extra_train_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1, min_id=base_frame_id[ 0] - self.max_gap - gap_increase, max_id=base_frame_id[ 0] + self.max_gap + gap_increase) if extra_train_frame_ids is None: gap_increase += 5 continue train_frame_ids = base_frame_id + extra_train_frame_ids test_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_test_frames, min_id=train_frame_ids[0] - self.max_gap - gap_increase, max_id=train_frame_ids[0] + self.max_gap + gap_increase) gap_increase += 5 # Increase gap until a frame is found elif self.frame_sample_mode == 'causal': # Sample test and train frames in a causal manner, i.e. test_frame_ids > train_frame_ids while test_frame_ids is None: base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_train_frames - 1, max_id=len(visible) - self.num_test_frames) prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1, min_id=base_frame_id[0] - self.max_gap - gap_increase, max_id=base_frame_id[0]) if prev_frame_ids is None: gap_increase += 5 continue train_frame_ids = base_frame_id + prev_frame_ids test_frame_ids = self._sample_visible_ids(visible, min_id=train_frame_ids[0] + 1, max_id=train_frame_ids[0] + self.max_gap + gap_increase, num_ids=self.num_test_frames) # Increase gap until a frame is found gap_increase += 5 else: # In case of image dataset, just repeat the image to generate synthetic video train_frame_ids = [1] * self.num_train_frames test_frame_ids = [1] * self.num_test_frames train_frames, train_anno, meta_obj_train = dataset.get_frames(seq_id, train_frame_ids, seq_info_dict) test_frames, test_anno, 
meta_obj_test = dataset.get_frames(seq_id, test_frame_ids, seq_info_dict) data = TensorDict({'train_images': train_frames, 'train_anno': train_anno['bbox'], 'test_images': test_frames, 'test_anno': test_anno['bbox'], 'dataset': dataset.get_name(), 'test_class': meta_obj_test.get('object_class_name')}) return self.processing(data) class DiMPSampler(TrackingSampler): """ See TrackingSampler.""" def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'): super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap, num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing, frame_sample_mode=frame_sample_mode) class ATOMSampler(TrackingSampler): """ See TrackingSampler.""" def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap, num_test_frames=1, num_train_frames=1, processing=no_processing, frame_sample_mode='interval'): super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap, num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing, frame_sample_mode=frame_sample_mode) ================================================ FILE: external/AR/ltr/data/transforms.py ================================================ import random import numpy as np import math import cv2 as cv import torch import torch.nn.functional as F import torchvision.transforms.functional as tvisf class Transform: """A set of transformations, used for e.g. data augmentation. Args of constructor: transforms: An arbitrary number of transformations, derived from the TransformBase class. They are applied in the order they are given. The Transform object can jointly transform images, bounding boxes and segmentation masks. This is done by calling the object with the following key-word arguments (all are optional). 
The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances. image - Image coords - 2xN dimensional Tensor of 2D image coordinates [y, x] bbox - Bounding box on the form [x, y, w, h] mask - Segmentation mask with discrete classes The following parameters can be supplied with calling the transform object: joint [Bool] - If True then transform all images/coords/bbox/mask in the list jointly using the same transformation. Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using different random rolls. Default: True. new_roll [Bool] - If False, then no new random roll is performed, and the saved result from the previous roll is used instead. Default: True. Check the DiMPProcessing class for examples. """ def __init__(self, *transforms): if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)): transforms = transforms[0] self.transforms = transforms self._valid_inputs = ['image', 'coords', 'bbox', 'mask'] self._valid_args = ['joint', 'new_roll'] self._valid_all = self._valid_inputs + self._valid_args def __call__(self, **inputs): var_names = [k for k in inputs.keys() if k in self._valid_inputs] for v in inputs.keys(): if v not in self._valid_all: raise ValueError('Incorrect input \"{}\" to transform. 
Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args)) joint_mode = inputs.get('joint', True) new_roll = inputs.get('new_roll', True) if not joint_mode: out = zip(*[self(**inp) for inp in self._split_inputs(inputs)]) return tuple(list(o) for o in out) out = {k: v for k, v in inputs.items() if k in self._valid_inputs} for t in self.transforms: out = t(**out, joint=joint_mode, new_roll=new_roll) if len(var_names) == 1: return out[var_names[0]] # Make sure order is correct return tuple(out[v] for v in var_names) def _split_inputs(self, inputs): var_names = [k for k in inputs.keys() if k in self._valid_inputs] split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])] for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()): if isinstance(arg_val, list): for inp, av in zip(split_inputs, arg_val): inp[arg_name] = av else: for inp in split_inputs: inp[arg_name] = arg_val return split_inputs def __repr__(self): format_string = self.__class__.__name__ + '(' for t in self.transforms: format_string += '\n' format_string += ' {0}'.format(t) format_string += '\n)' return format_string class TransformBase: """Base class for transformation objects. 
See the Transform class for details.""" def __init__(self): self._valid_inputs = ['image', 'coords', 'bbox', 'mask'] self._valid_args = ['new_roll'] self._valid_all = self._valid_inputs + self._valid_args self._rand_params = None def __call__(self, **inputs): # Split input input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs} input_args = {k: v for k, v in inputs.items() if k in self._valid_args} # Roll random parameters for the transform if input_args.get('new_roll', True): rand_params = self.roll() if rand_params is None: rand_params = () elif not isinstance(rand_params, tuple): rand_params = (rand_params,) self._rand_params = rand_params outputs = dict() for var_name, var in input_vars.items(): if var is not None: transform_func = getattr(self, 'transform_' + var_name) if var_name in ['coords', 'bbox']: params = (self._get_image_size(input_vars),) + self._rand_params else: params = self._rand_params if isinstance(var, (list, tuple)): outputs[var_name] = [transform_func(x, *params) for x in var] else: outputs[var_name] = transform_func(var, *params) return outputs def _get_image_size(self, inputs): im = None for var_name in ['image', 'mask']: if inputs.get(var_name) is not None: im = inputs[var_name] break if im is None: return None if isinstance(im, (list, tuple)): im = im[0] if isinstance(im, np.ndarray): return im.shape[:2] if torch.is_tensor(im): return (im.shape[-2], im.shape[-1]) raise Exception('Unknown image type') def roll(self): return None def transform_image(self, image, *rand_params): """Must be deterministic""" return image def transform_coords(self, coords, image_shape, *rand_params): """Must be deterministic""" return coords def transform_bbox(self, bbox, image_shape, *rand_params): """Assumes [x, y, w, h]""" # Check if not overloaded if self.transform_coords.__code__ == TransformBase.transform_coords.__code__: return bbox coord = bbox.clone().view(-1,2).t().flip(0) x1 = coord[1, 0] x2 = coord[1, 0] + coord[1, 1] y1 = coord[0, 
class ToTensor(TransformBase):
    """Convert an image (and, if given, a segmentation mask) to a torch Tensor."""

    def transform_image(self, image):
        # handle numpy array: add a channel axis to grayscale images and move
        # channels first (HWC -> CHW).
        if image.ndim == 2:
            image = image[:, :, None]

        image = torch.from_numpy(image.transpose((2, 0, 1)))
        # backward compatibility: byte images are rescaled to [0, 1] floats.
        if isinstance(image, torch.ByteTensor):
            return image.float().div(255)
        else:
            return image

    def transform_mask(self, mask):
        # Fixed: this method was misspelled 'transfrom_mask', so the
        # TransformBase dispatch (getattr(self, 'transform_' + var_name))
        # never found it and masks were silently left unconverted.
        if isinstance(mask, np.ndarray):
            return torch.from_numpy(mask)
        # Fixed: previously fell through and implicitly returned None for
        # masks that were already tensors; pass them through unchanged.
        return mask
probability = 0.5): super().__init__() self.probability = probability self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32) def roll(self): return random.random() < self.probability def transform_image(self, image, do_grayscale): if do_grayscale: if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY) return np.stack([img_gray, img_gray, img_gray], axis=2) # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2) return image class ToBGR(TransformBase): """Converts image to BGR""" def transform_image(self, image): if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR) return img_bgr class RandomHorizontalFlip(TransformBase): """Horizontally flip image randomly with a probability p.""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def roll(self): return random.random() < self.probability def transform_image(self, image, do_flip): if do_flip: if torch.is_tensor(image): return image.flip((2,)) return np.fliplr(image).copy() return image def transform_coords(self, coords, image_shape, do_flip): if do_flip: coords = coords.clone() coords[1,:] = (image_shape[1] - 1) - coords[1,:] return coords def transform_mask(self, mask, do_flip): if do_flip: if torch.is_tensor(mask): return mask.flip((-1,)) return np.fliplr(mask).copy() return mask class Blur(TransformBase): """ Blur the image by applying a gaussian kernel with given sigma""" def __init__(self, sigma): super().__init__() if isinstance(sigma, (float, int)): sigma = (sigma, sigma) self.sigma = sigma self.filter_size = [math.ceil(2*s) for s in self.sigma] x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size] self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)] self.filter[0] = self.filter[0].view(1,1,-1,1) / 
class RandomBlur(TransformBase):
    """ Blur the image, with a given probability, by applying a gaussian kernel with given sigma"""

    def __init__(self, sigma, probability=0.1):
        """
        args:
            sigma - gaussian std; scalar or (sigma_y, sigma_x) pair
            probability - probability with which the blur is applied
        """
        super().__init__()
        self.probability = probability
        if isinstance(sigma, (float, int)):
            sigma = (sigma, sigma)
        self.sigma = sigma

        # Separable gaussian: one 1-D kernel per axis, truncated at 2*sigma.
        self.filter_size = [math.ceil(2*s) for s in self.sigma]
        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]
        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]
        self.filter[0] = self.filter[0].view(1, 1, -1, 1) / self.filter[0].sum()
        self.filter[1] = self.filter[1].view(1, 1, 1, -1) / self.filter[1].sum()

    def roll(self):
        # One boolean roll, shared by all inputs of a joint transform call.
        return random.random() < self.probability

    def transform_image(self, image, do_blur=None):
        # Fixed: this method was named 'transform', which the TransformBase
        # dispatch (getattr(self, 'transform_' + var_name)) never calls, so
        # the blur was silently never applied.
        if do_blur is None:
            do_blur = False

        if do_blur:
            if torch.is_tensor(image):
                # NOTE(review): assumes a (C, H, W) tensor — confirm against callers.
                sz = image.shape[1:]
                im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0],
                               padding=(self.filter_size[0], 0))
                return F.conv2d(im1, self.filter[1],
                                padding=(0, self.filter_size[1])).view(-1, sz[0], sz[1])
            else:
                raise NotImplementedError
        else:
            return image
class RandomAffine(TransformBase):
    """Apply a random affine transformation (flip, rotation, shear, scale) to images, coords and masks."""

    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,
                 border_mode='constant', pad_amount=0):
        """
        args:
            p_flip - probability of a horizontal flip
            max_rotation - maximum rotation angle in degrees, sampled uniformly in +-max_rotation
            max_shear - maximum shear factor, sampled uniformly per axis
            max_scale - scale factor is exp(U(-max_scale, max_scale))
            max_ar_factor - extra log aspect-ratio jitter applied on top of the scale
            border_mode - 'constant' or 'replicate' border handling for cv.warpAffine
            pad_amount - pixels of padding added on each side of the output
        raises:
            ValueError - if border_mode is not one of the supported values
        """
        super().__init__()
        self.p_flip = p_flip
        self.max_rotation = max_rotation
        self.max_shear = max_shear
        self.max_scale = max_scale
        self.max_ar_factor = max_ar_factor
        if border_mode == 'constant':
            self.border_flag = cv.BORDER_CONSTANT
        elif border_mode == 'replicate':
            # Fixed: this line was 'self.border_flag == cv.BORDER_REPLICATE'
            # (a comparison, not an assignment), so the attribute was never set
            # and 'replicate' mode raised AttributeError at transform time.
            self.border_flag = cv.BORDER_REPLICATE
        else:
            raise ValueError("Unknown border_mode '{}'".format(border_mode))

        self.pad_amount = pad_amount

    def roll(self):
        """Sample the random parameters used for one transformation."""
        do_flip = random.random() < self.p_flip
        theta = random.uniform(-self.max_rotation, self.max_rotation)

        shear_x = random.uniform(-self.max_shear, self.max_shear)
        shear_y = random.uniform(-self.max_shear, self.max_shear)

        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))
        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))

        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)

    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):
        """Build the 2x3 affine matrix combining flip, rotation, shear and scale."""
        im_h, im_w = image_shape
        t_mat = np.identity(3)

        if do_flip:  # fixed: the condition was duplicated ('if do_flip: if do_flip:')
            t_mat[0, 0] = -1.0
            t_mat[0, 2] = im_w

        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)
        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))

        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],
                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],
                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_mat = t_scale @ t_rot @ t_shear @ t_mat

        # Shift by the padding so the padded output stays centered.
        t_mat[0, 2] += self.pad_amount
        t_mat[1, 2] += self.pad_amount

        return t_mat[:2, :]

    def transform_image(self, image, do_flip, theta, shear_values, scale_factors):
        if torch.is_tensor(image):
            raise Exception('Only supported for numpy input')

        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)
        output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount)

        return cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR,
                             borderMode=self.border_flag)

    def transform_coords(self, coords, image_shape, do_flip, theta, shear_values, scale_factors):
        t_mat = self._construct_t_mat(image_shape, do_flip, theta, shear_values, scale_factors)
        t_mat_tensor = torch.from_numpy(t_mat).float()

        # Coords are stored [y; x]; convert to homogeneous [x; y; 1] for the product,
        # then swap back afterwards.
        coords_xy1 = torch.stack((coords[1, :], coords[0, :], torch.ones_like(coords[1, :])))
        coords_xy_t = torch.mm(t_mat_tensor, coords_xy1)
        return coords_xy_t[[1, 0], :]

    def transform_mask(self, mask, do_flip, theta, shear_values, scale_factors):
        t_mat = self._construct_t_mat(mask.shape[:2], do_flip, theta, shear_values, scale_factors)
        output_sz = (mask.shape[1] + 2*self.pad_amount, mask.shape[0] + 2*self.pad_amount)

        mask_t = cv.warpAffine(mask.numpy(), t_mat, output_sz, flags=cv.INTER_NEAREST,
                               borderMode=self.border_flag)
        return torch.from_numpy(mask_t)
torch.from_numpy(t_mat).float() coords_xy1 = torch.stack((coords[1, :], coords[0, :], torch.ones_like(coords[1, :]))) coords_xy_t = torch.mm(t_mat_tensor, coords_xy1) return coords_xy_t[[1, 0], :] def transform_mask(self, mask, do_flip, theta, shear_values, scale_factors): t_mat = self._construct_t_mat(mask.shape[:2], do_flip, theta, shear_values, scale_factors) output_sz = (mask.shape[1] + 2*self.pad_amount, mask.shape[0] + 2*self.pad_amount) mask_t = cv.warpAffine(mask.numpy(), t_mat, output_sz, flags=cv.INTER_NEAREST, borderMode=self.border_flag) return torch.from_numpy(mask_t) ================================================ FILE: external/AR/ltr/dataset/__init__.py ================================================ from .lasot import Lasot from .got10k import Got10k from .tracking_net import TrackingNet from .imagenetvid import ImagenetVID from .coco import MSCOCO from .coco_seq import MSCOCOSeq from .youtubevos import YouTubeVOS from .davis import Davis from .lvis import LVIS from .ecssd import ECSSD from .msra10k import MSRA10k from .hku_is import HKUIS from .sbd import SBD from .synthetic_video import SyntheticVideo from .synthetic_video_blend import SyntheticVideoBlend ================================================ FILE: external/AR/ltr/dataset/base_image_dataset.py ================================================ import torch.utils.data from ltr.data.image_loader import jpeg4py_loader class BaseImageDataset(torch.utils.data.Dataset): """ Base class for image datasets """ def __init__(self, name, root, image_loader=jpeg4py_loader): """ args: root - The root path to the dataset image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. """ self.name = name self.root = root self.image_loader = image_loader self.image_list = [] # Contains the list of sequences. 
self.class_list = [] def __len__(self): """ Returns size of the dataset returns: int - number of samples in the dataset """ return self.get_num_images() def __getitem__(self, index): """ Not to be used! Check get_frames() instead. """ return None def get_name(self): """ Name of the dataset returns: string - Name of the dataset """ raise NotImplementedError def get_num_images(self): """ Number of sequences in a dataset returns: int - number of sequences in the dataset.""" return len(self.image_list) def has_class_info(self): return False def get_class_name(self, image_id): return None def get_num_classes(self): return len(self.class_list) def get_class_list(self): return self.class_list def get_images_in_class(self, class_name): raise NotImplementedError def has_segmentation_info(self): return False def get_image_info(self, seq_id): """ Returns information about a particular image, args: seq_id - index of the image returns: Dict """ raise NotImplementedError def get_image(self, image_id, anno=None): """ Get a image args: image_id - index of image anno(None) - The annotation for the sequence (see get_sequence_info). If None, they will be loaded. returns: image - anno - dict - A dict containing meta information about the sequence, e.g. class of the target object. """ raise NotImplementedError ================================================ FILE: external/AR/ltr/dataset/base_video_dataset.py ================================================ import torch.utils.data from ltr.data.image_loader import jpeg4py_loader class BaseVideoDataset(torch.utils.data.Dataset): """ Base class for video datasets """ def __init__(self, name, root, image_loader=jpeg4py_loader): """ args: root - The root path to the dataset image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. """ self.name = name self.root = root self.image_loader = image_loader self.sequence_list = [] # Contains the list of sequences. 
self.class_list = [] def __len__(self): """ Returns size of the dataset returns: int - number of samples in the dataset """ return self.get_num_sequences() def __getitem__(self, index): """ Not to be used! Check get_frames() instead. """ return None def is_video_sequence(self): """ Returns whether the dataset is a video dataset or an image dataset returns: bool - True if a video dataset """ return True def is_synthetic_video_dataset(self): """ Returns whether the dataset contains real videos or synthetic returns: bool - True if a video dataset """ return False def get_name(self): """ Name of the dataset returns: string - Name of the dataset """ raise NotImplementedError def get_num_sequences(self): """ Number of sequences in a dataset returns: int - number of sequences in the dataset.""" return len(self.sequence_list) def has_class_info(self): return False def has_occlusion_info(self): return False def get_num_classes(self): return len(self.class_list) def get_class_list(self): return self.class_list def get_sequences_in_class(self, class_name): raise NotImplementedError def has_segmentation_info(self): return False def get_sequence_info(self, seq_id): """ Returns information about a particular sequences, args: seq_id - index of the sequence returns: Dict """ raise NotImplementedError def get_frames(self, seq_id, frame_ids, anno=None): """ Get a set of frames from a particular sequence args: seq_id - index of sequence frame_ids - a list of frame numbers anno(None) - The annotation for the sequence (see get_sequence_info). If None, they will be loaded. returns: list - List of frames corresponding to frame_ids list - List of dicts for each frame dict - A dict containing meta information about the sequence, e.g. class of the target object. 
""" raise NotImplementedError ================================================ FILE: external/AR/ltr/dataset/coco.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader import torch from pycocotools.coco import COCO import random from collections import OrderedDict from ltr.admin.environment import env_settings class MSCOCO(BaseImageDataset): """ The COCO object detection dataset. Publication: Microsoft COCO: Common Objects in Context. Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick ECCV, 2014 https://arxiv.org/pdf/1405.0312.pdf Download the images along with annotations from http://cocodataset.org/#download. The root folder should be organized as follows. - coco_root - annotations - instances_train2014.json - instances_train2017.json - images - train2014 - train2017 Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi. """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None, split="train", version="2014"): """ args: root - path to coco root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 split - 'train' or 'val'. 
version - version of coco dataset (2014 or 2017) """ root = env_settings().coco_dir if root is None else root super().__init__('COCO', root, image_loader) self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version)) self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version)) self.coco_set = COCO(self.anno_path) self.cats = self.coco_set.cats self.class_list = self.get_class_list() # the parent class thing would happen in the sampler self.image_list = self._get_image_list(min_area=min_area) if data_fraction is not None: self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction)) self.im_per_class = self._build_im_per_class() def _get_image_list(self, min_area=None): ann_list = list(self.coco_set.anns.keys()) image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0] if min_area is not None: image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area] return image_list def get_num_classes(self): return len(self.class_list) def get_name(self): return 'coco' def has_class_info(self): return True def has_segmentation_info(self): return True def get_class_list(self): class_list = [] for cat_id in self.cats.keys(): class_list.append(self.cats[cat_id]['name']) return class_list def _build_im_per_class(self): im_per_class = {} for i, im in enumerate(self.image_list): class_name = self.cats[self.coco_set.anns[im]['category_id']]['name'] if class_name not in im_per_class: im_per_class[class_name] = [i] else: im_per_class[class_name].append(i) return im_per_class def get_images_in_class(self, class_name): return self.im_per_class[class_name] def get_image_info(self, im_id): anno = self._get_anno(im_id) bbox = torch.Tensor(anno['bbox']).view(4,) mask = torch.Tensor(self.coco_set.annToMask(anno)) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def _get_anno(self, im_id): 
anno = self.coco_set.anns[self.image_list[im_id]] return anno def _get_image(self, im_id): path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name'] img = self.image_loader(os.path.join(self.img_pth, path)) return img def get_meta_info(self, im_id): try: cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']] object_meta = OrderedDict({'object_class_name': cat_dict_current['name'], 'motion_class': None, 'major_class': cat_dict_current['supercategory'], 'root_class': None, 'motion_adverb': None}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_class_name(self, im_id): cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']] return cat_dict_current['name'] def get_image(self, image_id, anno=None): frame = self._get_image(image_id) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: external/AR/ltr/dataset/coco_seq.py ================================================ import os from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader import torch import random from pycocotools.coco import COCO from collections import OrderedDict from ltr.admin.environment import env_settings class MSCOCOSeq(BaseVideoDataset): """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1. Publication: Microsoft COCO: Common Objects in Context. Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick ECCV, 2014 https://arxiv.org/pdf/1405.0312.pdf Download the images along with annotations from http://cocodataset.org/#download. 
The root folder should be organized as follows. - coco_root - annotations - instances_train2014.json - instances_train2017.json - images - train2014 - train2017 Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi. """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"): """ args: root - path to the coco dataset. image_loader (default_image_loader) - The function to read the images. If installed, jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used. data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the images will be used split - 'train' or 'val'. version - version of coco dataset (2014 or 2017) """ root = env_settings().coco_dir if root is None else root super().__init__('COCO', root, image_loader) self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version)) self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version)) # Load the COCO set. 
self.coco_set = COCO(self.anno_path) self.cats = self.coco_set.cats self.class_list = self.get_class_list() self.sequence_list = self._get_sequence_list() if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.seq_per_class = self._build_seq_per_class() def _get_sequence_list(self): ann_list = list(self.coco_set.anns.keys()) seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0] return seq_list def is_video_sequence(self): return False def get_num_classes(self): return len(self.class_list) def get_name(self): return 'coco' def has_class_info(self): return True def get_class_list(self): class_list = [] for cat_id in self.cats.keys(): class_list.append(self.cats[cat_id]['name']) return class_list def has_segmentation_info(self): return True def get_num_sequences(self): return len(self.sequence_list) def _build_seq_per_class(self): seq_per_class = {} for i, seq in enumerate(self.sequence_list): class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name'] if class_name not in seq_per_class: seq_per_class[class_name] = [i] else: seq_per_class[class_name].append(i) return seq_per_class def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def get_sequence_info(self, seq_id): anno = self._get_anno(seq_id) bbox = torch.Tensor(anno['bbox']).view(1, 4) mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def _get_anno(self, seq_id): anno = self.coco_set.anns[self.sequence_list[seq_id]] return anno def _get_frames(self, seq_id): path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name'] img = self.image_loader(os.path.join(self.img_pth, path)) return img def get_meta_info(self, seq_id): try: cat_dict_current = 
self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']] object_meta = OrderedDict({'object_class_name': cat_dict_current['name'], 'motion_class': None, 'major_class': cat_dict_current['supercategory'], 'root_class': None, 'motion_adverb': None}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_class_name(self, seq_id): cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']] return cat_dict_current['name'] def get_frames(self, seq_id=None, frame_ids=None, anno=None): # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a # list containing these replicated images. frame = self._get_frames(seq_id) frame_list = [frame.copy() for _ in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[0, ...] for _ in frame_ids] object_meta = self.get_meta_info(seq_id) return frame_list, anno_frames, object_meta ================================================ FILE: external/AR/ltr/dataset/davis.py ================================================ from pathlib import Path from ltr.dataset.vos_base import VOSDatasetBase, VOSMeta from pytracking.evaluation import Sequence from ltr.admin.environment import env_settings from ltr.data.image_loader import jpeg4py_loader class Davis(VOSDatasetBase): """ The Davis VOS dataset Publication: A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. 
Sorkine-Hornung CVPR, 2016 http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf Download the dataset from https://davischallenge.org/davis2017/code.html """ def __init__(self, root=None, sequences=None, version='2017', split='train', multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader): """ args: root - Dataset root path. If unset, it uses the path in your local.py config. sequences - List of sequence names. Limit to a subset of sequences if not None. version - '2016' or '2017 split - Any name in DAVIS/ImageSets/ multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one object in each. vis_threshold - Minimum number of pixels required to consider a target object "visible". image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. """ if version == '2017': if split in ['train', 'val']: root = env_settings().davis_dir if root is None else root elif split in ['test-dev']: root = env_settings().davis_testdev_dir if root is None else root else: raise Exception('Unknown split {}'.format(split)) else: root = env_settings().davis16_dir if root is None else root super().__init__(name='DAVIS', root=Path(root), version=version, split=split, multiobj=multiobj, vis_threshold=vis_threshold, image_loader=image_loader) dset_path = self.root self._jpeg_path = dset_path / 'JPEGImages' / '480p' self._anno_path = dset_path / 'Annotations' / '480p' meta_path = dset_path / "generated_meta.json" if meta_path.exists(): self.gmeta = VOSMeta(filename=meta_path) else: self.gmeta = VOSMeta.generate('DAVIS', self._jpeg_path, self._anno_path) self.gmeta.save(meta_path) if sequences is None: if self.split != 'all': fname = dset_path / 'ImageSets' / self.version / (self.split + '.txt') sequences = open(fname).read().splitlines() else: sequences = [p for p in sorted(self._jpeg_path.glob("*")) if p.is_dir()] 
self.sequence_names = sequences self._samples = [] for seq in sequences: obj_ids = self.gmeta.get_obj_ids(seq) if self.multiobj: # Multiple objects per sample self._samples.append((seq, obj_ids)) else: # One object per sample self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids]) print("%s loaded." % self.get_name()) def _construct_sequence(self, sequence_info): seq_name = sequence_info['sequence'] images, gt_labels, gt_bboxes = self.get_paths_and_bboxes(sequence_info) return Sequence(name=seq_name, frames=images, dataset='DAVIS', ground_truth_rect=gt_bboxes, ground_truth_seg=gt_labels, object_ids=sequence_info['object_ids'], multiobj_mode=self.multiobj) ================================================ FILE: external/AR/ltr/dataset/ecssd.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed import torch from collections import OrderedDict from ltr.admin.environment import env_settings from ltr.data.bounding_box_utils import masks_to_bboxes class ECSSD(BaseImageDataset): """ Extended Complex Scene Saliency Dataset (ECSSD) Publication: Hierarchical Image Saliency Detection on Extended CSSD Jianping Shi, Qiong Yan, Li Xu, Jiaya Jia TPAMI, 2016 https://arxiv.org/pdf/1408.5418.pdf Download the dataset from http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None): """ args: root - path to ECSSD root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. 
Default is 0.0 """ root = env_settings().ecssd_dir if root is None else root super().__init__('ECSSD', root, image_loader) self.image_list = self._load_dataset(min_area=min_area) if data_fraction is not None: raise NotImplementedError def _load_dataset(self, min_area=None): images = [] for i in range(1, 1001): a = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(i))) if min_area is None or (a > 0).sum() > min_area: images.append(i) return images def get_name(self): return 'ecssd' def has_segmentation_info(self): return True def get_image_info(self, im_id): mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(self.image_list[im_id]))) mask = torch.Tensor(mask == 255) bbox = masks_to_bboxes(mask, fmt='t').view(4,) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def get_meta_info(self, im_id): object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_image(self, image_id, anno=None): frame = self.image_loader(os.path.join(self.root, 'images', '{:04d}.jpg'.format(self.image_list[image_id]))) if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: external/AR/ltr/dataset/got10k.py ================================================ import os import os.path import numpy as np import torch import csv import pandas import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader from ltr.admin.environment import env_settings class Got10k(BaseVideoDataset): """ GOT-10k dataset. 
Publication: GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild Lianghua Huang, Xin Zhao, and Kaiqi Huang arXiv:1810.11981, 2018 https://arxiv.org/pdf/1810.11981.pdf Download dataset from http://got-10k.aitestunion.com/downloads """ def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None): """ args: root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split, not NOT the official got-10k validation split. To use the official validation split, provide that as the root folder instead. seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids' options can be used at the same time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().got10k_dir if root is None else root super().__init__('GOT10k', root, image_loader) # all folders inside the root self.sequence_list = self._get_sequence_list() # seq_id is the index of the folder inside the got10k root path if split is not None: if seq_ids is not None: raise ValueError('Cannot set both split_name and seq_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt') elif split == 'val': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt') elif split == 'vottrain': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt') elif split == 'votval': file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt') else: raise ValueError('Unknown split name.') seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist() elif seq_ids is None: seq_ids = list(range(0, len(self.sequence_list))) self.sequence_list = [self.sequence_list[i] for i in seq_ids] if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.sequence_meta_info = self._load_meta_info() self.seq_per_class = self._build_seq_per_class() self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def get_name(self): return 'got10k' def has_class_info(self): return True def has_occlusion_info(self): return True def _load_meta_info(self): sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list} return sequence_meta_info def _read_meta(self, seq_path): try: with open(os.path.join(seq_path, 'meta_info.ini')) as f: meta_info = f.readlines() object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1], 'motion_class': meta_info[6].split(': ')[-1][:-1], 'major_class': 
meta_info[7].split(': ')[-1][:-1], 'root_class': meta_info[8].split(': ')[-1][:-1], 'motion_adverb': meta_info[9].split(': ')[-1][:-1]}) except: object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def _build_seq_per_class(self): seq_per_class = {} for i, s in enumerate(self.sequence_list): object_class = self.sequence_meta_info[s]['object_class_name'] if object_class in seq_per_class: seq_per_class[object_class].append(i) else: seq_per_class[object_class] = [i] return seq_per_class def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _get_sequence_list(self): with open(os.path.join(self.root, 'list.txt')) as f: dir_list = list(csv.reader(f)) dir_list = [dir_name[0] for dir_name in dir_list] return dir_list def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values return torch.tensor(gt) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "absence.label") cover_file = os.path.join(seq_path, "cover.label") with open(occlusion_file, 'r', newline='') as f: occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)]) with open(cover_file, 'r', newline='') as f: cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)]) target_visible = ~occlusion & (cover>0).byte() visible_ratio = cover.float() / 8 return target_visible, visible_ratio def _get_sequence_path(self, seq_id): return os.path.join(self.root, self.sequence_list[seq_id]) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible, visible_ratio = self._read_target_visible(seq_path) visible = visible & valid.byte() return {'bbox': bbox, 
'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def get_class_name(self, seq_id): obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] return obj_meta['object_class_name'] def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] return frame_list, anno_frames, obj_meta ================================================ FILE: external/AR/ltr/dataset/hku_is.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed import torch from collections import OrderedDict from ltr.admin.environment import env_settings from ltr.data.bounding_box_utils import masks_to_bboxes class HKUIS(BaseImageDataset): """ HKU-IS salient object detection dataset Publication: Visual saliency based on multiscale deep features Guanbin Li and Yizhou Yu CVPR, 2015 https://arxiv.org/pdf/1503.08663.pdf Dowload dataset from https://sites.google.com/site/ligb86/hkuis """ def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None): """ args: root - path to HKU-IS root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 """ root = env_settings().hkuis_dir if root is None else root super().__init__('HKUIS', root, image_loader) self.image_list, self.anno_list = self._load_dataset(min_area=min_area) if data_fraction is not None: raise NotImplementedError def _load_dataset(self, min_area=None): files_list = os.listdir(os.path.join(self.root, 'imgs')) image_list = [f[:-4] for f in files_list] images = [] annos = [] for f in image_list: a = imread_indexed(os.path.join(self.root, 'gt', '{}.png'.format(f))) if min_area is None or (a > 0).sum() > min_area: im = opencv_loader(os.path.join(self.root, 'imgs', '{}.png'.format(f))) images.append(im) annos.append(a) return images, annos def get_name(self): return 'hku-is' def has_segmentation_info(self): return True def get_image_info(self, im_id): mask = self.anno_list[im_id] mask = torch.Tensor(mask == 255) bbox = masks_to_bboxes(mask, fmt='t').view(4,) valid = (bbox[2] > 0) & (bbox[3] > 0) visible = valid.clone().byte() return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible} def get_meta_info(self, im_id): object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return object_meta def get_image(self, image_id, anno=None): frame = self.image_list[image_id] if anno is None: anno = self.get_image_info(image_id) object_meta = self.get_meta_info(image_id) return frame, anno, object_meta ================================================ FILE: external/AR/ltr/dataset/imagenetvid.py ================================================ import os from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import default_image_loader import xml.etree.ElementTree as ET import json import torch import random from collections import OrderedDict from ltr.admin.environment import env_settings def get_target_to_image_ratio(seq): 
anno = torch.Tensor(seq['anno']) img_sz = torch.Tensor(seq['image_size']) return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt() class ImagenetVID(BaseVideoDataset): """ Imagenet VID dataset. Publication: ImageNet Large Scale Visual Recognition Challenge Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei IJCV, 2015 https://arxiv.org/pdf/1409.0575.pdf Download the dataset from http://image-net.org/ """ def __init__(self, root=None, image_loader=default_image_loader, min_length=0, max_target_area=1): """ args: root - path to the imagenet vid dataset. image_loader (default_image_loader) - The function to read the images. If installed, jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used. min_length - Minimum allowed sequence length. max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets which cover complete image. 
""" root = env_settings().imagenet_dir if root is None else root super().__init__(root, image_loader) cache_file = os.path.join(root, 'cache.json') if os.path.isfile(cache_file): # If available, load the pre-processed cache file containing meta-info for each sequence with open(cache_file, 'r') as f: sequence_list_dict = json.load(f) self.sequence_list = sequence_list_dict else: # Else process the imagenet annotations and generate the cache file self.sequence_list = self._process_anno(root) with open(cache_file, 'w') as f: json.dump(self.sequence_list, f) # Filter the sequences based on min_length and max_target_area in the first frame self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and get_target_to_image_ratio(x) < max_target_area] def get_name(self): return 'imagenetvid' def get_num_sequences(self): return len(self.sequence_list) def get_sequence_info(self, seq_id): bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno']) valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0) visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte() return {'bbox': bb_anno, 'valid': valid, 'visible': visible} def _get_frame(self, sequence, frame_id): set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id']) vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id']) frame_number = frame_id + sequence['start_frame'] frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name, '{:06d}.JPEG'.format(frame_number)) return self.image_loader(frame_path) def get_frames(self, seq_id, frame_ids, anno=None): sequence = self.sequence_list[seq_id] frame_list = [self._get_frame(sequence, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) # Create anno dict anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] # added the class info to the meta info object_meta = OrderedDict({'object_class': 
sequence['class_name'], 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta def _process_anno(self, root): # Builds individual tracklets base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train') all_sequences = [] for set in sorted(os.listdir(base_vid_anno_path)): set_id = int(set.split('_')[-1]) for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))): vid_id = int(vid.split('_')[-1]) anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid))) frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0])) image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)] objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object') for f in anno_files] tracklets = {} # Find all tracklets along with start frame for f_id, all_targets in enumerate(objects): for target in all_targets: tracklet_id = target.find('trackid').text if tracklet_id not in tracklets: tracklets[tracklet_id] = f_id for tracklet_id, tracklet_start in tracklets.items(): tracklet_anno = [] target_visible = [] class_name_id = None for f_id in range(tracklet_start, len(objects)): found = False for target in objects[f_id]: if target.find('trackid').text == tracklet_id: if not class_name_id: class_name_id = target.find('name').text x1 = int(target.find('bndbox/xmin').text) y1 = int(target.find('bndbox/ymin').text) x2 = int(target.find('bndbox/xmax').text) y2 = int(target.find('bndbox/ymax').text) tracklet_anno.append([x1, y1, x2 - x1, y2 - y1]) target_visible.append(target.find('occluded').text == '0') found = True break if not found: break new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id, 'start_frame': tracklet_start, 'anno': tracklet_anno, 'target_visible': target_visible, 'image_size': image_size} all_sequences.append(new_sequence) return all_sequences 
================================================ FILE: external/AR/ltr/dataset/lasot.py ================================================ import os import os.path import torch import numpy as np import pandas import csv import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.image_loader import jpeg4py_loader from ltr.admin.environment import env_settings class Lasot(BaseVideoDataset): """ LaSOT dataset. Publication: LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling CVPR, 2019 https://arxiv.org/pdf/1809.07845.pdf Download the dataset from https://cis.temple.edu/lasot/download.html """ def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): """ args: root - path to the lasot dataset. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the videos with subscripts -1, -3, and -5 from each class will be used for training. split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of vid_ids or split option can be used at a time. data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default """ root = env_settings().lasot_dir if root is None else root super().__init__('LaSOT', root, image_loader) # Keep a list of all classes self.class_list = [f for f in os.listdir(self.root)] self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)} self.sequence_list = self._build_sequence_list(vid_ids, split) if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.seq_per_class = self._build_class_list() def _build_sequence_list(self, vid_ids=None, split=None): if split is not None: if vid_ids is not None: raise ValueError('Cannot set both split_name and vid_ids.') ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt') else: raise ValueError('Unknown split name.') sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist() elif vid_ids is not None: sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids] else: raise ValueError('Set either split_name or vid_ids.') return sequence_list def _build_class_list(self): seq_per_class = {} for seq_id, seq_name in enumerate(self.sequence_list): class_name = seq_name.split('-')[0] if class_name in seq_per_class: seq_per_class[class_name].append(seq_id) else: seq_per_class[class_name] = [seq_id] return seq_per_class def get_name(self): return 'lasot' def has_class_info(self): return True def has_occlusion_info(self): return True def get_num_sequences(self): return len(self.sequence_list) def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, 
low_memory=False).values return torch.tensor(gt) def _read_target_visible(self, seq_path): # Read full occlusion and out_of_view occlusion_file = os.path.join(seq_path, "full_occlusion.txt") out_of_view_file = os.path.join(seq_path, "out_of_view.txt") with open(occlusion_file, 'r', newline='') as f: occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) with open(out_of_view_file, 'r') as f: out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]]) target_visible = ~occlusion & ~out_of_view return target_visible def _get_sequence_path(self, seq_id): seq_name = self.sequence_list[seq_id] class_name = seq_name.split('-')[0] vid_id = seq_name.split('-')[1] return os.path.join(self.root, class_name, class_name + '-' + vid_id) def get_sequence_info(self, seq_id): seq_path = self._get_sequence_path(seq_id) bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible = self._read_target_visible(seq_path) & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def _get_class(self, seq_path): raw_class = seq_path.split('/')[-2] return raw_class def get_class_name(self, seq_id): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_class = self._get_class(seq_path) frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': 
None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: external/AR/ltr/dataset/lvis.py ================================================ import os from .base_image_dataset import BaseImageDataset from ltr.data.image_loader import jpeg4py_loader_w_failsafe import torch import random import lvis.lvis as lvis_pk from collections import OrderedDict from ltr.admin.environment import env_settings class LVIS(BaseImageDataset): """ The LVIS object detection dataset Publication: LVIS: A Dataset for Large Vocabulary Instance Segmentation Agrim Gupta, Piotr Dollár, and Ross Girshick CVPR, 2019 https://arxiv.org/pdf/1908.03195.pdf Download the images along with annotations from https://www.lvisdataset.org/dataset. The root folder should be organized as follows. - lvis_root - annotations - lvis_v0.5_train.json - lvis_v0.5_val.json - images - val2017 - train2017 Note: You also have to install the lvis Python API from https://github.com/lvis-dataset/lvis-api """ def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, min_area=None, split="train"): """ args: root - path to lvis root folder image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. data_fraction - Fraction of dataset to be used. The complete dataset is used by default min_area - Objects with area less than min_area are filtered out. Default is 0.0 split - 'train' or 'val'. """ root = env_settings().lvis_dir if root is None else root super().__init__('LVIS', root, image_loader) self.img_pth = os.path.join(root, 'images', f'{split}2017/') self.anno_path = os.path.join(root, 'annotations', f'lvis_v0.5_{split}.json') # Load the LVIS set. 
self.lvis_set = lvis_pk.LVIS(self.anno_path)
        self.cats = self.lvis_set.cats

        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler

        self.image_list = self._get_image_list(min_area=min_area)

        if data_fraction is not None:
            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))
        self.im_per_class = self._build_im_per_class()

    def _get_image_list(self, min_area=None):
        # One "image" sample per annotation id, optionally filtered by mask area.
        im_list = list(self.lvis_set.anns.keys())

        # No 'iscrowd' information in LVIS
        if min_area is not None:
            im_list = [s for s in im_list if self.lvis_set.anns[s]['area'] > min_area]

        return im_list

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'lvis'

    def has_class_info(self):
        return True

    def get_class_list(self):
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def _build_im_per_class(self):
        # Map class name -> list of indices into self.image_list.
        im_per_class = {}
        for i, im in enumerate(self.image_list):
            class_name = self.cats[self.lvis_set.anns[im]['category_id']]['name']
            if class_name not in im_per_class:
                im_per_class[class_name] = [i]
            else:
                im_per_class[class_name].append(i)

        return im_per_class

    def get_images_in_class(self, class_name):
        return self.im_per_class[class_name]

    def get_image_info(self, im_id):
        """Return 'bbox', 'mask', 'valid' and 'visible' for a single annotation."""
        anno = self._get_anno(im_id)

        bbox = torch.Tensor(anno['bbox']).view(4,)

        mask = torch.Tensor(self.lvis_set.ann_to_mask(anno))

        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, im_id):
        anno = self.lvis_set.anns[self.image_list[im_id]]
        return anno

    def _get_image(self, im_id):
        path = self.lvis_set.load_imgs([self.lvis_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, im_id):
        try:
            cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': None,  # No 'supercategory' information available in LVIS
                                       'root_class': None,
                                       'motion_adverb': None})
        except:
            # Best-effort: fall back to an all-None meta dict if the category lookup fails.
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, im_id):
        cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]
        return cat_dict_current['name']

    def get_image(self, image_id, anno=None):
        frame = self._get_image(image_id)

        if anno is None:
            anno = self.get_image_info(image_id)

        object_meta = self.get_meta_info(image_id)

        return frame, anno, object_meta


================================================
FILE: external/AR/ltr/dataset/msra10k.py
================================================
import os
from .base_image_dataset import BaseImageDataset
from ltr.data.image_loader import jpeg4py_loader, imread_indexed
import torch
from collections import OrderedDict
from ltr.admin.environment import env_settings
from ltr.data.bounding_box_utils import masks_to_bboxes


class MSRA10k(BaseImageDataset):
    """ MSRA10k salient object detection dataset

    Publication:
        Global contrast based salient region detection
        Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, and Shi-Min Hu
        TPAMI, 2015
        https://mmcheng.net/mftp/Papers/SaliencyTPAMI.pdf

    Download dataset from https://mmcheng.net/msra10k/
    """
    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):
        """
        args:
            root - path to MSRA10k root folder
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            min_area - Objects with area less than min_area are filtered out.
                       Default is 0.0
        """
        root = env_settings().msra10k_dir if root is None else root
        super().__init__('MSRA10k', root, image_loader)

        self.image_list = self._load_dataset(min_area=min_area)

        if data_fraction is not None:
            raise NotImplementedError

    def _load_dataset(self, min_area=None):
        # Images and masks share a stem under Imgs/: '<stem>.jpg' + '<stem>.png'.
        files_list = os.listdir(os.path.join(self.root, 'Imgs'))
        image_list = [f[:-4] for f in files_list if f[-3:] == 'jpg']

        images = []

        for f in image_list:
            a = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(f)))

            if min_area is None or (a > 0).sum() > min_area:
                images.append(f)

        return images

    def get_name(self):
        return 'msra10k'

    def has_segmentation_info(self):
        return True

    def get_image_info(self, im_id):
        """Return 'bbox', 'mask', 'valid' and 'visible' derived from the saliency mask (value 255 = object)."""
        mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(self.image_list[im_id])))
        mask = torch.Tensor(mask == 255)
        bbox = masks_to_bboxes(mask, fmt='t').view(4,)

        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def get_meta_info(self, im_id):
        # MSRA10k has no class annotations.
        object_meta = OrderedDict({'object_class_name': None,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})
        return object_meta

    def get_image(self, image_id, anno=None):
        frame = self.image_loader(os.path.join(self.root, 'Imgs', '{}.jpg'.format(self.image_list[image_id])))

        if anno is None:
            anno = self.get_image_info(image_id)

        object_meta = self.get_meta_info(image_id)

        return frame, anno, object_meta


================================================
FILE: external/AR/ltr/dataset/sbd.py
================================================
from .base_image_dataset import BaseImageDataset
from ltr.data.image_loader import jpeg4py_loader_w_failsafe
import torch
from collections import OrderedDict
import os
from scipy.io import loadmat
from ltr.data.bounding_box_utils import masks_to_bboxes
from ltr.admin.environment import env_settings


class SBD(BaseImageDataset):
    """
    Semantic Boundaries Dataset and Benchmark (SBD)
Publication:
        Semantic contours from inverse detectors
        Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji and Jitendra Malik
        ICCV, 2011
        http://home.bharathh.info/pubs/pdfs/BharathICCV2011.pdf

    Download dataset from: http://home.bharathh.info/pubs/codes/SBD/download.html
    """
    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None,
                 split="train"):
        """
        args:
            root - path to SBD root folder
            image_loader - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                           is used by default.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            split - dataset split ("train", "train_noval", "val")
        """
        root = env_settings().sbd_dir if root is None else root
        super().__init__('SBD', root, image_loader)

        assert split in ["train", "train_noval", "val"]

        self.root = root

        self.image_path_list, self.anno_file_list = self._load_dataset(split)

        # Load mat fine
        anno_list = [loadmat(a) for a in self.anno_file_list]

        self.image_list = self._construct_image_list(anno_list)

        if data_fraction is not None:
            raise NotImplementedError

    def _load_dataset(self, split):
        # Split files list one image stem per line; images in img/, instance masks in inst/.
        split_f = os.path.join(self.root, split.rstrip('\n') + '.txt')

        with open(os.path.join(split_f), "r") as f:
            file_names = [x.strip() for x in f.readlines()]

        image_list = [os.path.join(self.root, 'img', x + ".jpg") for x in file_names]
        anno_list = [os.path.join(self.root, 'inst', x + ".mat") for x in file_names]

        assert (len(image_list) == len(anno_list))

        return image_list, anno_list

    def _get_mask_from_mat(self, mat):
        return torch.tensor(mat['GTinst'][0]['Segmentation'][0])

    def _construct_image_list(self, anno_list):
        # One sample per (image, instance) pair; instance ids are the positive mask values.
        image_list = []

        for im_id, a in enumerate(anno_list):
            mask = self._get_mask_from_mat(a)
            for instance_id in range(1, mask.max().item() + 1):
                image_list.append((im_id, instance_id))

        return image_list

    def get_name(self):
        return 'sbd'

    def has_segmentation_info(self):
        return True

    def get_image_info(self, im_id):
        """Return 'bbox', 'mask', 'valid' and 'visible' for one (image, instance) sample."""
        image_id, instance_id = self.image_list[im_id]
        anno_mat = loadmat(self.anno_file_list[image_id])
        mask = self._get_mask_from_mat(anno_mat)

        mask = (mask == instance_id).float()
        bbox = masks_to_bboxes(mask, fmt='t')

        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_image(self, im_id):
        image_id, _ = self.image_list[im_id]

        img = self.image_loader(self.image_path_list[image_id])
        return img

    def get_meta_info(self, im_id):
        # SBD samples carry no class meta data here.
        object_meta = OrderedDict({'object_class_name': None,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})
        return object_meta

    def get_image(self, image_id, anno=None):
        image = self._get_image(image_id)

        if anno is None:
            anno = self.get_image_info(image_id)

        object_meta = self.get_meta_info(image_id)

        return image, anno, object_meta


================================================
FILE: external/AR/ltr/dataset/synthetic_video.py
================================================
from collections import OrderedDict
from .base_video_dataset import BaseVideoDataset
from ltr.data.bounding_box_utils import masks_to_bboxes


class SyntheticVideo(BaseVideoDataset):
    """
    Create a synthetic video dataset from an image dataset by applying a random transformation to images.
    """
    def __init__(self, base_image_dataset, transform=None):
        """
        args:
            base_image_dataset - Image dataset used for generating synthetic videos
            transform - Set of transforms to be applied to the images to generate synthetic video.
""" super().__init__(base_image_dataset.get_name() + '_syn_vid', base_image_dataset.root, base_image_dataset.image_loader) self.base_image_dataset = base_image_dataset self.transform = transform def get_name(self): return self.name def is_video_sequence(self): return False def has_class_info(self): return self.base_image_dataset.has_class_info() def has_occlusion_info(self): return True def get_num_sequences(self): return self.base_image_dataset.get_num_images() def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.get_images_in_class[class_name] def get_sequence_info(self, seq_id): image_info = self.base_image_dataset.get_image_info(seq_id) image_info = {k: v.unsqueeze(0) for k, v in image_info.items()} return image_info def get_class_name(self, seq_id): return self.base_image_dataset.get_class_name(seq_id) def get_frames(self, seq_id, frame_ids, anno=None): frame, anno, object_meta = self.base_image_dataset.get_image(seq_id, anno=anno) frame_list = [frame.copy() for _ in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[0].clone() for f_id in frame_ids] if self.transform is not None: if 'mask' in anno_frames.keys(): frame_list, anno_frames['bbox'], anno_frames['mask'] = self.transform(image=frame_list, bbox=anno_frames['bbox'], mask=anno_frames['mask'], joint=False) anno_frames['bbox'] = [masks_to_bboxes(m, fmt='t') for m in anno_frames['mask']] else: frame_list, anno_frames['bbox'] = self.transform(image=frame_list, bbox=anno_frames['bbox'], joint=False) object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id), 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: external/AR/ltr/dataset/synthetic_video_blend.py ================================================ from 
collections import OrderedDict from .base_video_dataset import BaseVideoDataset from ltr.data.bounding_box_utils import masks_to_bboxes import random import torch class SyntheticVideoBlend(BaseVideoDataset): """ Create a synthetic video by applying random transformations to an object (foreground) and pasting it in a background image. Currently, the foreground object is pasted at random locations in different frames. """ def __init__(self, foreground_image_dataset, background_image_dataset, foreground_transform=None, background_transform=None): """ args: foreground_image_dataset - A segmentation dataset from which foreground objects are cropped using the segmentation mask background_image_dataset - Dataset used to sample background image for the synthetic video foreground_transform - Random transformations to be applied to the foreground object in every frame background_transform - Random transformations to be applied to the background image in every frame """ assert foreground_image_dataset.has_segmentation_info() super().__init__(foreground_image_dataset.get_name() + '_syn_vid_blend', foreground_image_dataset.root, foreground_image_dataset.image_loader) self.foreground_image_dataset = foreground_image_dataset self.background_image_dataset = background_image_dataset self.foreground_transform = foreground_transform self.background_transform = background_transform def get_name(self): return self.name def is_video_sequence(self): return False def has_class_info(self): return self.foreground_image_dataset.has_class_info() def has_occlusion_info(self): return True def get_num_sequences(self): return self.foreground_image_dataset.get_num_images() def get_num_classes(self): return len(self.class_list) def get_sequences_in_class(self, class_name): return self.get_images_in_class[class_name] def get_sequence_info(self, seq_id): image_info = self.foreground_image_dataset.get_image_info(seq_id) image_info = {k: v.unsqueeze(0) for k, v in image_info.items()} return image_info 
def get_class_name(self, seq_id):
        return self.foreground_image_dataset.get_class_name(seq_id)

    def _paste_target(self, fg_image, fg_box, fg_mask, bg_image, paste_loc):
        """Alpha-blend the fg_box crop of fg_image (weighted by fg_mask) into bg_image, centred at paste_loc.

        Returns the composited background image and the pasted mask (same h,w as bg_image).
        """
        fg_mask = fg_mask.view(fg_mask.shape[0], fg_mask.shape[1], 1)
        fg_box = fg_box.long().tolist()

        x1 = int(paste_loc[0] - 0.5 * fg_box[2])
        x2 = x1 + fg_box[2]

        y1 = int(paste_loc[1] - 0.5 * fg_box[3])
        y2 = y1 + fg_box[3]

        # Amount the paste rectangle sticks out of the background on each side.
        x1_pad = max(-x1, 0)
        y1_pad = max(-y1, 0)
        x2_pad = max(x2 - bg_image.shape[1], 0)
        y2_pad = max(y2 - bg_image.shape[0], 0)

        bg_mask = torch.zeros((bg_image.shape[0], bg_image.shape[1], 1), dtype=fg_mask.dtype,
                              device=fg_mask.device)

        # Paste region entirely outside the background: return it unchanged with an empty mask.
        if x1_pad >= fg_mask.shape[1] or x2_pad >= fg_mask.shape[1] or y1_pad >= fg_mask.shape[0] or y2_pad >= \
                fg_mask.shape[0]:
            return bg_image, bg_mask.squeeze(-1)

        fg_mask_patch = fg_mask[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,
                                fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]

        fg_image_patch = fg_image[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,
                                  fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]

        bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = \
            bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] * (1 - fg_mask_patch.numpy()) \
            + fg_mask_patch.numpy() * fg_image_patch

        bg_mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = fg_mask_patch

        return bg_image, bg_mask.squeeze(-1)

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Build one synthetic frame per frame id: transformed foreground pasted at a random background location."""
        # Handle foreground
        fg_frame, fg_anno, fg_object_meta = self.foreground_image_dataset.get_image(seq_id, anno=anno)

        fg_frame_list = [fg_frame.copy() for _ in frame_ids]

        fg_anno_frames = {}
        for key, value in fg_anno.items():
            fg_anno_frames[key] = [value[0].clone() for f_id in frame_ids]

        if self.foreground_transform is not None:
            fg_frame_list, fg_anno_frames['bbox'], fg_anno_frames['mask'] = self.foreground_transform(
                image=fg_frame_list,
                bbox=fg_anno_frames['bbox'],
                mask=fg_anno_frames['mask'],
                joint=False)

        # Sample a random background
        bg_seq_id = random.randint(0,
                                   self.background_image_dataset.get_num_images() - 1)

        bg_frame, bg_anno, _ = self.background_image_dataset.get_image(bg_seq_id)

        bg_frame_list = [bg_frame.copy() for _ in frame_ids]

        bg_anno_frames = {}
        for key, value in bg_anno.items():
            # Note: Since we get bg anno from image dataset, it does not has frame dimension
            bg_anno_frames[key] = [value.clone() for f_id in frame_ids]

        if self.background_transform is not None:
            if 'mask' in bg_anno_frames.keys():
                bg_frame_list, bg_anno_frames['bbox'], bg_anno_frames['mask'] = self.background_transform(
                    image=bg_frame_list,
                    bbox=bg_anno_frames['bbox'],
                    mask=bg_anno_frames['mask'],
                    joint=False)
            else:
                bg_frame_list, bg_anno_frames['bbox'] = self.background_transform(
                    image=bg_frame_list,
                    bbox=bg_anno_frames['bbox'],
                    joint=False)

        for i in range(len(frame_ids)):
            # To be safe, get target bb for the mask
            bbox = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')

            loc_y = random.randint(0, bg_frame_list[i].shape[0] - 1)
            loc_x = random.randint(0, bg_frame_list[i].shape[1] - 1)

            paste_loc = (loc_x, loc_y)
            fg_frame_list[i], fg_anno_frames['mask'][i] = self._paste_target(fg_frame_list[i], bbox,
                                                                             fg_anno_frames['mask'][i],
                                                                             bg_frame_list[i], paste_loc)

            # Box must be re-derived: the pasted mask may be clipped at the background border.
            fg_anno_frames['bbox'][i] = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')

        object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id),
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return fg_frame_list, fg_anno_frames, object_meta


================================================
FILE: external/AR/ltr/dataset/tracking_net.py
================================================
import torch
import os
import os.path
import numpy as np
import pandas
import random
from collections import OrderedDict

from ltr.data.image_loader import jpeg4py_loader
from .base_video_dataset import BaseVideoDataset
from ltr.admin.environment import env_settings


def list_sequences(root, set_ids):
    """ Lists all the videos in the input set_ids.
Returns a list of tuples (set_id, video_name)

    args:
        root: Root directory to TrackingNet
        set_ids: Sets (0-11) which are to be used

    returns:
        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence
    """
    sequence_list = []

    for s in set_ids:
        anno_dir = os.path.join(root, "TRAIN_" + str(s), "anno")

        sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]
        sequence_list += sequences_cur_set

    return sequence_list


class TrackingNet(BaseVideoDataset):
    """ TrackingNet dataset.

    Publication:
        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.
        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
        ECCV, 2018
        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf

    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
    """
    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):
        """
        args:
            root        - The path to the TrackingNet folder, containing the training sets.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the
                             sets (0 - 11) will be used.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
        """
        root = env_settings().trackingnet_dir if root is None else root
        super().__init__('TrackingNet', root, image_loader)

        if set_ids is None:
            set_ids = [i for i in range(12)]

        self.set_ids = set_ids

        # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and
        # video_name for each sequence
        self.sequence_list = list_sequences(self.root, self.set_ids)

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))

        self.seq_to_class_map, self.seq_per_class = self._load_class_info()

        # we do not have the class_lists for the tracking net
        self.class_list = list(self.seq_per_class.keys())
        self.class_list.sort()

    def _load_class_info(self):
        # trackingnet_classmap.txt is a tab-separated '<video_name>\t<class_name>' file shipped in data_specs/.
        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')

        with open(class_map_path, 'r') as f:
            seq_to_class_map = {seq_class.split('\t')[0]: seq_class.rstrip().split('\t')[1] for seq_class in f}

        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = seq_to_class_map[seq[1]]
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)

        return seq_to_class_map, seq_per_class

    def get_name(self):
        return 'trackingnet'

    def has_class_info(self):
        return True

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _read_bb_anno(self, seq_id):
        set_id = self.sequence_list[seq_id][0]
        vid_name = self.sequence_list[seq_id][1]
        bb_anno_file = os.path.join(self.root, "TRAIN_" + str(set_id), "anno", vid_name + ".txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,
                             low_memory=False).values
        return torch.tensor(gt)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox', 'valid' (w,h > 0) and 'visible' flags (visible == valid here)."""
        bbox = self._read_bb_anno(seq_id)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible = valid.clone().byte()
        return {'bbox': bbox, 'valid': valid, 'visible': visible}

    def _get_frame(self, seq_id, frame_id):
        set_id = self.sequence_list[seq_id][0]
        vid_name = self.sequence_list[seq_id][1]
        frame_path = os.path.join(self.root, "TRAIN_" + str(set_id), "frames", vid_name,
                                  str(frame_id) + ".jpg")
        return self.image_loader(frame_path)

    def _get_class(self, seq_id):
        seq_name = self.sequence_list[seq_id][1]
        return self.seq_to_class_map[seq_name]

    def get_class_name(self, seq_id):
        obj_class = self._get_class(seq_id)

        return obj_class

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames, their per-frame annotation slices and the object meta data."""
        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        obj_class = self._get_class(seq_id)

        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta


================================================
FILE: external/AR/ltr/dataset/vos_base.py
================================================
import torch
from pathlib import Path
from collections import OrderedDict, defaultdict
import json
import numpy as np
import os

from .base_video_dataset import BaseVideoDataset
from ltr.data.image_loader import jpeg4py_loader, imread_indexed
from ltr.data.bounding_box_utils import masks_to_bboxes


class VOSMeta:
    # Holds per-sequence metadata (shape, per-frame object sizes and boxes) for VOS datasets,
    # backed by a generated JSON file.
    def __init__(self, data=None, filename=None):
        if filename is not None:
            self.load(filename)
        elif data is not None:
            self._data = data
        else:
            raise ValueError("Must set either data or filename parameter")

    def save(self, gen_meta: Path):
        gen_meta.parent.mkdir(exist_ok=True, parents=True)
        json.dump(self._data, open(gen_meta, "w"))

    def load(self, gen_meta: Path):
        if not gen_meta.exists():
            print("Generated metadata file %s is not found."
% gen_meta)
            print("Find and run VOSMeta.generate() to create it.")
            raise FileNotFoundError(gen_meta)
        self._data = json.load(open(gen_meta), object_pairs_hook=OrderedDict)

    @classmethod
    def generate(cls, dset_name: str, dset_images_path: Path, dset_annos_path: Path):
        """ Count the annotation mask pixels per object, per frame, in all sequences in a dataset
        :param dset_name: Dataset name, for printing the progress bar.
        :param dset_annos_path: Path to annotations directory, containing sequence directories,
                                with annotation frames in them.
        :return: Dataset meta dict:
            {'sequence0':
                {'shape': (height, width)
                 'obj_sizes':  # Object pixels per frame
                    {'frame0': {'object0': px_count, 'object1': px_count, ...},
                     'frame1': {'object0': px_count, 'object1': px_count, ...}, ...},
                 'bboxes':  # Bounding boxes per frame
                    {'frame0': {'object0': bbox, 'object1': bbox, ...},
                     'frame1': {'object0': bbox, 'object1': bbox, ...}, ...},
                }, ...
            }
        """
        assert(dset_annos_path.exists())

        dset_meta = OrderedDict()
        sequences = [p.stem for p in sorted(dset_annos_path.glob("*")) if p.is_dir()]

        # tqdm is optional; fall back to a pass-through iterator when it is not installed.
        try:
            from tqdm import tqdm
        except:
            def tqdm(x, *args, **kwargs):
                return x

        for seq in tqdm(sequences, desc=dset_name, unit="seq"):
            obj_sizes2 = defaultdict(OrderedDict)
            bboxes = defaultdict(OrderedDict)
            shape = None
            frame_names = [file.stem for file in sorted((dset_images_path / seq).glob("*.jpg"))]
            anno_paths = list(sorted((dset_annos_path / seq).glob("*.png")))

            # Extract information from the given label frames
            for path in anno_paths:
                f_id = path.stem

                # Count label-pixels per frame
                labels = imread_indexed(path)
                # labels = np.array(Image.open(path))
                obj_ids, obj_sizes = np.unique(labels, return_counts=True)
                obj_ids = [str(oid) for oid in obj_ids]
                obj_sizes = obj_sizes.tolist()

                if '0' in obj_ids:  # Remove background id
                    obj_ids = obj_ids[1:]
                    obj_sizes = obj_sizes[1:]
                obj_sizes2[f_id] = OrderedDict(zip(obj_ids, obj_sizes))

                # Generate per-label bounding boxes
                for obj_id in obj_ids:
                    bboxes[f_id][obj_id] = cls._mask_to_bbox(labels == int(obj_id))

                if shape is None:
                    shape = labels.shape[:2]

            # Format result
            dset_meta[seq] = dict(shape=shape, obj_sizes=obj_sizes2, bboxes=bboxes, frame_names=frame_names)

        return VOSMeta(dset_meta)

    @staticmethod
    def _mask_to_bbox(mask: np.ndarray):
        # Tight [x, y, w, h] box around the nonzero pixels; all-zero mask gives [0, 0, 0, 0].
        mask = mask.astype(int)
        xs = mask.sum(axis=-2).nonzero()[0].tolist()
        ys = mask.sum(axis=-1).nonzero()[0].tolist()

        if len(ys) > 0 and len(xs) > 0:
            x, y, w, h = xs[0], ys[0], xs[-1] - xs[0], ys[-1] - ys[0]
        else:
            x, y, w, h = 0, 0, 0, 0

        return [x, y, w, h]

    @staticmethod
    def _transpose_nested_dict(d):
        """ Permute a 2-level nested dict such that the inner and outer keys swap places. """
        d2 = defaultdict(OrderedDict)
        for key1, inner in d.items():
            for key2, value in inner.items():
                d2[key2][key1] = value
        return d2

    def select_split(self, dataset_name, split):
        # Keep only the sequences listed in data_specs/<dataset_name>_<split>.txt.
        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
        sequences = set([s.strip() for s in open(os.path.join(ltr_path, 'data_specs',
                                                              dataset_name + '_' + split + '.txt')).readlines()])
        all_sequences = set(self._data.keys())
        to_remove = all_sequences.difference(sequences)
        for seq_name in to_remove:
            self._data.pop(seq_name)

    def get_sequence_names(self):
        return list(self._data.keys())

    def get_shape(self, seq_name):
        """ Sequence image shape (h,w) """
        h, w = self._data[seq_name]['shape']
        return h, w

    def get_obj_ids(self, seq_name):
        """ All objects in the sequence """
        return list(self.get_obj_sizes_per_object(seq_name).keys())

    def get_frame_names(self, seq_name):
        """ All filename stems of the frames in the sequence """
        return self._data[seq_name]['frame_names']

    def enable_all_frames(self, dset_images_path):
        """
        For YouTubeVOS: Update the frame names with (jpeg) files from the _all_frames set
        :param dset_images_path: /path/to/train_all_frames/JPEGImages (or valid or test)
        :param seq: Sequence name
        :return:
        """
        # Try load the cached index
        idx_file = dset_images_path.parent / "frame_names.json"
        if idx_file.exists():
            print('Loading cached frame names from %s' % idx_file)
            all_frame_names = json.load(open(idx_file))
        else:
            # Cache the data to the user's home directory (guaranteed to be writable)
            all_frame_names = dict()
            user_idx_file = Path.home() / (dset_images_path.parent.stem + "_frame_names.json")
            print('Indexing YouTubeVOS "all_frames" frame names to %s' % user_idx_file)
            for seq in self._data:
                all_frame_names[seq] = [file.stem for file in sorted((dset_images_path / seq).glob("*.jpg"))]
            json.dump(all_frame_names, open(user_idx_file, "w"))
            print('Done. Move %s to %s to load faster next time.' % (user_idx_file, idx_file))

        for seq, frame_names in all_frame_names.items():
            self._data[seq]['frame_names'] = frame_names

    def get_aspect_ratio(self, seq_name):
        """ Sequence aspect ratio """
        h, w = self._data[seq_name]['shape']
        return w / h

    def get_obj_sizes_per_frame(self, seq_name):
        """ Get object pixel counts, grouped by frame names """
        return self._data[seq_name]['obj_sizes']

    def get_bboxes_per_frame(self, seq_name):
        """ Object bounding boxes, grouped by frame names """
        return self._data[seq_name]['bboxes']

    def get_obj_sizes_per_object(self, seq_name):
        """ Object pixel counts, grouped by object """
        return self._transpose_nested_dict(self.get_obj_sizes_per_frame(seq_name))

    def get_bboxes_per_object(self, seq_name):
        """ Object bounding boxes, grouped by object """
        return self._transpose_nested_dict(self.get_bboxes_per_frame(seq_name))

    @staticmethod
    def generate_datasets_meta(src, dst=Path("~/vosdataset_meta").expanduser()):
        # NOTE(review): 'dst' is unused here - the meta file is written next to 'src'; confirm intended.
        VOSMeta.generate("SyntheticCoco", src / "JPEGImages", src / "Annotations").save(src / "generated_meta.json")


class VOSDatasetBase(BaseVideoDataset):
    """ Generic VOS dataset reader base class, for both DAVIS and YouTubeVOS """
    def __init__(self, name: str, root: Path, version=None, split='train', multiobj=True,
                 vis_threshold=10, image_loader=jpeg4py_loader):
        """
        :param root: Dataset root path, eg /path/to/DAVIS or /path/to/YouTubeVOS/
                     Note: YouTubeVOS 2018 and 2019 are expected to be in
/path/to/YouTubeVOS/2018 and /path/to/YouTubeVOS/2019, respectively
        :param name: 'DAVIS' or 'YouTubeVOS' (case sensitive)
        :param version: DAVIS: '2016', '2017, YouTubeVOS: '2018' or '2019'
        :param split: DAVIS: Any name in DAVIS/ImageSets/, YouTubeVOS: 'test', 'train', 'valid'
                      or 'jjtrain', 'jjvalid'
        :param multiobj: Whether the dataset will return all objects in a sequence or multiple sequences
                         with one object in each.
        :param vis_threshold: Minimum number of pixels required to consider a target object "visible".
        :param image_loader: Image loader.
        """
        assert root.exists() and root.is_dir()
        super().__init__(name, root, image_loader)

        self.version = version
        self.split = split
        self.vis_threshold = vis_threshold
        self.multiobj = multiobj

    def _load_image(self, path):
        im = self.image_loader(str(path))
        assert im is not None
        im = np.atleast_3d(im)
        return im

    @staticmethod
    def _load_anno(path):
        # Returns None when the annotation frame does not exist (sparse ground truth).
        if not path.exists():
            return None
        # im = np.atleast_3d(np.array(Image.open(path)))
        im = imread_indexed(path)
        return im

    def get_num_sequences(self):
        return len(self._samples)

    def get_sequence_info(self, sample_id):
        """ Get sample meta data.
        :param sample_id: Sample to query.
        :return: dict of metadata:
            sequence:     Sequence name
            frame_shape:  (height, width) of the images
            frame_names:  List of frame filename stems in the sequence
            object_ids:   Id numbers of all objects occurring in the sequence
            obj_sizes:    Matrix shape=(frames, object) of the number of pixels for each object in each frame
                          Coordinates in this matrix relate to the frame_names and object_ids
            visible:      Boolean matrix of the same shape as obj_sizes. Entries with more pixels than
                          self.visible_threshold are True.
        """
        m = self.gmeta
        seq_name, obj_ids = self._samples[sample_id]
        f_names = m.get_frame_names(seq_name)  # All frames
        f2i = {f: i for i, f in enumerate(f_names)}  # Frame name to matrix index
        o2i = {o: i for i, o in enumerate(obj_ids)}  # Object id to matrix index

        # Get a matrix of object sizes: shape=(frames, objects)
        obj_sizes = torch.zeros((len(f_names), len(obj_ids)), dtype=torch.int)
        sizes_per_object = m.get_obj_sizes_per_object(seq_name)

        for obj_id in obj_ids:
            frames = sizes_per_object[obj_id]
            oid = o2i[obj_id]
            for f, sz in frames.items():
                obj_sizes[f2i[f], oid] = sz

        visible = (obj_sizes > self.vis_threshold).byte()

        return dict(sequence=seq_name, frame_shape=m.get_shape(seq_name),
                    frame_names=f_names, object_ids=obj_ids,
                    object_sizes=obj_sizes, visible=visible, valid=visible)

    def get_paths_and_bboxes(self, sequence_info):
        seq_name = sequence_info['sequence']
        annos_root = self._anno_path / seq_name
        images_root = self._jpeg_path / seq_name

        frame_names = sequence_info['frame_names']
        f2i = {f: i for i, f in enumerate(frame_names)}

        images = [str(images_root / (f + ".jpg")) for f in frame_names]

        # Find the frames where ground truth is available and
        # get the bounding boxes and segmentation labels of those frames
        all_bboxes = self.gmeta.get_bboxes_per_frame(seq_name)
        gt_labels = [str(annos_root / (f + ".png")) if f in all_bboxes.keys() else None for f in frame_names]

        gt_bboxes = OrderedDict()
        for obj_id in sequence_info['object_ids']:
            # [-1, -1, -1, -1] marks frames with no annotation for this object.
            gt_bboxes[obj_id] = np.array([all_bboxes.get(frame, {}).get(obj_id, [-1, -1, -1, -1])
                                          for frame in frame_names])

        return images, gt_labels, gt_bboxes

    def _construct_sequence(self, sequence_info):
        raise NotImplementedError

    def get_sequence_list(self):
        # Lazily built and cached on first call.
        if len(self.sequence_list) > 0:
            return self.sequence_list
        self.sequence_list = [self._construct_sequence(self.get_sequence_info(i)) for i in range(len(self._samples))]
        return self.sequence_list

    def __len__(self):
        return len(self._samples)

    def _get_image_path(self, meta, frame_id):
        return self._jpeg_path / meta['sequence'] / (meta['frame_names'][frame_id] + ".jpg")

    def _get_anno_path(self, meta, frame_id):
        return self._anno_path / meta['sequence'] / (meta['frame_names'][frame_id] + ".png")

    def get_frames(self, sample_id, frame_ids, anno=None):
        """ Fetch frames with the given ids.
        :param sample_id: Sample to get.
        :param frame_ids: List of frame indices in the sequence belonging to the sample_id
        :return: dict of metadata and data:
            sequence:  Sequence name
            images:    List of images. No entries may be None
            labels:    List of label/mask images. Entries may be None if the data is missing
            bboxes:    List of bounding boxes. Entries may be None if the data is missing
        """
        seq_name, obj_ids = self._samples[sample_id]
        meta = self.get_sequence_info(sample_id) if anno is None else anno
        frame_names = meta['frame_names']

        images = [self._load_image(self._jpeg_path / seq_name / (frame_names[f] + ".jpg")) for f in frame_ids]
        labels = [self._load_anno(self._anno_path / seq_name / (frame_names[f] + ".png")) for f in frame_ids]

        # Generate bounding boxes for the requested objects
        bboxes = []
        for lb in labels:
            lb = torch.from_numpy(lb.squeeze())
            frame_bbs = {}
            for obj_id in obj_ids:
                bbox = masks_to_bboxes(lb == int(obj_id), fmt='t')
                if bbox[3] == 0 or bbox[2] == 0:
                    print("!")
                frame_bbs[obj_id] = bbox
            bboxes.append(frame_bbs)

        # Insert empty bboxes for missing object ids
        for bbox in bboxes:
            for obj_id in obj_ids:
                if obj_id not in bbox:
                    bbox[obj_id] = torch.zeros(4, dtype=torch.float32)

        # Remap to object id 1, if requested - for training
        if not self.multiobj:
            assert len(obj_ids) == 1
            obj_id = obj_ids[0]
            labels = [torch.Tensor(lb == int(obj_id)) for lb in labels]
            bboxes = [bbox[obj_id] for bbox in bboxes]
        else:
            labels = [torch.Tensor(lb) for lb in labels]

        object_meta = {key: meta[key] for key in ['sequence', 'frame_shape', 'frame_names', 'object_ids']}

        anno_frames = dict(bbox=bboxes, mask=labels)
        for key in ['object_sizes', 'visible', 'valid']:
            value = meta[key]
            anno_frames[key] =
[value[f_id, ...].clone() for f_id in frame_ids] return images, anno_frames, object_meta def get_name(self): return "%s/%s/%s" % (self.name, self.version, self.split) def has_class_info(self): return False def has_occlusion_info(self): return True def get_num_classes(self): return 0 def get_class_list(self): return [] def get_sequences_in_class(self, class_name): raise [] def has_segmentation_info(self): return True ================================================ FILE: external/AR/ltr/dataset/youtubevos.py ================================================ from pathlib import Path import os from ltr.dataset.vos_base import VOSDatasetBase, VOSMeta from pytracking.evaluation import Sequence import json from ltr.admin.environment import env_settings from ltr.data.image_loader import jpeg4py_loader class YouTubeVOSMeta: """ Thin wrapper for YouTubeVOS meta data meta.json { "videos": { "": { "objects": { "": { "category": "", "frames": [ "", "", ] } } } } } # is the same as the pixel values of object in annotated segmentation PNG files. # is the 5-digit index of frame in video, and not necessary to start from 0. 
""" def __init__(self, dset_split_path): self._data = json.load(open(dset_split_path / 'meta.json'))['videos'] def sequences(self): return list(self._data.keys()) def seq_frames(self, seq_name): """ All filename stems of the frames in the sequence """ frames = set() for obj_id in self.object_ids(seq_name): for f in self.object_frames(seq_name, obj_id): frames.add(f) return list(sorted(frames)) def object_ids(self, seq_name): """ All objects in the sequence """ return list(self._data[seq_name]['objects'].keys()) def object_category(self, seq_name, obj_id): return self._data[seq_name]['objects'][str(obj_id)]['category'] def object_frames(self, seq_name, obj_id): return self._data[seq_name]['objects'][str(obj_id)]['frames'] def object_first_frame(self, seq_name, obj_id): return self.object_frames(seq_name, obj_id)[0] class YouTubeVOS(VOSDatasetBase): """ YoutubeVOS video object segmentation dataset. Publication: YouTube-VOS: A Large-Scale Video Object Segmentation Benchmark Ning Xu, Linjie Yang, Yuchen Fan, Dingcheng Yue, Yuchen Liang, Jianchao Yang, and Thomas Huang ECCV, 2018 https://arxiv.org/pdf/1809.03327.pdf Download dataset from: https://youtube-vos.org/dataset/ """ def __init__(self, root=None, version='2019', split='train', cleanup=None, all_frames=False, sequences=None, multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader): """ args: root - Dataset root path. If unset, it uses the path in your local.py config. version - '2018' or '2019' split - 'test', 'train', 'valid', or 'jjtrain', 'jjvalid'. 'jjvalid' corresponds to a custom validation dataset consisting of 300 videos randomly sampled from the train set. 'jjtrain' contains the remaining videos used for training. cleanup - List of actions to take to to clean up known problems in the dataset. 'aspects': remove frames with weird aspect ratios, 'starts': fix up start frames from original meta data all_frames - Whether to use an "all_frames" split. sequences - List of sequence names. 
def __init__(self, root=None, version='2019', split='train', cleanup=None, all_frames=False,
             sequences=None, multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader):
    """
    args:
        root - Dataset root path. If unset, it uses the path in your local.py config.
        version - '2018' or '2019'
        split - 'test', 'train', 'valid', or 'jjtrain', 'jjvalid'. 'jjvalid' corresponds to a
                custom validation dataset consisting of 300 videos randomly sampled from the
                train set. 'jjtrain' contains the remaining videos used for training.
        cleanup - List of actions to take to clean up known problems in the dataset.
                  'aspect': remove sequences with unusual aspect ratios,
                  'starts': fix up start frames from the original meta data.
        all_frames - Whether to use an "all_frames" split.
        sequences - List of sequence names. Limit to a subset of sequences if not None.
        multiobj - Whether the dataset will return all objects in a sequence or
                   multiple sequences with one object in each.
        vis_threshold - Minimum number of pixels required to consider a target object "visible".
        image_loader - Image loader.
    """
    root = env_settings().youtubevos_dir if root is None else root
    super().__init__(name="YouTubeVOS", root=Path(root), version=version, split=split,
                     multiobj=multiobj, vis_threshold=vis_threshold, image_loader=image_loader)

    # The custom 'jj*' splits are both carved out of the official train folder.
    split_folder = "train" if self.split.startswith("jj") else self.split
    dset_path = self.root / self.version / split_folder

    self._anno_path = dset_path / 'Annotations'
    if all_frames:
        self._jpeg_path = self.root / self.version / (split_folder + "_all_frames") / 'JPEGImages'
    else:
        self._jpeg_path = dset_path / 'JPEGImages'

    self.meta = YouTubeVOSMeta(dset_path)

    # Load the generated meta file, creating (and caching) it on first use.
    meta_path = dset_path / "generated_meta.json"
    if meta_path.exists():
        self.gmeta = VOSMeta(filename=meta_path)
    else:
        self.gmeta = VOSMeta.generate('YouTubeVOS', self._jpeg_path, self._anno_path)
        self.gmeta.save(meta_path)

    if all_frames:
        self.gmeta.enable_all_frames(self._jpeg_path)

    if self.split not in ['train', 'valid', 'test']:
        self.gmeta.select_split('youtubevos', self.split)

    if sequences is None:
        sequences = self.gmeta.get_sequence_names()

    to_remove = set()
    cleanup = {} if cleanup is None else set(cleanup)

    if 'aspect' in cleanup:
        # Remove sequences with unusual aspect ratios
        for seq_name in sequences:
            ratio = self.gmeta.get_aspect_ratio(seq_name)
            if ratio < 1.45 or ratio > 1.9:
                to_remove.add(seq_name)

    if 'starts' in cleanup:
        # Fix incorrect start frames for some objects found with ytvos_start_frames_test()
        bad_start_frames = [("0e27472bea", '2', ['00055', '00060'], '00065'),
                            ("5937b08d69", '4', ['00000'], '00005'),
                            ("5e1ce354fd", '5', ['00010', '00015'], '00020'),
                            ("7053e4f41e", '2', ['00000', '00005', '00010', '00015'], '00020'),
                            ("720e3fa04c", '2', ['00050'], '00055'),
                            ("c73c8e747f", '2', ['00035'], '00040')]
        for seq_name, obj_id, bad_frames, good_frame in bad_start_frames:
            # bad_frames is from meta.json included with the dataset
            # good_frame is from the generated meta - and the first actual frame
            # where the object was seen.
            if seq_name in self.meta._data:
                frames = self.meta.object_frames(seq_name, obj_id)
                for frame in bad_frames:
                    frames.remove(frame)
                assert frames[0] == good_frame

    sequences = [seq for seq in sequences if seq not in to_remove]
    self.sequence_names = sequences

    # One sample per sequence in multi-object mode; one per (sequence, object) otherwise.
    self._samples = []
    for seq in sequences:
        obj_ids = self.meta.object_ids(seq)
        if self.multiobj:
            self._samples.append((seq, obj_ids))
        else:
            self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids])

    print("%s loaded." % self.get_name())
    if len(to_remove) > 0:
        print(" %d sequences were removed, (%d remaining)." % (len(to_remove), len(sequences)))

def _construct_sequence(self, sequence_info):
    """Build an evaluation Sequence from the metadata of one sample."""
    seq_name = sequence_info['sequence']
    frame_names = sequence_info['frame_names']
    fname_to_fid = {f: i for i, f in enumerate(frame_names)}
    images, gt_segs, gt_bboxes = self.get_paths_and_bboxes(sequence_info)

    # Group the per-object init annotations by the frame where each object first appears.
    init_data = dict()
    for obj_id in sequence_info['object_ids']:
        if obj_id == '0':
            # Object id '0' is the background label; kept as a debug marker.
            print("!")
        f_name = self.meta.object_first_frame(seq_name, obj_id)
        f_id = fname_to_fid[f_name]
        if f_id not in init_data:
            init_data[f_id] = {'object_ids': [obj_id],
                               'bbox': {obj_id: gt_bboxes[obj_id][f_id, :]},
                               'mask': os.path.join(os.path.dirname(gt_segs[f_id]), (f_name + ".png"))}
            assert init_data[f_id]['mask'] in gt_segs  # If this fails, some file is missing
        else:
            init_data[f_id]['object_ids'].append(obj_id)
            init_data[f_id]['bbox'][obj_id] = gt_bboxes[obj_id][f_id, :]

    return Sequence(name=seq_name, frames=images, dataset='YouTubeVOS',
                    ground_truth_rect=gt_bboxes, init_data=init_data,
                    ground_truth_seg=gt_segs, object_ids=sequence_info['object_ids'],
                    multiobj_mode=self.multiobj)
================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .vim-template* # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/LICENSE ================================================ MIT License Copyright (c) 2018 Jiayuan Mao Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the 
Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/README.md ================================================ # PreciseRoIPooling This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation). **Acquisition of Localization Confidence for Accurate Object Detection** _Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.) https://arxiv.org/abs/1807.11590 ## Brief In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is: - different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates. - different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous. 
For a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details including the gradient computation can be found in our paper.
## Implementation PrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome. ## Usage (PyTorch 1.0) In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented). Since we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` ## Usage (PyTorch 0.4) **!!! Please first checkout to the branch pytorch0.4.** In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented). To use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` Here, - RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor. - `spatial_scale` is multiplied to the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`. - The coordinates for RoI follows the [L, R) convension. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`. ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore ================================================ *.o /_prroi_pooling ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : __init__.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. from .prroi_pool import * ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py ================================================ #! 
# -*- coding: utf-8 -*-
# File   : functional.py
# Author : Jiayuan Mao, Tete Xiao
# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com
# Date   : 07/13/2018
#
# This file is part of PreciseRoIPooling.
# Distributed under terms of the MIT license.
# Copyright (c) 2017 Megvii Technology Limited.

import torch
import torch.autograd as ag

__all__ = ['prroi_pool2d']

# Lazily-built handle to the JIT-compiled CUDA extension.
_prroi_pooling = None


def _import_prroi_pooling():
    """JIT-compile (at most once per process) and return the CUDA extension.

    The compiled module is cached in the module-level ``_prroi_pooling``.
    :raises ImportError: if the extension cannot be compiled.
    """
    global _prroi_pooling

    if _prroi_pooling is None:
        try:
            from os.path import join as pjoin, dirname
            from torch.utils.cpp_extension import load as load_extension
            root_dir = pjoin(dirname(__file__), 'src')
            _prroi_pooling = load_extension(
                '_prroi_pooling',
                [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')],
                verbose=True
            )
        except ImportError:
            # NOTE(review): build failures that raise something other than
            # ImportError (e.g. RuntimeError from a failed nvcc invocation)
            # are not converted here — confirm whether that is intended.
            raise ImportError('Can not compile Precise RoI Pooling library.')

    return _prroi_pooling


class PrRoIPool2DFunction(ag.Function):
    """autograd.Function binding the PrRoI pooling CUDA forward/backward kernels."""

    @staticmethod
    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):
        _prroi_pooling = _import_prroi_pooling()

        # The CUDA kernels operate on float32 only.
        assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \
            'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())

        pooled_height = int(pooled_height)
        pooled_width = int(pooled_width)
        spatial_scale = float(spatial_scale)

        features = features.contiguous()
        rois = rois.contiguous()
        params = (pooled_height, pooled_width, spatial_scale)

        if features.is_cuda:
            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)
            ctx.params = params
            # everything here is contiguous.
            ctx.save_for_backward(features, rois, output)
        else:
            # FIX: corrected the misspelled error message (was 'implememtations').
            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implementations.')

        return output

    @staticmethod
    def backward(ctx, grad_output):
        _prroi_pooling = _import_prroi_pooling()

        features, rois, output = ctx.saved_tensors
        grad_input = grad_coor = None

        # Gradient w.r.t. the input feature map.
        if features.requires_grad:
            grad_output = grad_output.contiguous()
            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(
                features, rois, output, grad_output, *ctx.params)
        # Gradient w.r.t. the RoI coordinates (the distinguishing feature of PrRoI pooling).
        if rois.requires_grad:
            grad_output = grad_output.contiguous()
            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(
                features, rois, output, grad_output, *ctx.params)

        return grad_input, grad_coor, None, None, None


prroi_pool2d = PrRoIPool2DFunction.apply
import torch.nn as nn from .functional import prroi_pool2d __all__ = ['PrRoIPool2D'] class PrRoIPool2D(nn.Module): def __init__(self, pooled_height, pooled_width, spatial_scale): super().__init__() self.pooled_height = int(pooled_height) self.pooled_width = int(pooled_width) self.spatial_scale = float(spatial_scale) def forward(self, features, rois): return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale) def extra_repr(self): return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__) ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c ================================================ /* * File : prroi_pooling_gpu.c * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ #include #include #include #include #include #include "prroi_pooling_gpu_impl.cuh" at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) { int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options()); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingForwardGpu( stream, features.data(), rois.data(), output.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count ); THCudaCheck(cudaGetLastError()); return output; } at::Tensor prroi_pooling_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int pooled_height, int pooled_width, float spatial_scale) { auto features_diff = at::zeros_like(features); int nr_rois = rois.size(0); int batch_size = features.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = batch_size * nr_channels * height * width; if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return features_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), features_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); THCudaCheck(cudaGetLastError()); return features_diff; } at::Tensor prroi_pooling_coor_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff, int 
pooled_height, int pooled_width, float spatial_scale) { auto coor_diff = at::zeros_like(rois); int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; int bottom_count = nr_rois * 5; if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return coor_diff; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingCoorBackwardGpu( stream, features.data(), rois.data(), output.data(), output_diff.data(), coor_diff.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count, bottom_count ); THCudaCheck(cudaGetLastError()); return coor_diff; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward"); m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward"); m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor"); } ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h ================================================ /* * File : prroi_pooling_gpu.h * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale); int prroi_pooling_backward_cuda( THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, int pooled_height, int pooled_width, float spatial_scale ); int prroi_pooling_coor_backward_cuda( THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff, int pooled_height, int pooled_width, float spatial_scal ); ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu ================================================ /* * File : prroi_pooling_gpu_impl.cu * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. */ #include "prroi_pooling_gpu_impl.cuh" #include #include #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) #define CUDA_POST_KERNEL_CHECK \ do { \ cudaError_t err = cudaGetLastError(); \ if (cudaSuccess != err) { \ fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \ exit(-1); \ } \ } while(0) #define CUDA_NUM_THREADS 512 namespace { static int CUDA_NUM_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) { bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width); float retVal = overflow ? 0.0f : data[h * width + w]; return retVal; } __device__ static float PrRoIPoolingGetCoeff(float dh, float dw){ dw = dw > 0 ? dw : -dw; dh = dh > 0 ? 
        dh : -dh; /* tail of PrRoIPoolingGetCoeff, begun on the previous source line */
    return (1.0f - dh) * (1.0f - dw);
}

/* Integral of the linear interpolant c1 -> c2 over the sub-interval [s, t] of [0, 1]. */
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}

/* Bilinear interpolation of `data` at the (possibly fractional) position (h, w);
 * out-of-bounds taps read as zero via PrRoIPoolingGetData. */
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w,
        const int height, const int width){
    float retVal = 0.0f;
    int h1 = floorf(h);
    int w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h);
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    return retVal;
}

/* Exact integral of the bilinear interpolant over the window
 * [x0, x1] x [y0, y1], which lies inside the unit cell (s_h, s_w)-(e_h, e_w).
 * Each of the four terms weights one corner sample of the cell. */
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data,
        const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0)
{
    float alpha, beta, lim_alpha, lim_beta, tmp;
    float sum_out = 0;

    /* Corner (s_h, s_w). */
    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;

    /* Corner (s_h, e_w). */
    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;

    /* Corner (e_h, s_w). */
    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    /* Corner (e_h, e_w). */
    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Atomically accumulate top_diff * coeff into diff[h][w]; out-of-bounds writes are dropped. */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int h, const int w, const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Adjoint of PrRoIPoolingMatCalculation: distributes top_diff onto the four
 * corner samples of the unit cell using the same integral weights. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff,
        const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0)
{
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward kernel: one thread per output element (n, c, ph, pw); each output bin
 * is the exact integral average of the bilinear interpolant over its window.
 * NOTE(review): `static_cast(...)` below lost its template argument in
 * extraction (presumably `static_cast<float>`); tokens kept as-is. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        /* RoI row: (batch_index, x0, y0, x1, y1) scaled onto the feature map. */
        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];

        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
        float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        float *this_out = top_data + index;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        if (win_size == 0) {
            /* Degenerate RoI: emit zero for this bin. */
            *this_out = 0;
            return;
        }

        float sum_out = 0;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Sum the exact integral over every unit cell the window overlaps.
         * NOTE(review): `float(w_iter + 1.0)` vs `float(h_iter) + 1.0` — the
         * asymmetry is in the original; numerically equivalent here. */
        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
        *this_out = sum_out / win_size;
    }
}

/* Backward kernel w.r.t. the feature map: scatters the (normalised) output
 * gradient back through the same cell integrals, atomically. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        /* Average-pool normalisation applied on the way back; zero-sized windows get zero. */
        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
    }
}

/* Backward kernel w.r.t. the RoI coordinates (the defining feature of PrRoI
 * pooling). Body continues past the end of this view. */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        const float *this_top_data = top_data + index;
        /* Coordinate gradient output: one 5-vector per RoI. */
        float *this_data_grad = bottom_diff + n * 5;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float /* declaration continues on the next source line */
win_end_h = win_start_h + bin_size_h; float win_size = max(float(0.0), bin_size_w * bin_size_h); float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size; // WARNING: to be discussed if (sum_out == 0) return; int s_w, s_h, e_w, e_h; s_w = floorf(win_start_w); e_w = ceilf(win_end_w); s_h = floorf(win_start_h); e_h = ceilf(win_end_h); float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0; for (int h_iter = s_h; h_iter < e_h; ++h_iter) { g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter, min(win_end_h, float(h_iter + 1)) - h_iter, PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width), PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width)); g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter, min(win_end_h, float(h_iter + 1)) - h_iter, PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width), PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width)); } for (int w_iter = s_w; w_iter < e_w; ++w_iter) { g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter, min(win_end_w, float(w_iter + 1)) - w_iter, PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width), PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width)); g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter, min(win_end_w, float(w_iter + 1)) - w_iter, PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width), PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width)); } float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data); float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data); float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data); float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data); partial_x1 = partial_x1 
/ win_size * spatial_scale; partial_x2 = partial_x2 / win_size * spatial_scale; partial_y1 = partial_y1 / win_size * spatial_scale; partial_y2 = partial_y2 / win_size * spatial_scale; // (b, x1, y1, x2, y2) this_data_grad[0] = 0; atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad)); atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad)); atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width) * (*this_out_grad)); atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height) * (*this_out_grad)); } } } /* !anonymous namespace */ #ifdef __cplusplus extern "C" { #endif void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count) { PrRoIPoolingForward<<>>( top_count, bottom_data, bottom_rois, top_data, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingBackward<<>>( top_count, bottom_rois, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, 
F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingCoorBackward<<>>( top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } } /* !extern "C" */ ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh ================================================ /* * File : prroi_pooling_gpu_impl.cuh * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. */ #ifndef PRROI_POOLING_GPU_IMPL_CUH #define PRROI_POOLING_GPU_IMPL_CUH #ifdef __cplusplus extern "C" { #endif #define F_DEVPTR_IN const float * #define F_DEVPTR_OUT float * void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count); void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int 
channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); #ifdef __cplusplus } /* !extern "C" */ #endif #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py ================================================ # -*- coding: utf-8 -*- # File : test_prroi_pooling2d.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 18/02/2018 # # This file is part of Jacinle. import unittest import torch import torch.nn as nn import torch.nn.functional as F from jactorch.utils.unittest import TorchTestCase from prroi_pool import PrRoIPool2D class TestPrRoIPool2D(TorchTestCase): def test_forward(self): pool = PrRoIPool2D(7, 7, spatial_scale=0.5) features = torch.rand((4, 16, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 14, 14], [1, 14, 14, 28, 28], ]).float().cuda() out = pool(features, rois) out_gold = F.avg_pool2d(features, kernel_size=2, stride=1) self.assertTensorClose(out, torch.stack(( out_gold[0, :, :7, :7], out_gold[1, :, 7:14, 7:14], ), dim=0)) def test_backward_shapeonly(self): pool = PrRoIPool2D(2, 2, spatial_scale=0.5) features = torch.rand((4, 2, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 4, 4], [1, 14, 14, 18, 18], ]).float().cuda() features.requires_grad = rois.requires_grad = True out = pool(features, rois) loss = out.sum() loss.backward() self.assertTupleEqual(features.size(), features.grad.size()) self.assertTupleEqual(rois.size(), rois.grad.size()) if __name__ == '__main__': unittest.main() ================================================ FILE: external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu ================================================ /* * File : prroi_pooling_gpu_impl.cu * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT 
license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#include "prroi_pooling_gpu_impl.cuh"

/* NOTE(review): the two include targets were stripped by extraction
 * ("#include #include"); restored per upstream PreciseRoIPooling. */
#include <cstdio>
#include <cfloat>

/* Standard grid-stride indexing macro over n elements. */
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

#define CUDA_POST_KERNEL_CHECK \
    do { \
        cudaError_t err = cudaGetLastError(); \
        if (cudaSuccess != err) { \
            fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \
            exit(-1); \
        } \
    } while(0)

#define CUDA_NUM_THREADS 512

namespace {

/* Number of blocks needed so that blocks * threads >= N. */
static int CUDA_NUM_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

/* Bounds-checked read; out-of-range coordinates read as 0. */
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    float retVal = overflow ? 0.0f : data[h * width + w];
    return retVal;
}

/* Bilinear weight of a neighbor at offset (dh, dw), |dh|,|dw| <= 1. */
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw) {
    dw = dw > 0 ? dw : -dw;
    dh = dh > 0 ? dh : -dh;
    return (1.0f - dh) * (1.0f - dw);
}

/* Integral of the linear interpolation between c1 and c2 over [s, t] in [0, 1]. */
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}

/* Bilinear interpolation of the feature map at continuous coords (h, w). */
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width) {
    float retVal = 0.0f;
    int h1 = floorf(h);
    int w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h) + 1;
    w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h);
    w1 = floorf(w) + 1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h) + 1;
    w1 = floorf(w) + 1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    return retVal;
}

/* Exact integral of the bilinear surface over the sub-window [x0,x1]x[y0,y1]
 * of one unit cell with corners (s_h, s_w) .. (e_h, e_w). */
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,
        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;
    float sum_out = 0;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Bounds-checked atomic accumulation of a gradient contribution. */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w,
        const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Adjoint of PrRoIPoolingMatCalculation: spreads top_diff onto the 4 corners. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w,
        const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1, const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward pass: one thread per output cell (n, c, ph, pw).
 * NOTE(review): `static_cast<float>` and the `<<<...>>>` launch configs in this
 * file were stripped by extraction; restored per upstream PreciseRoIPooling. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];

        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
        float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
        float bin_size_h = roi_height / static_cast<float>(pooled_height);
        float bin_size_w = roi_width / static_cast<float>(pooled_width);

        const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        float *this_out = top_data + index;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        if (win_size == 0) {
            *this_out = 0;
            return;
        }

        float sum_out = 0;

        int s_w, s_h, e_w, e_h;
        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                sum_out += PrRoIPoolingMatCalculation(
                    this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);

        *this_out = sum_out / win_size;
    }
}

/* Backward pass w.r.t. the input features. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast<float>(pooled_height);
        float bin_size_w = roi_width / static_cast<float>(pooled_width);

        const float *this_out_grad = top_diff + index;
        float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        int s_w, s_h, e_w, e_h;
        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                PrRoIPoolingMatDistributeDiff(
                    this_data_grad, sum_out,
                    h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
    }
}

/* Backward pass w.r.t. the RoI coordinates (5 gradient slots per RoI). */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast<float>(pooled_height);
        float bin_size_w = roi_width / static_cast<float>(pooled_width);

        const float *this_out_grad = top_diff + index;
        const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        const float *this_top_data = top_data + index;
        float *this_data_grad = bottom_diff + n * 5;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        // WARNING: to be discussed
        if (sum_out == 0)
            return;

        int s_w, s_h, e_w, e_h;
        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
        for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
            g_x1_y += PrRoIPoolingSingleCoorIntegral(
                max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

            g_x2_y += PrRoIPoolingSingleCoorIntegral(
                max(win_start_h, float(h_iter)) - h_iter,
                min(win_end_h, float(h_iter + 1)) - h_iter,
                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
        }

        for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
            g_x_y1 += PrRoIPoolingSingleCoorIntegral(
                max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

            g_x_y2 += PrRoIPoolingSingleCoorIntegral(
                max(win_start_w, float(w_iter)) - w_iter,
                min(win_end_w, float(w_iter + 1)) - w_iter,
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
        }

        float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
        float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
        float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
        float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

        partial_x1 = partial_x1 / win_size * spatial_scale;
        partial_x2 = partial_x2 / win_size * spatial_scale;
        partial_y1 = partial_y1 / win_size * spatial_scale;
        partial_y2 = partial_y2 / win_size * spatial_scale;

        // (b, x1, y1, x2, y2)
        this_data_grad[0] = 0;
        atomicAdd(this_data_grad + 1,
            (partial_x1 * (1.0 - float(pw) / pooled_width)
                + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
        atomicAdd(this_data_grad + 2,
            (partial_y1 * (1.0 - float(ph) / pooled_height)
                + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
        atomicAdd(this_data_grad + 3,
            (partial_x2 * float(pw + 1) / pooled_width
                + partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
        atomicAdd(this_data_grad + 4,
            (partial_y2 * float(ph + 1) / pooled_height
                + partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
    }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count) {

    PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,   /* unused; kept for a symmetric C ABI */
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,      /* unused; kept for a symmetric C ABI */
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_,
    const int top_count, const int bottom_count) {

    /* Gradient buffer must start from zero: the kernel accumulates with atomicAdd. */
    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_rois, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_,
    const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

} /* !extern "C" */

================================================
FILE: external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh
================================================
/*
 * File   : prroi_pooling_gpu_impl.cuh
 * Author : Tete Xiao, Jiayuan Mao
 * Email  : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
*/ #ifndef PRROI_POOLING_GPU_IMPL_CUH #define PRROI_POOLING_GPU_IMPL_CUH #ifdef __cplusplus extern "C" { #endif #define F_DEVPTR_IN const float * #define F_DEVPTR_OUT float * void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count); void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); #ifdef __cplusplus } /* !extern "C" */ #endif #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ ================================================ FILE: external/AR/ltr/models/AR_seg_mask/AR_seg_mask.py ================================================ import torch.nn as nn from ltr.models.neck import CorrNL from ltr import model_constructor import torch import ltr.models.backbone.resnet_seg as resnet_seg from ltr.models.head import seg_network from easydict import EasyDict as edict '''2020.4.14 replace mask head with frtm for higher-quality mask''' '''2020.4.22 Only use the mask branch''' class ARnet_seg_mask(nn.Module): """ Scale Estimation network module with three branches: bbox, coner and mask. 
""" def __init__(self, feature_extractor, neck_module, head_module, used_layers, extractor_grad=True,output_size=(256,256)): """ args: feature_extractor - backbone feature extractor bb_regressor - IoU prediction module bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to bb_regressor extractor_grad - Bool indicating whether backbone feature extractor requires gradients """ super(ARnet_seg_mask, self).__init__() self.feature_extractor = feature_extractor self.neck = neck_module self.refiner = head_module self.used_layers = used_layers self.output_size = output_size if not extractor_grad: for p in self.feature_extractor.parameters(): p.requires_grad_(False) def forward(self, train_imgs, test_imgs, train_bb, mode='train'): """ Forward pass Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col] """ self.forward_ref(train_imgs, train_bb) pred_dict = self.forward_test(test_imgs, mode) return pred_dict def forward_ref(self, train_imgs, train_bb): """ Forward pass of reference branch. size of train_imgs is (1,batch,3,H,W), train_bb is (1,batch,4)""" num_sequences = train_imgs.shape[-4] # batch num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1 # 1 # Extract backbone features '''train_feat OrderedDict, key:'layer4' ''' train_feat_dict = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:])) # 输入size是(batch,3,256,256) train_feat_list = [feat for feat in train_feat_dict.values()] #list,其中每个元素对应一层输出的特征(tensor) # get reference feature self.neck.get_ref_kernel(train_feat_list, train_bb.view(num_train_images, num_sequences, 4)) def forward_test(self, test_imgs, mode='train'): """ Forward pass of test branch. 
size of test_imgs is (1,batch,3,256,256)""" output = {} # Extract backbone features test_feat_dict = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]), layers=['layer1','layer2','layer3','layer4','layer5'])# 输入size是(batch,3,256,256) '''list,tensor''' # Save low-level feature list # Lfeat_list = [feat for name, feat in test_feat_dict.items() if name != 'layer3'] # fuse feature from two branches fusion_feat = self.neck.fuse_feat([test_feat_dict['layer4']]) # Obtain bbox prediction if mode=='train': output['mask'] = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size)) elif mode == 'mask': output = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size)) else: raise ValueError("mode should be train or test") return output def extract_backbone_features(self, im, layers=None): if layers is None: layers = self.used_layers return self.feature_extractor(im, layers) def extract_features(self, im, layers): return self.feature_extractor(im, layers) @model_constructor def ARnet_seg_mask_resnet50(backbone_pretrained=True,used_layers=('layer4',),pool_size=None): # backbone backbone_net = resnet_seg.resnet50(pretrained=backbone_pretrained) # neck neck_net = CorrNL.CorrNL(pool_size=pool_size) # multiple heads '''create segnet''' in_channels = 1024 # disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96, out_channels=64) # non-local feat (64 channels rather than 1) '''2020.4.22 change "out_channels" to pool_size * pool_size''' disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96, out_channels=pool_size*pool_size) # non-local feat (64 channels rather than 1) refnet_params = edict( layers=("layer5", "layer4", "layer3", "layer2"), nchannels=64, use_batch_norm=True) disc_params.in_channels = backbone_net.get_out_channels()[disc_params.layer] p = refnet_params refinement_layers_channels = {L: nch for L, nch in backbone_net.get_out_channels().items() if L in p.layers} refiner = 
seg_network.SegNetwork(disc_params.out_channels, p.nchannels, refinement_layers_channels, p.use_batch_norm) '''create Alpha-Refine''' net = ARnet_seg_mask(feature_extractor=backbone_net, neck_module=neck_net, head_module=refiner, used_layers=used_layers, extractor_grad=True, output_size=(int(pool_size*2*16),int(pool_size*2*16))) return net ================================================ FILE: external/AR/ltr/models/AR_seg_mask/__init__.py ================================================ ================================================ FILE: external/AR/ltr/models/__init__.py ================================================ ================================================ FILE: external/AR/ltr/models/backbone/__init__.py ================================================ from .resnet import resnet18, resnet50, resnet_baby from .resnet18_vggm import resnet18_vggmconv1 ================================================ FILE: external/AR/ltr/models/backbone/base.py ================================================ import torch import torch.nn as nn class Backbone(nn.Module): """Base class for backbone networks. Handles freezing layers etc. args: frozen_layers - Name of layers to freeze. Either list of strings, 'none' or 'all'. Default: 'none'. """ def __init__(self, frozen_layers=()): super().__init__() if isinstance(frozen_layers, str): if frozen_layers.lower() == 'none': frozen_layers = () elif frozen_layers.lower() != 'all': raise ValueError('Unknown option for frozen layers: \"{}\". 
Should be \"all\", \"none\" or list of layer names.'.format(frozen_layers)) self.frozen_layers = frozen_layers self._is_frozen_nograd = False def train(self, mode=True): super().train(mode) if mode == True: self._set_frozen_to_eval() if not self._is_frozen_nograd: self._set_frozen_to_nograd() self._is_frozen_nograd = True def _set_frozen_to_eval(self): if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all': self.eval() else: for layer in self.frozen_layers: getattr(self, layer).eval() def _set_frozen_to_nograd(self): if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all': for p in self.parameters(): p.requires_grad_(False) else: for layer in self.frozen_layers: for p in getattr(self, layer).parameters(): p.requires_grad_(False) ================================================ FILE: external/AR/ltr/models/backbone/resnet.py ================================================ import math import torch.nn as nn from collections import OrderedDict import torch.utils.model_zoo as model_zoo from torchvision.models.resnet import model_urls from .base import Backbone def conv3x3(in_planes, out_planes, stride=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_bn=True): super(BasicBlock, self).__init__() self.use_bn = use_bn self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation) if use_bn: self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes, dilation=dilation) if use_bn: self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) if self.use_bn: out = self.bn1(out) out = self.relu(out) out = self.conv2(out) if self.use_bn: out = self.bn2(out) if 
self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(Backbone): """ ResNet network module. 
Allows extracting specific feature blocks.""" def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1, frozen_layers=()): self.inplanes = inplanes super(ResNet, self).__init__(frozen_layers=frozen_layers) self.output_layers = output_layers self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) stride = [1 + (dilation_factor < l) for l in (8, 4, 2)] self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1)) self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1)) self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1)) self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor) out_feature_strides = {'conv1': 4, 'layer1': 4, 'layer2': 4*stride[0], 'layer3': 4*stride[0]*stride[1], 'layer4': 4*stride[0]*stride[1]*stride[2]} # TODO better way? 
if isinstance(self.layer1[0], BasicBlock): out_feature_channels = {'conv1': inplanes, 'layer1': inplanes, 'layer2': inplanes*2, 'layer3': inplanes*4, 'layer4': inplanes*8} elif isinstance(self.layer1[0], Bottleneck): base_num_channels = 4 * inplanes out_feature_channels = {'conv1': inplanes, 'layer1': base_num_channels, 'layer2': base_num_channels * 2, 'layer3': base_num_channels * 4, 'layer4': base_num_channels * 8} else: raise Exception('block not supported') self._out_feature_strides = out_feature_strides self._out_feature_channels = out_feature_channels # self.avgpool = nn.AvgPool2d(7, stride=1) self.avgpool = nn.AdaptiveAvgPool2d((1,1)) self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def out_feature_strides(self, layer=None): if layer is None: return self._out_feature_strides else: return self._out_feature_strides[layer] def out_feature_channels(self, layer=None): if layer is None: return self._out_feature_channels else: return self._out_feature_channels[layer] def _make_layer(self, block, planes, blocks, stride=1, dilation=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _add_output_and_check(self, name, x, outputs, output_layers): if name in output_layers: outputs[name] = x return len(output_layers) == len(outputs) def forward(self, x, output_layers=None): """ Forward pass with 
input x. The output_layers specify the feature blocks which must be returned """ outputs = OrderedDict() if output_layers is None: output_layers = self.output_layers x = self.conv1(x) x = self.bn1(x) x = self.relu(x) if self._add_output_and_check('conv1', x, outputs, output_layers): return outputs x = self.maxpool(x) x = self.layer1(x) if self._add_output_and_check('layer1', x, outputs, output_layers): return outputs x = self.layer2(x) if self._add_output_and_check('layer2', x, outputs, output_layers): return outputs x = self.layer3(x) if self._add_output_and_check('layer3', x, outputs, output_layers): return outputs x = self.layer4(x) if self._add_output_and_check('layer4', x, outputs, output_layers): return outputs x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) if self._add_output_and_check('fc', x, outputs, output_layers): return outputs if len(output_layers) == 1 and output_layers[0] == 'default': return x raise ValueError('output_layer is wrong.') def resnet_baby(output_layers=None, pretrained=False, inplanes=16, **kwargs): """Constructs a ResNet-18 model. """ if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, inplanes=inplanes, **kwargs) if pretrained: raise NotImplementedError return model def resnet18(output_layers=None, pretrained=False, **kwargs): """Constructs a ResNet-18 model. """ if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet50(output_layers=None, pretrained=False, **kwargs): """Constructs a ResNet-50 model. 
""" if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model ================================================ FILE: external/AR/ltr/models/backbone/resnet18_vggm.py ================================================ import math import torch import torch.nn as nn from collections import OrderedDict from torchvision.models.resnet import BasicBlock from .base import Backbone class SpatialCrossMapLRN(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True): super(SpatialCrossMapLRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average=nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size-1.0)/2), 0, 0)) else: self.average=nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size-1.0)/2)) self.alpha = alpha self.beta = beta self.k = k def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(self.k).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(self.k).pow(self.beta) x = x.div(div) return x class ResNetVGGm1(Backbone): def __init__(self, block, layers, output_layers, num_classes=1000, frozen_layers=()): self.inplanes = 64 super(ResNetVGGm1, self).__init__(frozen_layers=frozen_layers) self.output_layers = output_layers self.vggmconv1 = nn.Conv2d(3,96,(7, 7),(2, 2), padding=3) self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2) self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 
= self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # self.avgpool = nn.AvgPool2d(7, stride=1) self.avgpool = nn.AdaptiveAvgPool2d((1,1)) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _add_output_and_check(self, name, x, outputs, output_layers): if name in output_layers: outputs[name] = x return len(output_layers) == len(outputs) def forward(self, x, output_layers=None): outputs = OrderedDict() if output_layers is None: output_layers = self.output_layers if 'vggconv1' in output_layers: c1 = self.vgglrn(self.relu(self.vggmconv1(x))) if self._add_output_and_check('vggconv1', c1, outputs, output_layers): return outputs x = self.conv1(x) x = self.bn1(x) x = self.relu(x) if self._add_output_and_check('conv1', x, outputs, output_layers): return outputs x = self.maxpool(x) x = self.layer1(x) if self._add_output_and_check('layer1', x, outputs, output_layers): return outputs x = self.layer2(x) if self._add_output_and_check('layer2', x, outputs, output_layers): return outputs x = self.layer3(x) if self._add_output_and_check('layer3', x, outputs, 
output_layers): return outputs x = self.layer4(x) if self._add_output_and_check('layer4', x, outputs, output_layers): return outputs x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) if self._add_output_and_check('fc', x, outputs, output_layers): return outputs if len(output_layers) == 1 and output_layers[0] == 'default': return x raise ValueError('output_layer is wrong.') def resnet18_vggmconv1(output_layers=None, path=None, **kwargs): """Constructs a ResNet-18 model with first-layer VGGm features. """ if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNetVGGm1(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs) if path is not None: model.load_state_dict(torch.load(path), strict=False) return model ================================================ FILE: external/AR/ltr/models/backbone/resnet_seg.py ================================================ import math import torch.nn as nn from collections import OrderedDict import torch.utils.model_zoo as model_zoo from torchvision.models.resnet import model_urls '''2020.4.14 newly added''' from collections import OrderedDict as odict from ltr.models.head.utils import get_out_channels def conv3x3(in_planes, out_planes, stride=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual 
= x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): """ ResNet network module. 
Allows extracting specific feature blocks.""" def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1): self.inplanes = inplanes super(ResNet, self).__init__() self.output_layers = output_layers self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) stride = [1 + (dilation_factor < l) for l in (8, 4, 2)] self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1)) self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1)) self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1)) self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor) # self.avgpool = nn.AvgPool2d(7, stride=1) self.avgpool = nn.AdaptiveAvgPool2d((1,1)) self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() '''2020.4.14 newly added''' self._out_channels = odict( # Deep-to-shallow order is required by SegNetwork layer5=get_out_channels(self.layer4), layer4=get_out_channels(self.layer3), layer3=get_out_channels(self.layer2), layer2=get_out_channels(self.layer1), layer1=get_out_channels(self.conv1)) def _make_layer(self, block, planes, blocks, stride=1, dilation=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _add_output_and_check(self, name, x, outputs, output_layers): if name in output_layers: outputs[name] = x return len(output_layers) == len(outputs) def forward(self, x, output_layers=None): """ Forward pass with input x. 
The output_layers specify the feature blocks which must be returned """ outputs = OrderedDict() if output_layers is None: output_layers = self.output_layers x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x)# conv1: (batch,64,128,128) '''2020.4.14 change names for every layers''' if self._add_output_and_check('layer1', x, outputs, output_layers): return outputs x = self.layer1(x) # (batch,256,64,64) if self._add_output_and_check('layer2', x, outputs, output_layers): return outputs x = self.layer2(x) # (batch,512,32,32) if self._add_output_and_check('layer3', x, outputs, output_layers): return outputs x = self.layer3(x) # (batch,1024,16,16) if self._add_output_and_check('layer4', x, outputs, output_layers): return outputs x = self.layer4(x) if self._add_output_and_check('layer5', x, outputs, output_layers): return outputs x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) if self._add_output_and_check('fc', x, outputs, output_layers): return outputs if len(output_layers) == 1 and output_layers[0] == 'default': return x raise ValueError('output_layer is wrong.') '''2020.4.14 newly added''' def get_out_channels(self): return self._out_channels def resnet18(output_layers=None, pretrained=False, dilation_factor=1): """Constructs a ResNet-18 model. """ if output_layers is None: output_layers = ['default'] else: for l in output_layers: if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']: raise ValueError('Unknown layer: {}'.format(l)) model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, dilation_factor=dilation_factor) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet50(output_layers=None, pretrained=False, dilation_factor=1): """Constructs a ResNet-50 model. 
class ATOMnet(nn.Module):
    """ATOM network: a backbone feature extractor plus an IoU-prediction head."""

    def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):
        """
        args:
            feature_extractor - backbone feature extractor
            bb_regressor - IoU prediction module
            bb_regressor_layer - names of the backbone layers fed to bb_regressor
            extractor_grad - whether the backbone requires gradients
        """
        super(ATOMnet, self).__init__()
        self.feature_extractor = feature_extractor
        self.bb_regressor = bb_regressor
        self.bb_regressor_layer = bb_regressor_layer
        if not extractor_grad:
            for p in self.feature_extractor.parameters():
                p.requires_grad_(False)

    def forward(self, train_imgs, test_imgs, train_bb, test_proposals):
        """Forward pass.

        In sequence mode (5-dim inputs) tensors are [sequence, batch, feature,
        row, col]; images are flattened into the batch dimension before the
        backbone, and the box tensors are reshaped to match.
        """
        num_sequences = train_imgs.shape[-4]
        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1
        num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1

        # Backbone features for all reference and all test frames at once.
        train_feat = self.extract_backbone_features(
            train_imgs.reshape(-1, *train_imgs.shape[-3:]))
        test_feat = self.extract_backbone_features(
            test_imgs.reshape(-1, *test_imgs.shape[-3:]))

        train_feat_iou = list(train_feat.values())
        test_feat_iou = list(test_feat.values())

        # IoU prediction for every proposal.
        iou_pred = self.bb_regressor(
            train_feat_iou, test_feat_iou,
            train_bb.reshape(num_train_images, num_sequences, 4),
            test_proposals.reshape(num_train_images, num_sequences, -1, 4))
        return iou_pred

    def extract_backbone_features(self, im, layers=None):
        if layers is None:
            layers = self.bb_regressor_layer
        return self.feature_extractor(im, layers)

    def extract_features(self, im, layers):
        return self.feature_extractor(im, layers)
bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim) net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'], extractor_grad=False) return net ================================================ FILE: external/AR/ltr/models/bbreg/atom_iou_net.py ================================================ import torch.nn as nn import torch from ltr.models.layers.blocks import LinearBlock from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(out_planes), nn.ReLU(inplace=True)) class AtomIoUNet(nn.Module): """Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture. It uses two backbone feature layers as input. args: input_dim: Feature dimensionality of the two input backbone layers. pred_input_dim: Dimensionality input the the prediction network. 
pred_inter_dim: Intermediate dimensionality in the prediction network.""" def __init__(self, input_dim=(128,256), pred_input_dim=(256,256), pred_inter_dim=(256,256)): super().__init__() # _r for reference, _t for test self.conv3_1r = conv(input_dim[0], 128, kernel_size=3, stride=1) self.conv3_1t = conv(input_dim[0], 256, kernel_size=3, stride=1) self.conv3_2t = conv(256, pred_input_dim[0], kernel_size=3, stride=1) self.prroi_pool3r = PrRoIPool2D(3, 3, 1/8) self.prroi_pool3t = PrRoIPool2D(5, 5, 1/8) self.fc3_1r = conv(128, 256, kernel_size=3, stride=1, padding=0) self.conv4_1r = conv(input_dim[1], 256, kernel_size=3, stride=1) self.conv4_1t = conv(input_dim[1], 256, kernel_size=3, stride=1) self.conv4_2t = conv(256, pred_input_dim[1], kernel_size=3, stride=1) self.prroi_pool4r = PrRoIPool2D(1, 1, 1/16) self.prroi_pool4t = PrRoIPool2D(3, 3, 1 / 16) self.fc34_3r = conv(256 + 256, pred_input_dim[0], kernel_size=1, stride=1, padding=0) self.fc34_4r = conv(256 + 256, pred_input_dim[1], kernel_size=1, stride=1, padding=0) self.fc3_rt = LinearBlock(pred_input_dim[0], pred_inter_dim[0], 5) self.fc4_rt = LinearBlock(pred_input_dim[1], pred_inter_dim[1], 3) self.iou_predictor = nn.Linear(pred_inter_dim[0]+pred_inter_dim[1], 1, bias=True) # Init weights for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight.data, mode='fan_in') if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): # In earlier versions batch norm parameters was initialized with default initialization, # which changed in pytorch 1.2. In 1.1 and earlier the weight was set to U(0,1). # So we use the same initialization here. # m.weight.data.fill_(1) m.weight.data.uniform_() m.bias.data.zero_() def forward(self, feat1, feat2, bb1, proposals2): """Runs the ATOM IoUNet during training operation. This forward pass is mainly used for training. Call the individual functions during tracking instead. 
args: feat1: Features from the reference frames (4 or 5 dims). feat2: Features from the test frames (4 or 5 dims). bb1: Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4). proposals2: Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4).""" assert bb1.dim() == 3 assert proposals2.dim() == 4 num_images = proposals2.shape[0] num_sequences = proposals2.shape[1] # Extract first train sample feat1 = [f[0,...] if f.dim()==5 else f.reshape(-1, num_sequences, *f.shape[-3:])[0,...] for f in feat1] bb1 = bb1[0,...] # Get modulation vector modulation = self.get_modulation(feat1, bb1) iou_feat = self.get_iou_feat(feat2) modulation = [f.reshape(1, num_sequences, -1).repeat(num_images, 1, 1).reshape(num_sequences*num_images, -1) for f in modulation] proposals2 = proposals2.reshape(num_sequences*num_images, -1, 4) pred_iou = self.predict_iou(modulation, iou_feat, proposals2) return pred_iou.reshape(num_images, num_sequences, -1) def predict_iou(self, modulation, feat, proposals): """Predicts IoU for the give proposals. args: modulation: Modulation vectors for the targets. Dims (batch, feature_dim). feat: IoU features (from get_iou_feat) for test images. Dims (batch, feature_dim, H, W). 
proposals: Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).""" fc34_3_r, fc34_4_r = modulation c3_t, c4_t = feat batch_size = c3_t.size()[0] # Modulation c3_t_att = c3_t * fc34_3_r.reshape(batch_size, -1, 1, 1) c4_t_att = c4_t * fc34_4_r.reshape(batch_size, -1, 1, 1) # Add batch_index to rois batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(c3_t.device) # Push the different rois for the same image along the batch dimension num_proposals_per_batch = proposals.shape[1] # input proposals2 is in format xywh, convert it to x0y0x1y1 format proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2) # Add batch index roi2 = torch.cat((batch_index.reshape(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1), proposals_xyxy), dim=2) roi2 = roi2.reshape(-1, 5).to(proposals_xyxy.device) roi3t = self.prroi_pool3t(c3_t_att, roi2) roi4t = self.prroi_pool4t(c4_t_att, roi2) fc3_rt = self.fc3_rt(roi3t) fc4_rt = self.fc4_rt(roi4t) fc34_rt_cat = torch.cat((fc3_rt, fc4_rt), dim=1) iou_pred = self.iou_predictor(fc34_rt_cat).reshape(batch_size, num_proposals_per_batch) return iou_pred def get_modulation(self, feat, bb): """Get modulation vectors for the targets. args: feat: Backbone features from reference images. Dims (batch, feature_dim, H, W). bb: Target boxes (x,y,w,h) in image coords in the reference samples. 
Dims (batch, 4).""" feat3_r, feat4_r = feat c3_r = self.conv3_1r(feat3_r) # Add batch_index to rois batch_size = bb.shape[0] batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(bb.device) # input bb is in format xywh, convert it to x0y0x1y1 format bb = bb.clone() bb[:, 2:4] = bb[:, 0:2] + bb[:, 2:4] roi1 = torch.cat((batch_index, bb), dim=1) roi3r = self.prroi_pool3r(c3_r, roi1) c4_r = self.conv4_1r(feat4_r) roi4r = self.prroi_pool4r(c4_r, roi1) fc3_r = self.fc3_1r(roi3r) # Concatenate from block 3 and 4 fc34_r = torch.cat((fc3_r, roi4r), dim=1) fc34_3_r = self.fc34_3r(fc34_r) fc34_4_r = self.fc34_4r(fc34_r) return fc34_3_r, fc34_4_r def get_iou_feat(self, feat2): """Get IoU prediction features from a 4 or 5 dimensional backbone input.""" feat2 = [f.reshape(-1, *f.shape[-3:]) if f.dim()==5 else f for f in feat2] feat3_t, feat4_t = feat2 c3_t = self.conv3_2t(self.conv3_1t(feat3_t)) c4_t = self.conv4_2t(self.conv4_1t(feat4_t)) return c3_t, c4_t ================================================ FILE: external/AR/ltr/models/head/__init__.py ================================================ ================================================ FILE: external/AR/ltr/models/head/seg_network.py ================================================ import torch from torch import nn as nn from torch.nn import functional as F from ltr.models.head.utils import conv, relu, interpolate, adaptive_cat class TSE(nn.Module): def __init__(self, fc, ic, oc): super().__init__() nc = ic + oc self.reduce = nn.Sequential(conv(fc, oc, 1), relu(), conv(oc, oc, 1)) self.transform = nn.Sequential(conv(nc, nc, 3), relu(), conv(nc, nc, 3), relu(), conv(nc, oc, 3), relu()) def forward(self, ft, score, x=None): h = self.reduce(ft) hpool = F.adaptive_avg_pool2d(h, (1, 1)) if x is None else x h = adaptive_cat((h, score), dim=1, ref_tensor=0) h = self.transform(h) return h, hpool class CAB(nn.Module): def __init__(self, oc, deepest): super().__init__() self.convreluconv = 
nn.Sequential(conv(2 * oc, oc, 1), relu(), conv(oc, oc, 1)) self.deepest = deepest def forward(self, deeper, shallower, att_vec=None): shallow_pool = F.adaptive_avg_pool2d(shallower, (1, 1)) deeper_pool = deeper if self.deepest else F.adaptive_avg_pool2d(deeper, (1, 1)) if att_vec is not None: global_pool = torch.cat([shallow_pool, deeper_pool, att_vec], dim=1) else: global_pool = torch.cat((shallow_pool, deeper_pool), dim=1) conv_1x1 = self.convreluconv(global_pool) inputs = shallower * torch.sigmoid(conv_1x1) out = inputs + interpolate(deeper, inputs.shape[-2:]) return out class RRB(nn.Module): def __init__(self, oc, use_bn=False): super().__init__() self.conv1x1 = conv(oc, oc, 1) if use_bn: self.bblock = nn.Sequential(conv(oc, oc, 3), nn.BatchNorm2d(oc), relu(), conv(oc, oc, 3, bias=False)) else: self.bblock = nn.Sequential(conv(oc, oc, 3), relu(), conv(oc, oc, 3, bias=False)) # Basic block def forward(self, x): h = self.conv1x1(x) return F.relu(h + self.bblock(h)) class Upsampler(nn.Module): def __init__(self, in_channels=64): super().__init__() self.conv1 = conv(in_channels, in_channels // 2, 3) self.conv2 = conv(in_channels // 2, 1, 3) def forward(self, x, image_size): print(x.shape) x = F.interpolate(x, (2 * x.shape[-2], 2 * x.shape[-1]), mode='bicubic', align_corners=False) x = F.relu(self.conv1(x)) x = F.interpolate(x, image_size[-2:], mode='bicubic', align_corners=False) x = self.conv2(x) return x class PyrUpBicubic2d(nn.Module): def __init__(self, channels): super().__init__() self.channels = channels def kernel(d): x = d + torch.arange(-1, 3, dtype=torch.float32) x = torch.abs(x) a = -0.75 f = (x < 1).float() * ((a + 2) * x * x * x - (a + 3) * x * x + 1) + \ ((x >= 1) * (x < 2)).float() * (a * x * x * x - 5 * a * x * x + 8 * a * x - 4 * a) W = f.reshape(1, 1, 1, len(x)).float() Wt = W.permute(0, 1, 3, 2) return W, Wt We, We_t = kernel(-0.25) Wo, Wo_t = kernel(-0.25 - 0.5) # Building non-separable filters for now. 
It would make sense to # have separable filters if it proves to be faster. # .contiguous() is needed until a bug is fixed in nn.Conv2d. self.W00 = (We_t @ We).expand(channels, 1, 4, 4).contiguous() self.W01 = (We_t @ Wo).expand(channels, 1, 4, 4).contiguous() self.W10 = (Wo_t @ We).expand(channels, 1, 4, 4).contiguous() self.W11 = (Wo_t @ Wo).expand(channels, 1, 4, 4).contiguous() def forward(self, input): if input.device != self.W00.device: self.W00 = self.W00.to(input.device) self.W01 = self.W01.to(input.device) self.W10 = self.W10.to(input.device) self.W11 = self.W11.to(input.device) a = F.pad(input, (2, 2, 2, 2), 'replicate') I00 = F.conv2d(a, self.W00, groups=self.channels) I01 = F.conv2d(a, self.W01, groups=self.channels) I10 = F.conv2d(a, self.W10, groups=self.channels) I11 = F.conv2d(a, self.W11, groups=self.channels) n, c, h, w = I11.shape J0 = torch.stack((I00, I01), dim=-1).view(n, c, h, 2 * w) J1 = torch.stack((I10, I11), dim=-1).view(n, c, h, 2 * w) out = torch.stack((J0, J1), dim=-2).view(n, c, 2 * h, 2 * w) out = F.pad(out, (-1, -1, -1, -1)) return out class BackwardCompatibleUpsampler(nn.Module): """ Upsampler with bicubic interpolation that works with Pytorch 1.0.1 """ def __init__(self, in_channels=64): super().__init__() self.conv1 = conv(in_channels, in_channels // 2, 3) self.up1 = PyrUpBicubic2d(in_channels) self.conv2 = conv(in_channels // 2, 1, 3) self.up2 = PyrUpBicubic2d(in_channels // 2) def forward(self, x, image_size): x = self.up1(x) x = F.relu(self.conv1(x)) x = self.up2(x) x = F.interpolate(x, image_size[-2:], mode='bilinear', align_corners=False) x = self.conv2(x) return x class SegNetwork(nn.Module): def __init__(self, in_channels=1, out_channels=32, ft_channels=None, use_bn=False): super().__init__() assert ft_channels is not None self.ft_channels = ft_channels self.TSE = nn.ModuleDict() self.RRB1 = nn.ModuleDict() self.CAB = nn.ModuleDict() self.RRB2 = nn.ModuleDict() ic = in_channels oc = out_channels for L, fc in 
self.ft_channels.items(): self.TSE[L] = TSE(fc, ic, oc) self.RRB1[L] = RRB(oc, use_bn=use_bn) self.CAB[L] = CAB(oc, L == 'layer5') self.RRB2[L] = RRB(oc, use_bn=use_bn) #if torch.__version__ == '1.0.1' self.project = BackwardCompatibleUpsampler(out_channels) #self.project = Upsampler(out_channels) def forward(self, scores, features, image_size): num_targets = scores.shape[0] num_fmaps = features[next(iter(self.ft_channels))].shape[0] if num_targets > num_fmaps: multi_targets = True else: multi_targets = False x = None for i, L in enumerate(self.ft_channels): ft = features[L] s = interpolate(scores, ft.shape[-2:]) # Resample scores to match features size if multi_targets: h, hpool = self.TSE[L](ft.repeat(num_targets, 1, 1, 1), s, x) else: h, hpool = self.TSE[L](ft, s, x) h = self.RRB1[L](h) h = self.CAB[L](hpool, h) x = self.RRB2[L](h) x = self.project(x, image_size) return x ================================================ FILE: external/AR/ltr/models/head/utils.py ================================================ from collections import OrderedDict as odict import numpy as np import torch from torch import nn as nn from torch.nn import functional as F def text_bargraph(values): blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o')) nsteps = len(blocks) - 2 - 1 hstep = 1 / (2 * nsteps) values = np.array(values) nans = np.isnan(values) values[nans] = 0 # '░' indices = ((values + hstep) * nsteps + 1).astype(np.int) indices[values < 0] = 0 indices[values > 1] = len(blocks) - 1 graph = blocks[indices] graph[nans] = '░' graph = str.join('', graph) return graph class ModuleWrapper: """ A wrapper for hiding modules from PyTorch, so that the same module can be used in multiple places. and yet saved only once in a checkpoint, or not at all. 
""" # https://stackoverflow.com/questions/1466676/create-a-wrapper-class-to-call-a-pre-and-post-function-around-existing-functions def __init__(self, wrapped_module): self.__wrapped_module__ = wrapped_module def __getattr__(self, attr): orig_attr = self.__wrapped_module__.__getattribute__(attr) if callable(orig_attr): def hooked(*args, **kwargs): result = orig_attr(*args, **kwargs) # prevent wrapped_class from becoming unwrapped if result == self.__wrapped_module__: return self return result return hooked else: return orig_attr def __call__(self, *args, **kwargs): return self.__wrapped_module__(*args, **kwargs) def conv(ic, oc, ksize, bias=True, dilation=1, stride=1): return nn.Conv2d(ic, oc, ksize, padding=ksize // 2, bias=bias, dilation=dilation, stride=stride) def relu(negative_slope=0.0, inplace=False): return nn.LeakyReLU(negative_slope, inplace=inplace) def interpolate(t, sz): sz = sz.tolist() if torch.is_tensor(sz) else sz return F.interpolate(t, sz, mode='bilinear', align_corners=False) if t.shape[-2:] != sz else t def adaptive_cat(seq, dim=0, ref_tensor=0): sz = seq[ref_tensor].shape[-2:] t = torch.cat([interpolate(t, sz) for t in seq], dim=dim) return t def get_out_channels(layer): if hasattr(layer, 'out_channels'): oc = layer.out_channels elif hasattr(layer, '_modules'): oc = get_out_channels(layer._modules) else: ocs = [] for key in reversed(layer): ocs.append(get_out_channels(layer[key])) oc = 0 for elem in ocs: if elem: return elem return oc def is_finite(t): return (torch.isnan(t) + torch.isinf(t)) == 0 class AverageMeter: """Computes and stores the average and current value""" def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.seq_avg = [] def reset(self): self.__init__() def update(self, val, n=1): if not np.isnan(val): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def update_multi(self, val): val = np.array(val) v = val[~np.isnan(val)] n = len(v) self.val = val self.sum += 
np.nansum(v) self.count += n self.avg = self.sum / self.count ================================================ FILE: external/AR/ltr/models/layers/__init__.py ================================================ ================================================ FILE: external/AR/ltr/models/layers/activation.py ================================================ import math import torch import torch.nn as nn import torch.nn.functional as F def softmax_reg(x: torch.Tensor, dim, reg=None): """Softmax with optinal denominator regularization.""" if reg is None: return torch.softmax(x, dim=dim) dim %= x.dim() if isinstance(reg, (float, int)): reg = x.new_tensor([reg]) reg = reg.expand([1 if d==dim else x.shape[d] for d in range(x.dim())]) x = torch.cat((x, reg), dim=dim) return torch.softmax(x, dim=dim)[[slice(-1) if d==dim else slice(None) for d in range(x.dim())]] class MLU(nn.Module): r"""MLU activation """ def __init__(self, min_val, inplace=False): super().__init__() self.min_val = min_val self.inplace = inplace def forward(self, input): return F.elu(F.leaky_relu(input, 1/self.min_val, inplace=self.inplace), self.min_val, inplace=self.inplace) class LeakyReluPar(nn.Module): r"""LeakyRelu parametric activation """ def forward(self, x, a): return (1.0 - a)/2.0 * torch.abs(x) + (1.0 + a)/2.0 * x class LeakyReluParDeriv(nn.Module): r"""Derivative of the LeakyRelu parametric activation, wrt x. 
""" def forward(self, x, a): return (1.0 - a)/2.0 * torch.sign(x.detach()) + (1.0 + a)/2.0 class BentIdentPar(nn.Module): r"""BentIdent parametric activation """ def __init__(self, b=1.0): super().__init__() self.b = b def forward(self, x, a): return (1.0 - a)/2.0 * (torch.sqrt(x*x + 4.0*self.b*self.b) - 2.0*self.b) + (1.0 + a)/2.0 * x class BentIdentParDeriv(nn.Module): r"""BentIdent parametric activation deriv """ def __init__(self, b=1.0): super().__init__() self.b = b def forward(self, x, a): return (1.0 - a)/2.0 * (x / torch.sqrt(x*x + 4.0*self.b*self.b)) + (1.0 + a)/2.0 ================================================ FILE: external/AR/ltr/models/layers/blocks.py ================================================ from torch import nn def conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True, batch_norm=True, relu=True, padding_mode='zeros'): layers = [] assert padding_mode == 'zeros' or padding_mode == 'replicate' if padding_mode == 'replicate' and padding > 0: assert isinstance(padding, int) layers.append(nn.ReflectionPad2d(padding)) padding = 0 layers.append(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)) if batch_norm: layers.append(nn.BatchNorm2d(out_planes)) if relu: layers.append(nn.ReLU(inplace=True)) return nn.Sequential(*layers) class LinearBlock(nn.Module): def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True): super().__init__() self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias) self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None self.relu = nn.ReLU(inplace=True) if relu else None def forward(self, x): x = self.linear(x.reshape(x.shape[0], -1)) if self.bn is not None: x = self.bn(x.reshape(x.shape[0], x.shape[1], 1, 1)) if self.relu is not None: x = self.relu(x) return x.reshape(x.shape[0], -1) ================================================ FILE: 
external/AR/ltr/models/layers/distance.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F class DistanceMap(nn.Module): """Generate a distance map from a origin center location. args: num_bins: Number of bins in the map. bin_displacement: Displacement of the bins. """ def __init__(self, num_bins, bin_displacement=1.0): super().__init__() self.num_bins = num_bins self.bin_displacement = bin_displacement def forward(self, center, output_sz): """Create the distance map. args: center: Torch tensor with (y,x) center position. Dims (batch, 2) output_sz: Size of output distance map. 2-dimensional tuple.""" center = center.view(-1,2) bin_centers = torch.arange(self.num_bins, dtype=torch.float32, device=center.device).view(1, -1, 1, 1) k0 = torch.arange(output_sz[0], dtype=torch.float32, device=center.device).view(1,1,-1,1) k1 = torch.arange(output_sz[1], dtype=torch.float32, device=center.device).view(1,1,1,-1) d0 = k0 - center[:,0].view(-1,1,1,1) d1 = k1 - center[:,1].view(-1,1,1,1) dist = torch.sqrt(d0*d0 + d1*d1) bin_diff = dist / self.bin_displacement - bin_centers bin_val = torch.cat((F.relu(1.0 - torch.abs(bin_diff[:,:-1,:,:]), inplace=True), (1.0 + bin_diff[:,-1:,:,:]).clamp(0, 1)), dim=1) return bin_val ================================================ FILE: external/AR/ltr/models/layers/filter.py ================================================ import torch import torch.nn.functional as F def apply_filter(feat, filter, dilation_factors=None): """Applies the filter on the input features (feat). The number of groups is automatically calculated. args: feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W) filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW) or (sequences, filters, feat_dim/groups, fH, fW) output: scores: Output of filtering. 
Dimensions (images_in_sequence, sequences, yH, yW) or (images_in_sequence, sequences, filters, yH, yW) """ multiple_filters = (filter.dim() == 5) padding = (filter.shape[-2] // 2, filter.shape[-1] // 2) num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 num_filters = filter.shape[1] if multiple_filters else 1 num_channels = feat.shape[-3] groups = num_channels // filter.shape[-3] assert num_filters % groups == 0 and num_channels % groups == 0 if multiple_filters: if dilation_factors is None: scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter.view(-1, *filter.shape[-3:]), padding=padding, groups=num_sequences*groups) return scores.view(num_images, num_sequences, -1, scores.shape[-2], scores.shape[-1]) else: scores_all = [] start_id = 0 for d_factor, num_filters_with_d in dilation_factors.items(): f_d = filter[:, start_id:start_id+num_filters_with_d, ...].contiguous() padding_d = [p+d_factor-1 for p in padding] scores_d = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), f_d.view(-1, *f_d.shape[-3:]), padding=padding_d, groups=num_sequences * groups, dilation=d_factor) scores_d = scores_d.view(num_images, num_sequences, -1, scores_d.shape[-2], scores_d.shape[-1]) scores_all.append(scores_d) start_id += num_filters_with_d scores = torch.cat(scores_all, dim=2) return scores scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter, padding=padding, groups=num_sequences) return scores.view(num_images, num_sequences, scores.shape[-2], scores.shape[-1]) def apply_feat_transpose(feat, input, filter_ksz, training=True, groups=1): """Applies the transposed operation off apply_filter w.r.t. filter itself. Can be used to compute the filter gradient. args: feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W) input: Input activation (e.g. residuals). 
Must have dimensions (images_in_sequence, sequences, yH, yW) or (images_in_sequence, sequences, filters, yH, yW) training: Choose the faster implementation whether training or not. output: Output of transposed operation. Dimensions (sequences, feat_dim, fH, fW) """ if groups != 1: raise NotImplementedError('Not implemented other values of group.') if training or input.dim() == 5: return _apply_feat_transpose_v3(feat, input, filter_ksz) return _apply_feat_transpose_v2(feat, input, filter_ksz) def _apply_feat_transpose_v1(feat, input, filter_ksz): """This one is slow as hell!!!!""" num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 feat_sz = (feat.shape[-2], feat.shape[-1]) if isinstance(filter_ksz, int): filter_ksz = (filter_ksz, filter_ksz) # trans_pad = sz + padding - filter_ksz trans_pad = [sz + ksz//2 - ksz for sz, ksz in zip(feat_sz, filter_ksz)] filter_grad = F.conv_transpose2d(input.flip((2, 3)).view(1, -1, input.shape[-2], input.shape[-1]), feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]), padding=trans_pad, groups=num_images * num_sequences) return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0) def _apply_feat_transpose_v2(feat, input, filter_ksz): """Fast forward and slow backward""" multiple_filters = (input.dim() == 5) num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 num_filters = input.shape[2] if multiple_filters else 1 if isinstance(filter_ksz, int): filter_ksz = (filter_ksz, filter_ksz) trans_pad = [(ksz-1)//2 for ksz in filter_ksz] if multiple_filters: filter_grad = F.conv2d(input.reshape(-1, num_filters, input.shape[-2], input.shape[-1]).permute(1,0,2,3), feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]), padding=trans_pad, groups=num_images * num_sequences) if num_images == 1: return filter_grad.view(num_filters, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).flip((3,4)).permute(1,0,2,3,4) 
return filter_grad.view(num_filters, num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).flip((3,4)).permute(1,0,2,3,4) filter_grad = F.conv2d(input.reshape(1, -1, input.shape[-2], input.shape[-1]), feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]), padding=trans_pad, groups=num_images * num_sequences) return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0).flip((2,3)) def _apply_feat_transpose_v3(feat, input, filter_ksz): """Slow forward fast backward""" multiple_filters = (input.dim() == 5) num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 num_filters = input.shape[2] if multiple_filters else 1 if isinstance(filter_ksz, int): filter_ksz = (filter_ksz, filter_ksz) trans_pad = [ksz//2 for ksz in filter_ksz] filter_grad = F.conv2d(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]).permute(1,0,2,3), input.reshape(-1, 1, input.shape[-2], input.shape[-1]), padding=trans_pad, groups=num_images * num_sequences) if multiple_filters: if num_images == 1: return filter_grad.view(-1, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).permute(1,2,0,3,4) return filter_grad.view(-1, num_images, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,2,0,3,4) if num_images == 1: return filter_grad.permute(1,0,2,3) return filter_grad.view(-1, num_images, num_sequences, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,0,2,3) def _apply_feat_transpose_v4(feat, input, filter_ksz): """Slow forward fast backward""" num_images = feat.shape[0] num_sequences = feat.shape[1] if feat.dim() == 5 else 1 if isinstance(filter_ksz, int): filter_ksz = (filter_ksz, filter_ksz) trans_pad = [ksz//2 for ksz in filter_ksz] filter_grad = F.conv2d(feat.permute(2,1,0,3,4).reshape(feat.shape[-3], -1, feat.shape[-2], feat.shape[-1]), input.permute(1,0,2,3), padding=trans_pad, 
groups=num_sequences) return filter_grad.permute(1,0,2,3) def filter_gradient(feat, filter, label=None, training=True): """Computes gradient of the filter when applied on the input features and ground truth label. args: feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W) filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW) label: Ground truth label in the L2 loss. Dimensions (images_in_sequence, sequences, yH, yW) output: filter_gradient: Dimensions same as input filter (sequences, feat_dim, fH, fW) """ residuals = apply_filter(feat, filter) if label is not None: residuals = residuals - label filter_ksz = (filter.shape[-2], filter.shape[-1]) return apply_feat_transpose(feat, residuals, filter_ksz, training=training) ================================================ FILE: external/AR/ltr/models/layers/normalization.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F class InstanceL2Norm(nn.Module): """Instance L2 normalization. 
""" def __init__(self, size_average=True, eps=1e-5, scale=1.0): super().__init__() self.size_average = size_average self.eps = eps self.scale = scale def forward(self, input): if self.size_average: return input * (self.scale * ((input.shape[1] * input.shape[2] * input.shape[3]) / ( torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps)).sqrt()) else: return input * (self.scale / (torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps).sqrt()) ================================================ FILE: external/AR/ltr/models/layers/transform.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict def interpolate(x, sz): """Interpolate 4D tensor x to size sz.""" sz = sz.tolist() if torch.is_tensor(sz) else sz return F.interpolate(x, sz, mode='bilinear', align_corners=False) if x.shape[-2:] != sz else x class InterpCat(nn.Module): """Interpolate and concatenate features of different resolutions.""" def forward(self, input): if isinstance(input, (dict, OrderedDict)): input = list(input.values()) output_shape = None for x in input: if output_shape is None or output_shape[0] > x.shape[-2]: output_shape = x.shape[-2:] return torch.cat([interpolate(x, output_shape) for x in input], dim=-3) ================================================ FILE: external/AR/ltr/models/loss/__init__.py ================================================ from .target_classification import LBHinge ================================================ FILE: external/AR/ltr/models/loss/kl_regression.py ================================================ import math import torch import torch.nn as nn from torch.nn import functional as F class KLRegression(nn.Module): """KL-divergence loss for probabilistic regression. 
It is computed using Monte Carlo (MC) samples from an arbitrary distribution.""" def __init__(self, eps=0.0): super().__init__() self.eps = eps def forward(self, scores, sample_density, gt_density, mc_dim=-1): """Args: scores: predicted score values sample_density: probability density of the sample distribution gt_density: probability density of the ground truth distribution mc_dim: dimension of the MC samples""" exp_val = scores - torch.log(sample_density + self.eps) L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \ torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim) return L.mean() class MLRegression(nn.Module): """Maximum likelihood loss for probabilistic regression. It is computed using Monte Carlo (MC) samples from an arbitrary distribution.""" def __init__(self, eps=0.0): super().__init__() self.eps = eps def forward(self, scores, sample_density, gt_density=None, mc_dim=-1): """Args: scores: predicted score values. First sample must be ground-truth sample_density: probability density of the sample distribution gt_density: not used mc_dim: dimension of the MC samples. Only mc_dim=1 supported""" assert mc_dim == 1 assert (sample_density[:,0,...] == -1).all() exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps) L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...] loss = L.mean() return loss class KLRegressionGrid(nn.Module): """KL-divergence loss for probabilistic regression. 
It is computed using the grid integration strategy.""" def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0): """Args: scores: predicted score values gt_density: probability density of the ground truth distribution grid_dim: dimension(s) of the grid grid_scale: area of one grid cell""" score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim) L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr return L.mean() ================================================ FILE: external/AR/ltr/models/loss/target_classification.py ================================================ import torch.nn as nn import torch from torch.nn import functional as F class LBHinge(nn.Module): """Loss that uses a 'hinge' on the lower bound. This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is also smaller than that threshold. args: error_matric: What base loss to use (MSE by default). threshold: Threshold to use for the hinge. clip: Clip the loss if it is above this value. 
""" def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None): super().__init__() self.error_metric = error_metric self.threshold = threshold if threshold is not None else -100 self.clip = clip def forward(self, prediction, label, target_bb=None): negative_mask = (label < self.threshold).float() positive_mask = (1.0 - negative_mask) prediction = negative_mask * F.relu(prediction) + positive_mask * prediction loss = self.error_metric(prediction, positive_mask * label) if self.clip is not None: loss = torch.min(loss, torch.tensor([self.clip], device=loss.device)) return loss ================================================ FILE: external/AR/ltr/models/meta/__init__.py ================================================ ================================================ FILE: external/AR/ltr/models/meta/steepestdescent.py ================================================ import math import torch import torch.nn as nn from pytracking import TensorList from ltr.models.layers import activation class GNSteepestDescent(nn.Module): """General module for steepest descent based meta learning.""" def __init__(self, residual_module, num_iter=1, compute_losses=False, detach_length=float('Inf'), parameter_batch_dim=0, residual_batch_dim=0, steplength_reg=0.0, filter_dilation_factors=None): super().__init__() self.residual_module = residual_module self.num_iter = num_iter self.compute_losses = compute_losses self.detach_length = detach_length self.steplength_reg = steplength_reg self._parameter_batch_dim = parameter_batch_dim self._residual_batch_dim = residual_batch_dim self.filter_dilation_factors = filter_dilation_factors def _sqr_norm(self, x: TensorList, batch_dim=0): sum_keep_batch_dim = lambda e: e.sum(dim=[d for d in range(e.dim()) if d != batch_dim]) return sum((x * x).apply(sum_keep_batch_dim)) def _compute_loss(self, res): return sum((res * res).sum()) / sum(res.numel()) def forward(self, meta_parameter: TensorList, num_iter=None, *args, **kwargs): # Make sure 
grad is enabled torch_grad_enabled = torch.is_grad_enabled() torch.set_grad_enabled(True) num_iter = self.num_iter if num_iter is None else num_iter meta_parameter_iterates = [meta_parameter] losses = [] for i in range(num_iter): if i > 0 and i % self.detach_length == 0: meta_parameter = meta_parameter.detach() meta_parameter.requires_grad_(True) # Compute residual vector r = self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs) if self.compute_losses: losses.append(self._compute_loss(r)) # Compute gradient of loss u = r.clone() g = TensorList(torch.autograd.grad(r, meta_parameter, u, create_graph=True)) # Multiply gradient with Jacobian h = TensorList(torch.autograd.grad(g, u, g, create_graph=True)) # Compute squared norms ip_gg = self._sqr_norm(g, batch_dim=self._parameter_batch_dim) ip_hh = self._sqr_norm(h, batch_dim=self._residual_batch_dim) # Compute step length alpha = ip_gg / (ip_hh + self.steplength_reg * ip_gg).clamp(1e-8) # Compute optimization step step = g.apply(lambda e: alpha.reshape([-1 if d==self._parameter_batch_dim else 1 for d in range(e.dim())]) * e) # Add step to parameter meta_parameter = meta_parameter - step meta_parameter_iterates.append(meta_parameter) if self.compute_losses: losses.append(self._compute_loss(self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs))) # Reset the grad enabled flag torch.set_grad_enabled(torch_grad_enabled) if not torch_grad_enabled: meta_parameter.detach_() for w in meta_parameter_iterates: w.detach_() for l in losses: l.detach_() return meta_parameter, meta_parameter_iterates, losses ================================================ FILE: external/AR/ltr/models/neck/CorrNL.py ================================================ import torch.nn as nn import torch from ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D from torch.nn import functional as F from ltr.models.neck.neck_utils import * class 
CorrNL(nn.Module):
    """Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.
    It uses two backbone feature layers as input.
    args:
        input_dim:  Feature dimensionality of the two input backbone layers.
        pred_input_dim:  Dimensionality input the the prediction network.
        pred_inter_dim:  Intermediate dimensionality in the prediction network."""

    def __init__(self, pool_size=8, use_NL=True):
        super().__init__()
        # PrRoIPool at 1/16 stride; the correlation output has pool_size^2 channels.
        self.prroi_pool = PrRoIPool2D(pool_size, pool_size, 1/16)
        num_corr_channel = pool_size*pool_size
        self.channel_attention = SEModule(num_corr_channel,reduction=4)
        self.spatial_attention = NONLocalBlock2D(in_channels=num_corr_channel)
        self.use_NL = use_NL

    def forward(self, feat1, feat2, bb1):
        """Runs the ATOM IoUNet during training operation.
        This forward pass is mainly used for training. Call the individual functions during tracking instead.
        args:
            feat1:  Features from the reference frames (4 or 5 dims).
            feat2:  Features from the test frames (4 or 5 dims).
            bb1:  Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).
            proposals2:  Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4)."""
        assert bb1.dim() == 3
        # num_images, num_sequences = bb1.size()[:2] # 1, 64
        # Extract first train sample
        if len(feat1)==1:
            feat1 = feat1[0]  # size is (64, C, H, W)
            feat2 = feat2[0]  # size is (64, C, H, W)
            bb1 = bb1[0,...]  # (64, 4)
        else:
            raise ValueError("Only support single-layer feature map")

        '''get PrRoIPool feature '''
        # Add batch_index to rois
        batch_size = bb1.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64, 1)
        # input bb is in format xywh, convert it to x0y0x1y1 format
        bb1 = bb1.clone()
        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]
        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)
        feat_roi1 = self.prroi_pool(feat1, roi1)  # (64, C, H, W)
        feat_corr,_ = self.corr_fun(feat_roi1, feat2)
        # print('feature size after correlation:', feat_corr.size())  # (batch, St*St, Sr, Sr)
        '''channel attention: Squeeze and Excitation'''
        feat_ca = self.channel_attention(feat_corr)  # compute channel-attention features
        '''spatial attention: Non-local 2D'''
        feat_sa = self.spatial_attention(feat_ca)
        return feat_sa

    def get_ref_kernel(self, feat1, bb1):
        # Pool the reference-frame target region and cache it as the
        # correlation kernel for later fuse_feat calls.
        assert bb1.dim() == 3
        # num_images, num_sequences = bb1.size()[:2] # 1, 64
        # Extract first train sample
        if len(feat1) == 1:
            feat1 = feat1[0]  # size is (64, C, H, W)
            bb1 = bb1[0, ...]  # (64, 4)
        else:
            raise ValueError("Only support single-layer feature map")

        '''get PrRoIPool feature '''
        # Add batch_index to rois
        batch_size = bb1.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64, 1)
        # input bb is in format xywh, convert it to x0y0x1y1 format
        bb1 = bb1.clone()
        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]
        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)
        # Note: feat1 and roi1 must be CUDA tensors (PrRoIPool requirement).
        '''注意: feat1 and roi1 must be cuda tensor'''
        self.ref_kernel = self.prroi_pool(feat1.float(), roi1)  # (64, C, H, W)
        # self.ref_kernel.half()

    def fuse_feat(self, feat2):
        '''fuse features from reference and test branch'''
        if len(feat2) == 1:
            feat2 = feat2[0]
        '''Step1: pixel-wise correlation'''
        feat_corr,_ = self.corr_fun(self.ref_kernel, feat2)
        # print('feature size after correlation:', feat_corr.size())  # (batch, St*St, Sr, Sr) (batch, 64, 16, 16)
        '''Step2: channel attention: Squeeze and Excitation'''
        feat_ca = self.channel_attention(feat_corr)  # compute channel-attention features
        if not self.use_NL:
            # print('not use non-local')
            return feat_ca
        else:
            '''Step3: spatial attention: Non-local 2D'''
            feat_sa = self.spatial_attention(feat_ca)
            return feat_sa

    def corr_fun(self, Kernel_tmp, Feature, KERs=None):
        # Per-sample pixel-wise correlation: each spatial position of the
        # pooled kernel becomes one 1x1 conv filter applied to the test features.
        size = Kernel_tmp.size()
        CORR = []
        Kernel = []
        for i in range(len(Feature)):
            ker = Kernel_tmp[i:i + 1]
            fea = Feature[i:i + 1]
            ker = ker.view(size[1], size[2] * size[3]).transpose(0, 1)
            ker = ker.unsqueeze(2).unsqueeze(3)
            if not (type(KERs) == type(None)):
                ker = torch.cat([ker, KERs[i]], 0)
            co = F.conv2d(fea, ker.contiguous())
            CORR.append(co)
            ker = ker.unsqueeze(0)
            Kernel.append(ker)
        corr = torch.cat(CORR, 0)
        Kernel = torch.cat(Kernel, 0)
        return corr, Kernel


================================================
FILE: external/AR/ltr/models/neck/neck_utils.py
================================================
import torch.nn as nn
import torch
from torch.nn import functional as F


def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_planes,
out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(out_planes), nn.ReLU(inplace=True)) '''Channel attention module''' class SEModule(nn.Module): def __init__(self, channels, reduction=4): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x '''Non-local module''' class _NonLocalBlockND(nn.Module): def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True): """ :param in_channels: :param inter_channels: :param dimension: :param sub_sample: :param bn_layer: """ super(_NonLocalBlockND, self).__init__() assert dimension in [1, 2, 3] self.dimension = dimension self.sub_sample = sub_sample self.in_channels = in_channels self.inter_channels = inter_channels if self.inter_channels is None: self.inter_channels = in_channels // 2 if self.inter_channels == 0: self.inter_channels = 1 if dimension == 3: conv_nd = nn.Conv3d max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2)) bn = nn.BatchNorm3d elif dimension == 2: conv_nd = nn.Conv2d max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2)) bn = nn.BatchNorm2d else: conv_nd = nn.Conv1d max_pool_layer = nn.MaxPool1d(kernel_size=(2)) bn = nn.BatchNorm1d self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0) if bn_layer: self.W = nn.Sequential( conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0), bn(self.in_channels) ) nn.init.constant_(self.W[1].weight, 0) nn.init.constant_(self.W[1].bias, 0) else: self.W = 
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0) nn.init.constant_(self.W.weight, 0) nn.init.constant_(self.W.bias, 0) self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0) self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0) if sub_sample: self.g = nn.Sequential(self.g, max_pool_layer) self.phi = nn.Sequential(self.phi, max_pool_layer) def forward(self, x, return_nl_map=False): """ :param x: (b, c, t, h, w) :param return_nl_map: if True return z, nl_map, else only return z. :return: """ batch_size = x.size(0) g_x = self.g(x).view(batch_size, self.inter_channels, -1) g_x = g_x.permute(0, 2, 1) theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) theta_x = theta_x.permute(0, 2, 1) phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) f = torch.matmul(theta_x, phi_x) f_div_C = F.softmax(f, -1) y = torch.matmul(f_div_C, g_x) y = y.permute(0, 2, 1).contiguous() y = y.view(batch_size, self.inter_channels, *x.size()[2:]) W_y = self.W(y) z = W_y + x if return_nl_map: return z, f_div_C return z class NONLocalBlock2D(_NonLocalBlockND): def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True): super(NONLocalBlock2D, self).__init__(in_channels, inter_channels=inter_channels, dimension=2, sub_sample=sub_sample, bn_layer=bn_layer,) ================================================ FILE: external/AR/ltr/models/target_classifier/__init__.py ================================================ from .linear_filter import LinearFilter ================================================ FILE: external/AR/ltr/models/target_classifier/features.py ================================================ import torch from torch import nn import torch.nn.functional as F from torchvision.models.resnet import BasicBlock, Bottleneck from ltr.models.layers.normalization 
def residual_basic_block(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,
                         interp_cat=False, final_relu=False, init_pool=False):
    """Construct a network block based on the BasicBlock used in ResNet 18 and 34.

    args:
        feature_dim: channel width of the stacked residual blocks.
        num_blocks: number of BasicBlock units to stack.
        l2norm: append an InstanceL2Norm layer at the end.
        final_conv: append a 3x3 conv projecting to out_dim.
        norm_scale: scale passed to InstanceL2Norm.
        out_dim: output channels of the last block/conv (defaults to feature_dim).
        interp_cat: prepend an InterpCat layer.
        final_relu: append a ReLU after the final conv.
        init_pool: prepend a stride-2 max pooling.
    returns:
        nn.Sequential containing the assembled layers.
    """
    target_dim = feature_dim if out_dim is None else out_dim

    layers = []
    if interp_cat:
        layers.append(InterpCat())
    if init_pool:
        layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

    # Only the very last BasicBlock projects to target_dim, and only when no
    # final conv follows (otherwise the final conv performs the projection).
    last_wide = num_blocks - 1 + int(final_conv)
    for idx in range(num_blocks):
        layers.append(BasicBlock(feature_dim, feature_dim if idx < last_wide else target_dim))

    if final_conv:
        layers.append(nn.Conv2d(feature_dim, target_dim, kernel_size=3, padding=1, bias=False))
    if final_relu:
        layers.append(nn.ReLU(inplace=True))
    if l2norm:
        layers.append(InstanceL2Norm(scale=norm_scale))

    return nn.Sequential(*layers)
class FilterPool(nn.Module):
    """Pool the target region in a feature map.
    args:
        filter_size: Size of the filter.
        feature_stride: Input feature stride.
        pool_square: Do a square pooling instead of pooling the exact target region."""

    def __init__(self, filter_size=1, feature_stride=16, pool_square=False):
        super().__init__()
        # PrRoIPool maps image-coordinate ROIs onto the stride-reduced feature
        # map; 1/feature_stride converts image coords to feature coords.
        self.prroi_pool = PrRoIPool2D(filter_size, filter_size, 1/feature_stride)
        self.pool_square = pool_square

    def forward(self, feat, bb):
        """Pool the regions in bb.
        args:
            feat: Input feature maps. Dims (num_samples, feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (num_samples, 4).
        returns:
            pooled_feat: Pooled features. Dims (num_samples, feat_dim, wH, wW)."""

        # Add batch_index to rois: ROI i is pooled from feature map i.
        bb = bb.reshape(-1,4)
        num_images_total = bb.shape[0]
        batch_index = torch.arange(num_images_total, dtype=torch.float32).reshape(-1, 1).to(bb.device)

        # input bb is in format xywh, convert it to x0y0x1y1 format
        pool_bb = bb.clone()

        if self.pool_square:
            # Replace the box by an equal-area square with the same center.
            bb_sz = pool_bb[:, 2:4].prod(dim=1, keepdim=True).sqrt()
            pool_bb[:, :2] += pool_bb[:, 2:]/2 - bb_sz/2
            pool_bb[:, 2:] = bb_sz

        # (x, y, w, h) -> (x0, y0, x1, y1)
        pool_bb[:, 2:4] = pool_bb[:, 0:2] + pool_bb[:, 2:4]
        # PrRoIPool expects rois as (batch_index, x0, y0, x1, y1).
        roi1 = torch.cat((batch_index, pool_bb), dim=1)

        return self.prroi_pool(feat, roi1)
class FilterInitializer(nn.Module):
    """Initializes a target classification filter by applying a number of conv layers before
    and after pooling the target region.

    args:
        filter_size: Size of the filter.
        feature_dim: Input feature dimentionality.
        feature_stride: Input feature stride.
        pool_square: Do a square pooling instead of pooling the exact target region.
        filter_norm: Normalize the output filter with its size in the end.
        num_filter_pre_convs: Conv layers before pooling.
        num_filter_post_convs: Conv layers after pooling."""

    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,
                 num_filter_pre_convs=1, num_filter_post_convs=0):
        super().__init__()

        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)
        self.filter_norm = filter_norm

        # Convs applied to the full feature map before target-region pooling.
        pre_convs = [conv_block(feature_dim, feature_dim, kernel_size=3, padding=1)
                     for _ in range(num_filter_pre_convs)]
        self.filter_pre_layers = nn.Sequential(*pre_convs) if pre_convs else None

        # Convs applied to the pooled region, always terminated by a plain 1x1 conv.
        post_convs = [conv_block(feature_dim, feature_dim, kernel_size=1, padding=0)
                      for _ in range(num_filter_post_convs)]
        post_convs.append(nn.Conv2d(feature_dim, feature_dim, kernel_size=1, padding=0))
        self.filter_post_layers = nn.Sequential(*post_convs)

        # He-style initialization for convs, constant for batchnorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, feat, bb):
        """Runs the initializer module. Note that [] denotes an optional dimension.
        args:
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights: The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_images = 1 if bb.dim() != 3 else bb.shape[0]

        if self.filter_pre_layers is not None:
            feat = self.filter_pre_layers(feat.reshape(-1, *feat.shape[-3:]))

        weights = self.filter_post_layers(self.filter_pool(feat, bb))

        # Average the per-image filters into a single filter per sequence.
        if num_images > 1:
            weights = weights.reshape(num_images, -1, *weights.shape[-3:]).mean(dim=0)

        if self.filter_norm:
            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])

        return weights
class FilterInitializerZero(nn.Module):
    """Initializes a target classification filter with zeros.

    args:
        filter_size: Size of the filter.
        feature_dim: Input feature dimentionality."""

    def __init__(self, filter_size=1, feature_dim=256):
        super().__init__()
        # Stored as (feat_dim, wH, wW); the sequence dim is added in forward().
        self.filter_size = (feature_dim, filter_size, filter_size)

    def forward(self, feat, bb):
        """Runs the initializer module. Note that [] denotes an optional dimension.
        The bounding boxes are ignored -- the filter is always all zeros.
        args:
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords (unused).
        returns:
            weights: The output weights. Dims (sequences, feat_dim, wH, wW)."""
        num_sequences = 1 if feat.dim() != 5 else feat.shape[1]
        # new_zeros keeps the dtype/device of the incoming features.
        return feat.new_zeros((num_sequences,) + self.filter_size)
class FilterInitializerSiamese(nn.Module):
    """Initializes a target classification filter by only pooling the target region
    (similar to Siamese trackers).

    args:
        filter_size: Size of the filter.
        feature_stride: Input feature stride.
        pool_square: Do a square pooling instead of pooling the exact target region.
        filter_norm: Normalize the output filter with its size in the end."""

    def __init__(self, filter_size=1, feature_stride=16, pool_square=False, filter_norm=True):
        super().__init__()
        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)
        self.filter_norm = filter_norm

        # He-style init for any conv submodules, constant init for batchnorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, feat, bb):
        """Runs the initializer module. Note that [] denotes an optional dimension.
        args:
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
        returns:
            weights: The output weights. Dims (sequences, feat_dim, wH, wW)."""

        num_images = feat.shape[0]
        pooled = self.filter_pool(feat.reshape(-1, *feat.shape[-3:]), bb)

        # Average the per-image filters into a single filter per sequence.
        weights = pooled if num_images == 1 else \
            pooled.reshape(num_images, -1, *pooled.shape[-3:]).mean(dim=0)

        if self.filter_norm:
            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])

        return weights
class LinearFilter(nn.Module):
    """Target classification filter module.

    args:
        filter_size: Size of filter (int).
        filter_initializer: Filter initializer module.
        filter_optimizer: Filter optimizer module.
        feature_extractor: Feature extractor module applied to the input backbone features."""

    def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None):
        super().__init__()

        self.filter_size = filter_size

        # Modules
        self.filter_initializer = filter_initializer
        self.filter_optimizer = filter_optimizer
        self.feature_extractor = feature_extractor

        # Init weights.
        # BUGFIX: feature_extractor defaults to None (and forward/extract
        # explicitly support None), but the original code unconditionally
        # iterated self.feature_extractor.modules(), crashing on construction
        # with the default argument. Guard the init loop instead.
        if self.feature_extractor is not None:
            for m in self.feature_extractor.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()

    def forward(self, train_feat, test_feat, train_bb, *args, **kwargs):
        """Learns a target classification filter based on the train samples and return the resulting classification
        scores on the test samples.
        The forward function is ONLY used for training. Call the individual functions during tracking.
        args:
            train_feat: Backbone features for the train samples (4 or 5 dims).
            test_feat: Backbone features for the test samples (4 or 5 dims).
            train_bb: Target boxes (x,y,w,h) for the train samples in image coordinates. Dims (images, sequences, 4).
            *args, **kwargs: These are passed to the optimizer module.
        returns:
            test_scores: Classification scores on the test samples."""

        assert train_bb.dim() == 3

        num_sequences = train_bb.shape[1]

        # Collapse the (images, sequences) dims before feature extraction.
        if train_feat.dim() == 5:
            train_feat = train_feat.reshape(-1, *train_feat.shape[-3:])
        if test_feat.dim() == 5:
            test_feat = test_feat.reshape(-1, *test_feat.shape[-3:])

        # Extract features
        train_feat = self.extract_classification_feat(train_feat, num_sequences)
        test_feat = self.extract_classification_feat(test_feat, num_sequences)

        # Train filter (renamed from `filter` to avoid shadowing the builtin).
        filter_weights, filter_iter, losses = self.get_filter(train_feat, train_bb, *args, **kwargs)

        # Classify samples using all returned filter iterates.
        test_scores = [self.classify(f, test_feat) for f in filter_iter]

        return test_scores

    def extract_classification_feat(self, feat, num_sequences=None):
        """Extract classification features based on the input backbone features."""
        if self.feature_extractor is None:
            return feat
        if num_sequences is None:
            return self.feature_extractor(feat)

        output = self.feature_extractor(feat)
        return output.reshape(-1, num_sequences, *output.shape[-3:])

    def classify(self, weights, feat):
        """Run classifier (filter) on the features (feat)."""
        scores = filter_layer.apply_filter(feat, weights)
        return scores

    def get_filter(self, feat, bb, *args, **kwargs):
        """Outputs the learned filter based on the input features (feat) and target boxes (bb) by running the
        filter initializer and optimizer. Note that [] denotes an optional dimension.
        args:
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
            *args, **kwargs: These are passed to the optimizer module.
        returns:
            weights: The final optimized weights. Dims (sequences, feat_dim, wH, wW).
            weight_iterates: The weights computed in each iteration (including initial input and final output).
            losses: Train losses."""

        weights = self.filter_initializer(feat, bb)

        if self.filter_optimizer is not None:
            weights, weights_iter, losses = self.filter_optimizer(weights, feat=feat, bb=bb, *args, **kwargs)
        else:
            # No optimizer: the initializer output is the final filter.
            weights_iter = [weights]
            losses = None

        return weights, weights_iter, losses

    def train_classifier(self, backbone_feat, bb):
        """Learn a filter from training frames only (used during tracking)."""
        num_sequences = bb.shape[1]

        if backbone_feat.dim() == 5:
            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])

        # Extract features
        train_feat = self.extract_classification_feat(backbone_feat, num_sequences)

        # Get filters from each iteration
        final_filter, _, train_losses = self.get_filter(train_feat, bb)
        return final_filter, train_losses

    def track_frame(self, filter_weights, backbone_feat):
        """Apply a learned filter to one (batch of) test frame(s)."""
        if backbone_feat.dim() == 5:
            num_sequences = backbone_feat.shape[1]
            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])
        else:
            num_sequences = None

        test_feat = self.extract_classification_feat(backbone_feat, num_sequences)

        scores = filter_layer.apply_filter(test_feat, filter_weights)

        return scores
    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0, mask_init_factor=4.0, score_act='relu', act_param=None, min_filter_reg=1e-3, mask_act='sigmoid', detach_length=float('Inf'), alpha_eps=0):
        """See the class docstring for the meaning of the arguments."""
        super().__init__()

        self.num_iter = num_iter
        self.feat_stride = feat_stride
        # Step length is learned in log space so it stays strictly positive.
        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))
        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))
        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)
        self.min_filter_reg = min_filter_reg
        self.detach_length = detach_length
        self.alpha_eps = alpha_eps

        # Distance coordinates
        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement
        if init_gauss_sigma == 0:
            # Degenerate sigma: initialize the label as a delta at distance 0.
            init_gauss = torch.zeros_like(d)
            init_gauss[0,0,0,0] = 1
        else:
            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)

        # Module that predicts the target label function (y in the paper)
        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()

        # Module that predicts the target mask (m in the paper)
        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]
        if mask_act == 'sigmoid':
            mask_layers.append(nn.Sigmoid())
            init_bias = 0.0
        elif mask_act == 'linear':
            init_bias = 0.5
        else:
            raise ValueError('Unknown activation')
        self.target_mask_predictor = nn.Sequential(*mask_layers)
        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias

        # Module that predicts the residual weights (v in the paper)
        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
        self.spatial_weight_predictor.weight.data.fill_(1.0)

        # The score activation and its derivative
        if score_act == 'bentpar':
            self.score_activation = activation.BentIdentPar(act_param)
            self.score_activation_deriv = activation.BentIdentParDeriv(act_param)
        elif score_act == 'relu':
            self.score_activation = activation.LeakyReluPar()
            self.score_activation_deriv = activation.LeakyReluParDeriv()
        else:
            raise ValueError('Unknown score activation')

    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):
        """Runs the optimizer module. Note that [] denotes an optional dimension.
        args:
            weights: Initial weights. Dims (sequences, feat_dim, wH, wW).
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
            sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]).
            num_iter: Number of iterations to run.
            compute_losses: Whether to compute the (train) loss in each iteration.
        returns:
            weights: The final optimized weights.
            weight_iterates: The weights computed in each iteration (including initial input and final output).
            losses: Train losses."""

        # Sizes
        num_iter = self.num_iter if num_iter is None else num_iter
        num_images = feat.shape[0]
        num_sequences = feat.shape[1] if feat.dim() == 5 else 1
        filter_sz = (weights.shape[-2], weights.shape[-1])
        # Score map size matches the feature map, adjusted for filter parity.
        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)

        # Get learnable scalars
        step_length_factor = torch.exp(self.log_step_length)
        # Squared so the effective regularizer is non-negative, clamped for stability.
        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)

        # Compute distance map: distance of every score-map cell to the box center.
        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0
        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,)) - dmap_offset
        dist_map = self.distance_map(center, output_sz)

        # Compute label map masks and weight (all predicted from the distance map).
        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])
        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])
        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])

        # Get total sample weights
        if sample_weight is None:
            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight
        elif isinstance(sample_weight, torch.Tensor):
            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1) * spatial_weight

        backprop_through_learning = (self.detach_length > 0)

        weight_iterates = [weights]
        losses = []

        for i in range(num_iter):
            # Periodically cut the autograd graph to bound backprop depth.
            if not backprop_through_learning or (i > 0 and i % self.detach_length == 0):
                weights = weights.detach()

            # Compute residuals
            scores = filter_layer.apply_filter(feat, weights)
            scores_act = self.score_activation(scores, target_mask)
            score_mask = self.score_activation_deriv(scores, target_mask)
            residuals = sample_weight * (scores_act - label_map)

            if compute_losses:
                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)

            # Compute gradient (apply_feat_transpose is conv-transpose w.r.t. the filter).
            residuals_mapped = score_mask * (sample_weight * residuals)
            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \
                           reg_weight * weights

            # Map the gradient with the Jacobian
            scores_grad = filter_layer.apply_filter(feat, weights_grad)
            scores_grad = sample_weight * (score_mask * scores_grad)

            # Compute optimal step length (Gauss-Newton step in the gradient direction).
            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))
            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)
            alpha = alpha_num / alpha_den

            # Update filter
            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad

            # Add the weight iterate
            weight_iterates.append(weights)

        if compute_losses:
            # Final loss with the last filter iterate.
            scores = filter_layer.apply_filter(feat, weights)
            scores = self.score_activation(scores, target_mask)
            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences)

        return weights, weight_iterates, losses
    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, gauss_sigma=1.0, hinge_threshold=-999, init_filter_reg=1e-2, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0):
        """See the class docstring for the meaning of the arguments."""
        super().__init__()

        self.num_iter = num_iter
        self.feat_stride = feat_stride
        # Step length learned in log space so it stays strictly positive.
        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))
        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))
        self.min_filter_reg = min_filter_reg
        self.detach_length = detach_length
        self.hinge_threshold = hinge_threshold
        self.gauss_sigma = gauss_sigma
        self.alpha_eps = alpha_eps

    def get_label(self, center, output_sz):
        """Build a separable Gaussian label map of size output_sz centered at `center`
        (row/col coordinates on the score map)."""
        center = center.reshape(center.shape[0], -1, center.shape[-1])
        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)
        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)
        g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2)
        g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2)
        gauss = g0 * g1
        return gauss

    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):
        """Runs the optimizer module. Note that [] denotes an optional dimension.
        args:
            weights: Initial weights. Dims (sequences, feat_dim, wH, wW).
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
            sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]).
            num_iter: Number of iterations to run.
            compute_losses: Whether to compute the (train) loss in each iteration.
        returns:
            weights: The final optimized weights.
            weight_iterates: The weights computed in each iteration (including initial input and final output).
            losses: Train losses."""

        # Sizes
        num_iter = self.num_iter if num_iter is None else num_iter
        num_images = feat.shape[0]
        num_sequences = feat.shape[1] if feat.dim() == 5 else 1
        filter_sz = (weights.shape[-2], weights.shape[-1])
        # Score map size matches the feature map, adjusted for filter parity.
        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)

        # Get learnable scalars
        step_length_factor = torch.exp(self.log_step_length)
        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)

        # Compute distance map
        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0
        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - dmap_offset
        label_map = self.get_label(center, output_sz)
        # Hinge region: below the threshold only positive scores are penalized.
        target_mask = (label_map > self.hinge_threshold).float()
        label_map *= target_mask

        # Get total sample weights
        if sample_weight is None:
            sample_weight = math.sqrt(1.0 / num_images)
        elif isinstance(sample_weight, torch.Tensor):
            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1)

        weight_iterates = [weights]
        losses = []

        for i in range(num_iter):
            # Periodically cut the autograd graph to bound backprop depth.
            if i > 0 and i % self.detach_length == 0:
                weights = weights.detach()

            # Compute residuals (hinge: ReLU outside the target region).
            scores = filter_layer.apply_filter(feat, weights)
            scores_act = target_mask * scores + (1.0 - target_mask) * F.relu(scores)
            score_mask = target_mask + (1.0 - target_mask) * (scores.detach() > 0).float()
            residuals = sample_weight * (scores_act - label_map)

            if compute_losses:
                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)

            # Compute gradient
            residuals_mapped = score_mask * (sample_weight * residuals)
            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \
                           reg_weight * weights

            # Map the gradient with the Jacobian
            scores_grad = filter_layer.apply_filter(feat, weights_grad)
            scores_grad = sample_weight * (score_mask * scores_grad)

            # Compute optimal step length
            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))
            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)
            alpha = alpha_num / alpha_den

            # Update filter
            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad

            # Add the weight iterate
            weight_iterates.append(weights)

        if compute_losses:
            # Final loss with the last filter iterate.
            scores = filter_layer.apply_filter(feat, weights)
            scores = target_mask * scores + (1.0 - target_mask) * F.relu(scores)
            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences)

        return weights, weight_iterates, losses
    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, init_filter_reg=1e-2, gauss_sigma=1.0, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0, init_uni_weight=None, normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0.0):
        """See the class docstring for the meaning of the arguments."""
        super().__init__()

        self.num_iter = num_iter
        self.feat_stride = feat_stride
        # Step length learned in log space so it stays strictly positive.
        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))
        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))
        self.gauss_sigma = gauss_sigma
        self.min_filter_reg = min_filter_reg
        self.detach_length = detach_length
        self.alpha_eps = alpha_eps
        self.uni_weight = 0 if init_uni_weight is None else init_uni_weight
        self.normalize_label = normalize_label
        self.label_shrink = label_shrink
        self.softmax_reg = softmax_reg
        self.label_threshold = label_threshold

    def get_label_density(self, center, output_sz):
        """Build the target label probability density of size output_sz centered at
        `center` (row/col coordinates). gauss_sigma == 0 yields a one-hot delta;
        otherwise a normalized separable Gaussian, optionally thresholded, mixed
        with a uniform component and shrunk (see class docstring)."""
        center = center.reshape(center.shape[0], -1, center.shape[-1])
        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)
        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)
        dist0 = (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2
        dist1 = (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2
        if self.gauss_sigma == 0:
            # Delta density: 1 at the cell closest to the center in each dimension.
            dist0_view = dist0.reshape(-1, dist0.shape[-2])
            dist1_view = dist1.reshape(-1, dist1.shape[-1])
            one_hot0 = torch.zeros_like(dist0_view)
            one_hot1 = torch.zeros_like(dist1_view)
            one_hot0[torch.arange(one_hot0.shape[0]), dist0_view.argmin(dim=-1)] = 1.0
            one_hot1[torch.arange(one_hot1.shape[0]), dist1_view.argmin(dim=-1)] = 1.0
            gauss = one_hot0.reshape(dist0.shape) * one_hot1.reshape(dist1.shape)
        else:
            g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist0)
            g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist1)
            # 2D Gaussian density normalization constant.
            gauss = (g0 / (2*math.pi*self.gauss_sigma**2)) * g1
        gauss = gauss * (gauss > self.label_threshold).float()
        if self.normalize_label:
            gauss /= (gauss.sum(dim=(-2,-1), keepdim=True) + 1e-8)
        # Mix with a uniform density and apply label shrinking.
        label_dens = (1.0 - self.label_shrink)*((1.0 - self.uni_weight) * gauss + self.uni_weight / (output_sz[0]*output_sz[1]))
        return label_dens

    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):
        """Runs the optimizer module. Note that [] denotes an optional dimension.
        args:
            weights: Initial weights. Dims (sequences, feat_dim, wH, wW).
            feat: Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb: Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).
            sample_weight: Optional weight for each sample. Dims: (images_in_sequence, [sequences]).
            num_iter: Number of iterations to run.
            compute_losses: Whether to compute the (train) loss in each iteration.
        returns:
            weights: The final optimized weights.
            weight_iterates: The weights computed in each iteration (including initial input and final output).
            losses: Train losses."""

        # Sizes
        num_iter = self.num_iter if num_iter is None else num_iter
        num_images = feat.shape[0]
        num_sequences = feat.shape[1] if feat.dim() == 5 else 1
        filter_sz = (weights.shape[-2], weights.shape[-1])
        # Score map size matches the feature map, adjusted for filter parity.
        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)

        # Get learnable scalars
        step_length_factor = torch.exp(self.log_step_length)
        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)

        # Compute label density
        offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0
        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - offset
        label_density = self.get_label_density(center, output_sz)

        # Get total sample weights
        if sample_weight is None:
            sample_weight = torch.Tensor([1.0 / num_images]).to(feat.device)
        elif isinstance(sample_weight, torch.Tensor):
            sample_weight = sample_weight.reshape(num_images, num_sequences, 1, 1)

        exp_reg = 0 if self.softmax_reg is None else math.exp(self.softmax_reg)

        def _compute_loss(scores, weights):
            # KL-divergence-style loss between the score softmax and the label
            # density, plus the filter regularization term.
            return torch.sum(sample_weight.reshape(sample_weight.shape[0], -1) * (torch.log(scores.exp().sum(dim=(-2, -1)) + exp_reg) - (label_density * scores).sum(dim=(-2, -1)))) / num_sequences +\
                   reg_weight * (weights ** 2).sum() / num_sequences

        weight_iterates = [weights]
        losses = []

        for i in range(num_iter):
            # Periodically cut the autograd graph to bound backprop depth.
            if i > 0 and i % self.detach_length == 0:
                weights = weights.detach()

            # Compute "residuals"
            scores = filter_layer.apply_filter(feat, weights)
            scores_softmax = activation.softmax_reg(scores.reshape(num_images, num_sequences, -1), dim=2, reg=self.softmax_reg).reshape(scores.shape)
            res = sample_weight*(scores_softmax - label_density)

            if compute_losses:
                losses.append(_compute_loss(scores, weights))

            # Compute gradient
            weights_grad = filter_layer.apply_feat_transpose(feat, res, filter_sz, training=self.training) + \
                           reg_weight * weights

            # Map the gradient with the Hessian (of the softmax cross-entropy).
            scores_grad = filter_layer.apply_filter(feat, weights_grad)
            sm_scores_grad = scores_softmax * scores_grad
            hes_scores_grad = sm_scores_grad - scores_softmax * torch.sum(sm_scores_grad, dim=(-2,-1), keepdim=True)
            grad_hes_grad = (scores_grad * hes_scores_grad).reshape(num_images, num_sequences, -1).sum(dim=2).clamp(min=0)
            grad_hes_grad = (sample_weight.reshape(sample_weight.shape[0], -1) * grad_hes_grad).sum(dim=0)

            # Compute optimal step length (Newton step in the gradient direction).
            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))
            alpha_den = (grad_hes_grad + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)
            alpha = alpha_num / alpha_den

            # Update filter
            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad

            # Add the weight iterate
            weight_iterates.append(weights)

        if compute_losses:
            # Final loss with the last filter iterate.
            scores = filter_layer.apply_filter(feat, weights)
            losses.append(_compute_loss(scores, weights))

        return weights, weight_iterates, losses
class LinearFilterLearnGen(nn.Module):
    """Residual module for a linear target filter, with learned label map, target mask
    and spatial weights, all predicted from a distance map around the target center.
    args:
        feat_stride:  Stride of the input feature map w.r.t. the image.
        init_filter_reg:  Initial filter regularization weight (learned).
        init_gauss_sigma:  Sigma of the Gaussian used to initialize the label-map predictor.
        num_dist_bins:  Number of bins in the distance map.
        bin_displacement:  Spacing of the distance-map bins.
        mask_init_factor:  Scaling of the target-mask predictor initialization.
        score_act:  Score activation: 'bentpar' or 'relu'.
        act_param:  Parameter passed to the score activation.
        mask_act:  Activation for the target mask: 'sigmoid' or 'linear'."""
    def __init__(self, feat_stride=16, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,
                 mask_init_factor=4.0, score_act='bentpar', act_param=None, mask_act='sigmoid'):
        super().__init__()

        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))
        self.feat_stride = feat_stride
        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)

        # Distance coordinates
        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement
        if init_gauss_sigma == 0:
            # Degenerate sigma: spike in the first bin only.
            init_gauss = torch.zeros_like(d)
            init_gauss[0,0,0,0] = 1
        else:
            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)

        # 1x1 conv over distance bins, initialized to the (shifted) Gaussian profile.
        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()

        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]
        if mask_act == 'sigmoid':
            mask_layers.append(nn.Sigmoid())
            init_bias = 0.0
        elif mask_act == 'linear':
            init_bias = 0.5
        else:
            raise ValueError('Unknown activation')
        self.target_mask_predictor = nn.Sequential(*mask_layers)
        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias

        # Spatial weights start out uniform (all ones).
        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)
        self.spatial_weight_predictor.weight.data.fill_(1.0)

        if score_act == 'bentpar':
            self.score_activation = activation.BentIdentPar(act_param)
        elif score_act == 'relu':
            self.score_activation = activation.LeakyReluPar()
        else:
            raise ValueError('Unknown activation')

    def forward(self, meta_parameter: TensorList, feat, bb, sample_weight=None, is_distractor=None):
        """Compute the data and regularization residuals for the given filter.
        args:
            meta_parameter:  TensorList whose first element is the filter.
            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).
            bb:  Target boxes (x, y, w, h) in image coords. Used only for the target center.
            sample_weight:  Optional per-sample weight (scalar per sample or tensor).
            is_distractor:  Optional boolean mask; distractor samples get their center
                pushed far off the map (99999) so they receive background labels.
        returns:
            TensorList([data_residual, reg_residual])."""
        filter = meta_parameter[0]

        num_images = feat.shape[0]
        num_sequences = feat.shape[1] if feat.dim() == 5 else 1
        filter_sz = (filter.shape[-2], filter.shape[-1])

        # Compute scores
        scores = filter_layer.apply_filter(feat, filter)

        # Compute distance map
        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,))
        if is_distractor is not None:
            center[is_distractor.reshape(-1), :] = 99999

        dist_map = self.distance_map(center, scores.shape[-2:])

        # Compute label map masks and weight
        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])
        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])
        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])

        if sample_weight is None:
            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight
        elif isinstance(sample_weight, torch.Tensor):
            # sqrt because the residual is squared by the outer optimizer.
            sample_weight = sample_weight.sqrt().reshape(-1, 1, 1, 1) * spatial_weight

        # Compute data residual
        scores_act = self.score_activation(scores, target_mask)
        data_residual = sample_weight * (scores_act - label_map)

        # Compute regularization residual. Put batch in second dimension
        reg_residual = self.filter_reg*filter.reshape(1, num_sequences, -1)

        return TensorList([data_residual, reg_residual])
class DiMPnet(nn.Module):
    """The DiMP network.
    args:
        feature_extractor:  Backbone feature extractor network. Must return a dict of feature maps
        classifier:  Target classification module.
        bb_regressor:  Bounding box regression module.
        classification_layer:  Name of the backbone feature layer to use for classification.
        bb_regressor_layer:  Names of the backbone layers to use for bounding box regression."""

    def __init__(self, feature_extractor, classifier, bb_regressor, classification_layer, bb_regressor_layer):
        super().__init__()

        self.feature_extractor = feature_extractor
        self.classifier = classifier
        self.bb_regressor = bb_regressor
        # Normalize to a list so a single layer name and a list are handled uniformly.
        self.classification_layer = [classification_layer] if isinstance(classification_layer, str) else classification_layer
        self.bb_regressor_layer = bb_regressor_layer
        # Union of all backbone layers any head needs, deduplicated and sorted.
        self.output_layers = sorted(list(set(self.classification_layer + self.bb_regressor_layer)))

    def forward(self, train_imgs, test_imgs, train_bb, test_proposals, *args, **kwargs):
        """Runs the DiMP network the way it is applied during training.
        The forward function is ONLY used for training. Call the individual functions during tracking.
        args:
            train_imgs:  Train image samples (images, sequences, 3, H, W).
            test_imgs:  Test image samples (images, sequences, 3, H, W).
            train_bb:  Target boxes (x,y,w,h) for the train images. Dims (images, sequences, 4).
            test_proposals:  Proposal boxes to use for the IoUNet (bb_regressor) module.
            *args, **kwargs:  These are passed to the classifier module.
        returns:
            test_scores:  Classification scores on the test samples.
            iou_pred:  Predicted IoU scores for the test_proposals."""
        assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'

        # Extract backbone features (images and sequences are flattened into one batch dim)
        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))
        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))

        # Classification features
        train_feat_clf = self.get_backbone_clf_feat(train_feat)
        test_feat_clf = self.get_backbone_clf_feat(test_feat)

        # Run classifier module
        target_scores = self.classifier(train_feat_clf, test_feat_clf, train_bb, *args, **kwargs)

        # Get bb_regressor features
        train_feat_iou = self.get_backbone_bbreg_feat(train_feat)
        test_feat_iou = self.get_backbone_bbreg_feat(test_feat)

        # Run the IoUNet module
        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou, train_bb, test_proposals)

        return target_scores, iou_pred

    def get_backbone_clf_feat(self, backbone_feat):
        """Select the classification layer(s) from the backbone output dict.
        Returns the bare tensor when a single layer is configured, else an OrderedDict."""
        feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer})
        if len(self.classification_layer) == 1:
            return feat[self.classification_layer[0]]
        return feat

    def get_backbone_bbreg_feat(self, backbone_feat):
        """Select the bb-regressor layers from the backbone output dict, as a list."""
        return [backbone_feat[l] for l in self.bb_regressor_layer]

    def extract_classification_feat(self, backbone_feat):
        """Run the classifier's feature head on the backbone classification features."""
        return self.classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat))

    def extract_backbone_features(self, im, layers=None):
        """Run the backbone on `im`, returning the requested (default: all needed) layers."""
        if layers is None:
            layers = self.output_layers
        return self.feature_extractor(im, layers)

    def extract_features(self, im, layers=None):
        """Extract arbitrary layers; the pseudo-layer 'classification' additionally runs
        the classification feature head on top of the backbone output."""
        if layers is None:
            layers = self.bb_regressor_layer + ['classification']
        if 'classification' not in layers:
            return self.feature_extractor(im, layers)
        # 'classification' requires the raw classification backbone layers as input.
        backbone_layers = sorted(list(set([l for l in layers + self.classification_layer if l != 'classification'])))
        all_feat = self.feature_extractor(im, backbone_layers)
        all_feat['classification'] = self.extract_classification_feat(all_feat)
        return OrderedDict({l: all_feat[l] for l in layers})
@model_constructor
def dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,
              clf_feat_norm=True, init_filter_norm=False, final_conv=True,
              out_feature_dim=256, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,
              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
              score_act='relu', act_param=None, target_mask_act='sigmoid',
              detach_length=float('Inf'), frozen_backbone_layers=()):
    """Construct a DiMP network with a ResNet-18 backbone.

    Wires together the backbone, the classification feature head, the filter
    initializer, the steepest-descent filter optimizer and the IoUNet bounding
    box regressor into a single DiMPnet module."""
    # ResNet-18 backbone, optionally with some layers frozen.
    backbone = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Normalization scale for the classification features.
    scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Feature head feeding the target classifier.
    feat_head = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                  final_conv=final_conv, norm_scale=scale,
                                                  out_dim=out_feature_dim)

    # Linear filter initializer for the DiMP classifier.
    filter_init = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # Steepest-descent optimizer that refines the filter online.
    filter_opt = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                     init_step_length=optim_init_step,
                                                     init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
                                                     num_dist_bins=num_dist_bins,
                                                     bin_displacement=bin_displacement,
                                                     mask_init_factor=mask_init_factor,
                                                     score_act=score_act, act_param=act_param, mask_act=target_mask_act,
                                                     detach_length=detach_length)

    # Full target classification branch.
    clf = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=filter_init,
                                  filter_optimizer=filter_opt, feature_extractor=feat_head)

    # IoUNet bounding box regression branch.
    iou_net = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # Assemble the complete network.
    return DiMPnet(feature_extractor=backbone, classifier=clf, bb_regressor=iou_net,
                   classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
@model_constructor
def dimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,
              clf_feat_norm=True, init_filter_norm=False, final_conv=True,
              out_feature_dim=512, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,
              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
              score_act='relu', act_param=None, target_mask_act='sigmoid',
              detach_length=float('Inf'), frozen_backbone_layers=()):
    """Construct a DiMP network with a ResNet-50 backbone.

    args mirror dimpnet18; classification_layer must be 'layer3' or 'layer4'
    (it determines the backbone feature dimensionality fed to the classifier head).
    raises:
        ValueError: if classification_layer is not 'layer3' or 'layer4'."""
    # Backbone
    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features: the bottleneck input dim depends on the chosen backbone layer.
    if classification_layer == 'layer3':
        feature_dim = 256
    elif classification_layer == 'layer4':
        feature_dim = 512
    else:
        # FIX: was a bare `raise Exception` with no message; ValueError (a subclass of
        # Exception, so existing handlers still catch it) with a descriptive message.
        raise ValueError('Unsupported classification_layer: {!r} (expected \'layer3\' or \'layer4\')'
                         .format(classification_layer))
    clf_feature_extractor = clf_features.residual_bottleneck(feature_dim=feature_dim,
                                                             num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                             final_conv=final_conv, norm_scale=norm_scale,
                                                             out_dim=out_feature_dim)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                    init_step_length=optim_init_step,
                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,
                                                    num_dist_bins=num_dist_bins,
                                                    bin_displacement=bin_displacement,
                                                    mask_init_factor=mask_init_factor,
                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,
                                                    detach_length=detach_length)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor (ResNet-50 layer2/layer3 feature dims: 4*128 and 4*256)
    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim,
                                       pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net
@model_constructor
def L2dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,
                clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                out_feature_dim=256, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                detach_length=float('Inf'), hinge_threshold=-999, gauss_sigma=1.0, alpha_eps=0):
    """Construct a DiMP network (ResNet-18 backbone) that trains the target filter
    with an L2 hinge loss via the DiMPL2SteepestDescentGN optimizer."""
    # ResNet-18 backbone.
    backbone = backbones.resnet18(pretrained=backbone_pretrained)

    # Normalization scale for the classification features.
    scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Feature head feeding the target classifier.
    feat_head = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                  final_conv=final_conv, norm_scale=scale,
                                                  out_dim=out_feature_dim)

    # Linear filter initializer.
    filter_init = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim)

    # L2 (hinge) steepest-descent filter optimizer.
    filter_opt = clf_optimizer.DiMPL2SteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,
                                                       init_step_length=optim_init_step,
                                                       hinge_threshold=hinge_threshold,
                                                       init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,
                                                       detach_length=detach_length, alpha_eps=alpha_eps)

    # Full target classification branch.
    clf = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=filter_init,
                                  filter_optimizer=filter_opt, feature_extractor=feat_head)

    # IoUNet bounding box regression branch.
    iou_net = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # Assemble the complete network.
    return DiMPnet(feature_extractor=backbone, classifier=clf, bb_regressor=iou_net,
                   classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
@model_constructor
def klcedimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,
                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                  out_feature_dim=256, gauss_sigma=1.0,
                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,
                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,
                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False, init_pool_square=False,
                  frozen_backbone_layers=()):
    """Construct a PrDiMP network with a ResNet-18 backbone (KL/CE objective via the
    PrDiMPSteepestDescentNewton filter optimizer)."""
    # train_feature_extractor=False is implemented by freezing all backbone layers.
    if not train_feature_extractor:
        frozen_backbone_layers = 'all'

    # Backbone
    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                              final_conv=final_conv, norm_scale=norm_scale,
                                                              out_dim=out_feature_dim, final_relu=final_relu)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim,
                                                          init_weights=init_initializer,
                                                          pool_square=init_pool_square)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,
                                                          init_step_length=optim_init_step,
                                                          init_filter_reg=optim_init_reg,
                                                          gauss_sigma=gauss_sigma,
                                                          detach_length=detach_length, alpha_eps=alpha_eps,
                                                          init_uni_weight=init_uni_weight,
                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,
                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,
                                                          label_threshold=label_threshold)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor
    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net
@model_constructor
def klcedimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,
                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,
                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,
                  out_feature_dim=512, gauss_sigma=1.0,
                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),
                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,
                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,
                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False,
                  frozen_backbone_layers=()):
    """Construct a PrDiMP network with a ResNet-50 backbone (KL/CE objective via the
    PrDiMPSteepestDescentNewton filter optimizer)."""
    # train_feature_extractor=False is implemented by freezing all backbone layers.
    if not train_feature_extractor:
        frozen_backbone_layers = 'all'

    # Backbone
    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)

    # Feature normalization
    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))

    # Classifier features
    clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,
                                                             final_conv=final_conv, norm_scale=norm_scale,
                                                             out_dim=out_feature_dim, final_relu=final_relu)

    # Initializer for the DiMP classifier
    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,
                                                          feature_dim=out_feature_dim,
                                                          init_weights=init_initializer)

    # Optimizer for the DiMP classifier
    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,
                                                          init_step_length=optim_init_step,
                                                          init_filter_reg=optim_init_reg,
                                                          gauss_sigma=gauss_sigma,
                                                          detach_length=detach_length, alpha_eps=alpha_eps,
                                                          init_uni_weight=init_uni_weight,
                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,
                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,
                                                          label_threshold=label_threshold)

    # The classifier module
    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,
                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)

    # Bounding box regressor (ResNet-50 layer2/layer3 feature dims: 4*128 and 4*256)
    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim,
                                       pred_inter_dim=iou_inter_dim)

    # DiMP network
    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,
                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])
    return net
def run_training(train_module, train_name, cudnn_benchmark=True):
    """Run a train scripts in train_settings.
    args:
        train_module: Name of module in the "train_settings/" folder.
        train_name: Name of the train settings file.
        cudnn_benchmark: Use cudnn benchmark or not (default is True).
    """
    # OpenCV's own threading interacts badly with the data-loader workers;
    # disabling it avoids strange crashes.
    cv.setNumThreads(0)

    torch.backends.cudnn.benchmark = cudnn_benchmark

    print('Training: {} {}'.format(train_module, train_name))

    # Populate the settings object handed to the training script.
    settings = ws_settings.Settings()
    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)
    settings.module_name = train_module
    settings.script_name = train_name

    # Import the requested training script and invoke its `run` entry point.
    train_script = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
    getattr(train_script, 'run')(settings)
def main():
    """Parse command-line arguments and launch the requested training script."""
    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')
    parser.add_argument('train_module', type=str, help='Name of module in the "train_settings/" folder.')
    parser.add_argument('train_name', type=str, help='Name of the train settings file.')
    # FIX: was `type=bool`, which makes argparse treat ANY non-empty string
    # (including "0" and "False") as True, so `--cudnn_benchmark 0` silently
    # left benchmarking enabled. `type=int` honors the documented 1/0 semantics.
    parser.add_argument('--cudnn_benchmark', type=int, default=1,
                        help='Set cudnn benchmark on (1) or off (0) (default is on).')

    args = parser.parse_args()

    run_training(args.train_module, args.train_name, bool(args.cudnn_benchmark))


if __name__ == '__main__':
    # Spawned workers avoid inheriting state (e.g. CUDA context) from the parent.
    multiprocessing.set_start_method('spawn', force=True)
    main()
def run(settings):
    """Train-settings entry point: train the ATOM IoUNet with default settings,
    additionally using GOT10k for training. Populates `settings`, builds the
    datasets/loaders/network/optimizer, and runs the trainer."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=50,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer (only the bb_regressor head is trained here)
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)
def run(settings):
    """Train-settings entry point: train the ATOM IoUNet with the baseline ATOM*
    settings (GMM-based proposal sampling, larger max_gap). Populates `settings`,
    builds the datasets/loaders/network/optimizer, and runs the trainer."""
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet using the baseline ATOM* settings in [https://arxiv.org/abs/1909.12297].' \
                           'Unlike standard ATOM, it employs the GMM-based proposal sampling and minor parameter changes.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs (GMM proposal sampling instead of min_iou)
    proposal_params = {'proposal_method': 'gmm', 'boxes_per_frame': 128, 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training (note: max_gap=200 here, vs 50 in the default settings)
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=200,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer (only the bb_regressor head is trained here)
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)
fail_safe=False if you are debugging) trainer.train(50, load_latest=True, fail_safe=True) ================================================ FILE: external/AR/ltr/train_settings/bbreg/atom_paper.py ================================================ import torch.nn as nn import torch.optim as optim from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq from ltr.data import processing, sampler, LTRLoader import ltr.models.bbreg.atom as atom_models from ltr import actors from ltr.trainers import LTRTrainer import ltr.data.transforms as tfm def run(settings): # Most common settings are assigned in the settings struct settings.description = 'ATOM IoUNet with default settings according to the paper.' settings.batch_size = 64 settings.num_workers = 8 settings.print_interval = 1 settings.normalize_mean = [0.485, 0.456, 0.406] settings.normalize_std = [0.229, 0.224, 0.225] settings.search_area_factor = 5.0 settings.feature_sz = 18 settings.output_sz = settings.feature_sz * 16 settings.center_jitter_factor = {'train': 0, 'test': 4.5} settings.scale_jitter_factor = {'train': 0, 'test': 0.5} # Train datasets lasot_train = Lasot(settings.env.lasot_dir, split='train') trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11))) coco_train = MSCOCOSeq(settings.env.coco_dir) # Validation datasets trackingnet_val = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11,12))) # The joint augmentation transform, that is applied to the pairs jointly transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05)) # The augmentation transform applied to the training set (individually to each image in the pair) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)) # The augmentation transform applied to the validation set (individually to each image in the pair) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=settings.normalize_mean, 
std=settings.normalize_std)) # Data processing to do on the training pairs proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]} data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, transform=transform_train, joint_transform=transform_joint) # Data processing to do on the validation pairs data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, transform=transform_val, joint_transform=transform_joint) # The sampler for training dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1], samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train) # The loader for training loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1) # The sampler for validation dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50, processing=data_processing_val) # The loader for validation loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1) # Create network and actor net = atom_models.atom_resnet18(backbone_pretrained=True) objective = nn.MSELoss() actor = actors.AtomActor(net=net, objective=objective) # Optimizer optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3) lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 
step_size=15, gamma=0.2) # Create trainer trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler) # Run training (set fail_safe=False if you are debugging) trainer.train(50, load_latest=True, fail_safe=True) ================================================ FILE: external/AR/ltr/train_settings/bbreg/atom_prob_ml.py ================================================ import torch.optim as optim from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k from ltr.data import processing, sampler, LTRLoader import ltr.models.bbreg.atom as atom_models import ltr.models.loss.kl_regression as klreg_losses import ltr.actors.bbreg as bbreg_actors from ltr.trainers import LTRTrainer import ltr.data.transforms as tfm def run(settings): # Most common settings are assigned in the settings struct settings.description = 'ATOM using the probabilistic maximum likelihood trained regression model for bounding-box' \ 'regression presented in [https://arxiv.org/abs/1909.12297].' 
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs.
    # NOTE(review): 'add_mean_box' presumably appends the proposal mean as an extra
    # proposal -- confirm against KLBBregProcessing.
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0, 0),
                       'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)], 'add_mean_box': True}
    data_processing_train = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,
                                                         output_sz=settings.output_sz,
                                                         center_jitter_factor=settings.center_jitter_factor,
                                                         scale_jitter_factor=settings.scale_jitter_factor,
                                                         mode='sequence',
                                                         proposal_params=proposal_params,
                                                         transform=transform_train,
                                                         joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,
                                                       output_sz=settings.output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       proposal_params=proposal_params,
                                                       transform=transform_val,
                                                       joint_transform=transform_joint)

    # The sampler for training (all four datasets weighted equally)
    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=200,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size,
                                      max_gap=200, processing=data_processing_val)

    # The loader for validation (run every 5th epoch)
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers, shuffle=False, drop_last=True,
                           epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = klreg_losses.MLRegression()
    actor = bbreg_actors.AtomBBKLActor(net=net, objective=objective)

    # Optimizer: only the bb_regressor parameters are optimized here.
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: external/AR/ltr/train_settings/dimp/__init__.py
================================================



================================================
FILE: external/AR/ltr/train_settings/dimp/dimp18.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    """Default train settings for DiMP with a ResNet-18 backbone."""
    settings.description = 'Default train settings for DiMP with ResNet18 as backbone.'
    settings.batch_size = 26
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module.
    # Label sigma is expressed relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                    'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    label_function_params=label_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # Train sampler and loader (LaSOT down-weighted to 0.25)
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=30,
                                        num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers, shuffle=False, drop_last=True,
                           epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                            clf_feat_norm=True, final_conv=True, optim_init_step=0.9, optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,
                            bin_displacement=0.1, mask_init_factor=3.0,
                            target_mask_act='sigmoid', score_act='relu')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}

    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}

    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer: per-module learning rates; backbone (feature_extractor) uses the
    # default group lr of 2e-4.
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},
                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},
                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},
                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},
                            {'params': actor.net.feature_extractor.parameters()}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: external/AR/ltr/train_settings/dimp/dimp50.py
================================================
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    """Default train settings for DiMP with a ResNet-50 backbone."""
    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'
    settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/clf_ce', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module.
    # Label sigma is expressed relative to the search region.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                    'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    label_function_params=label_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # Train sampler and loader (LaSOT down-weighted to 0.25)
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=30,
                                        num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers, shuffle=False, drop_last=True,
                           epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                            optim_init_step=0.9, optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,
                            bin_displacement=0.1, mask_init_factor=3.0,
                            target_mask_act='sigmoid', score_act='relu')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}

    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}

    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer: per-module learning rates; unlike dimp18, bb_regressor uses the
    # default group lr (2e-4) and the backbone is fine-tuned at 2e-5.
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},
                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},
                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},
                            {'params': actor.net.bb_regressor.parameters()},
                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: external/AR/ltr/train_settings/dimp/prdimp18.py
================================================
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
import ltr.models.loss.kl_regression as klreg_losses
import ltr.actors.tracking as tracking_actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    """Default train settings for PrDiMP with a ResNet-18 backbone."""
    settings.description = 'Default train settings for PrDiMP with ResNet18 as backbone.'
    settings.batch_size = 26
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module.
    # Label sigma is expressed relative to the search region; the label density
    # (normalized) variant is used for the KL-based classification loss.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05),
                       'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                    'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                            'kernel_sz': settings.target_filter_sz, 'normalize': True}
    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader (LaSOT down-weighted to 0.25)
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=200,
                                        num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers, shuffle=False, drop_last=True,
                           epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.klcedimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                                clf_feat_norm=True, final_conv=True,
                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,
                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05,
                                normalize_label=True, init_initializer='zero')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}
    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}

    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer: backbone (feature_extractor) uses the default group lr of 2e-4.
    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},
                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},
                            {'params': actor.net.feature_extractor.parameters()}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)


================================================
FILE: external/AR/ltr/train_settings/dimp/prdimp50.py
================================================
import torch.optim as optim
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.data import processing, sampler, LTRLoader
from ltr.models.tracking import dimpnet
import ltr.models.loss as ltr_losses
import ltr.models.loss.kl_regression as klreg_losses
import ltr.actors.tracking as tracking_actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
from ltr import MultiGPU


def run(settings):
    """Default train settings for PrDiMP with a ResNet-50 backbone."""
    settings.description = 'Default train settings for PrDiMP with ResNet50 as backbone.'
    settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module.
    # Label sigma is expressed relative to the search region; the label density
    # (normalized) variant is used for the KL-based classification loss.
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05),
                       'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                    'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma,
                            'kernel_sz': settings.target_filter_sz, 'normalize': True}
    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader (LaSOT down-weighted to 0.25)
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],
                                        samples_per_epoch=26000, max_gap=200,
                                        num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size,
                             num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size,
                           num_workers=settings.num_workers, shuffle=False, drop_last=True,
                           epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.klcedimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                                clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,
                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05,
                                normalize_label=True, init_initializer='zero')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce':
klreg_losses.KLRegressionGrid()} loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0} actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight) # Optimizer optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3}, {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3}, {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}], lr=2e-4) lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2) trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler) trainer.train(50, load_latest=True, fail_safe=True) ================================================ FILE: external/AR/ltr/train_settings/dimp/super_dimp.py ================================================ import torch.optim as optim from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq from ltr.data import processing, sampler, LTRLoader from ltr.models.tracking import dimpnet import ltr.models.loss as ltr_losses import ltr.models.loss.kl_regression as klreg_losses import ltr.actors.tracking as tracking_actors from ltr.trainers import LTRTrainer import ltr.data.transforms as tfm from ltr import MultiGPU def run(settings): settings.description = 'SuperDiMP: Combines the DiMP classifier with the PrDiMP bounding box regressor and better' \ 'training settings (larger batch size, inside_major cropping, and flipping augmentation.' \ 'Gives results significantly better than both DiMP-50 and PrDiMP-50.' 
settings.batch_size = 20 settings.num_workers = 8 settings.multi_gpu = False settings.print_interval = 1 settings.normalize_mean = [0.485, 0.456, 0.406] settings.normalize_std = [0.229, 0.224, 0.225] settings.search_area_factor = 6.0 settings.output_sigma_factor = 1/4 settings.target_filter_sz = 4 settings.feature_sz = 22 settings.output_sz = settings.feature_sz * 16 settings.center_jitter_factor = {'train': 3, 'test': 5.5} settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5} settings.hinge_threshold = 0.05 # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss'] # Train datasets lasot_train = Lasot(settings.env.lasot_dir, split='train') got10k_train = Got10k(settings.env.got10k_dir, split='vottrain') trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4))) coco_train = MSCOCOSeq(settings.env.coco_dir) # Validation datasets got10k_val = Got10k(settings.env.got10k_dir, split='votval') # Data transform transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5)) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip(probability=0.5), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)) # The tracking pairs processing module output_sigma = settings.output_sigma_factor / settings.search_area_factor proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]} label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz} label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz} data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor, 
output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, crop_type='inside_major', max_scale_change=1.5, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, label_density_params=label_density_params, transform=transform_train, joint_transform=transform_joint) data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, crop_type='inside_major', max_scale_change=1.5, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, label_density_params=label_density_params, transform=transform_val, joint_transform=transform_joint) # Train sampler and loader dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1], samples_per_epoch=40000, max_gap=200, num_test_frames=3, num_train_frames=3, processing=data_processing_train) loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1) # Validation samplers and loaders dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=10000, max_gap=200, num_test_frames=3, num_train_frames=3, processing=data_processing_val) loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1) # Create network and actor net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5, clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512, optim_init_step=0.9, optim_init_reg=0.1, init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100, bin_displacement=0.1, mask_init_factor=3.0, 
target_mask_act='sigmoid', score_act='relu', frozen_backbone_layers=['conv1', 'bn1', 'layer1', 'layer2']) # Wrap the network for multi GPU training if settings.multi_gpu: net = MultiGPU(net, dim=1) objective = {'bb_ce': klreg_losses.KLRegression(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)} loss_weight = {'bb_ce': 0.01, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400} actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight) # Optimizer optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5}, {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4}, {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5}, {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3}, {'params': actor.net.feature_extractor.layer3.parameters(), 'lr': 2e-5}], lr=2e-4) lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2) trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler) trainer.train(50, load_latest=True, fail_safe=True) ================================================ FILE: external/AR/ltr/trainers/__init__.py ================================================ from .base_trainer import BaseTrainer from .ltr_trainer import LTRTrainer ================================================ FILE: external/AR/ltr/trainers/base_trainer.py ================================================ import os import glob import torch import traceback from ltr.admin import loading, multigpu class BaseTrainer: """Base trainer class. Contains functions for training and saving/loading chackpoints. Trainer classes should inherit from this one and overload the train_epoch function.""" def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None): """ args: actor - The actor for training the network loaders - list of dataset loaders, e.g. [train_loader, val_loader]. 
In each epoch, the trainer runs one epoch for each loader. optimizer - The optimizer used for training, e.g. Adam settings - Training settings lr_scheduler - Learning rate scheduler """ self.actor = actor self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.loaders = loaders self.update_settings(settings) self.epoch = 0 self.stats = {} self.device = getattr(settings, 'device', None) if self.device is None: self.device = torch.device("cuda:0" if torch.cuda.is_available() and settings.use_gpu else "cpu") self.actor.to(self.device) def update_settings(self, settings=None): """Updates the trainer settings. Must be called to update internal settings.""" if settings is not None: self.settings = settings if self.settings.env.workspace_dir is not None: self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir) self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints') if not os.path.exists(self._checkpoint_dir): os.makedirs(self._checkpoint_dir) else: self._checkpoint_dir = None def train(self, max_epochs, load_latest=False, fail_safe=True): """Do training for the given number of epochs. args: max_epochs - Max number of training epochs, load_latest - Bool indicating whether to resume from latest epoch. fail_safe - Bool indicating whether the training to automatically restart in case of any crashes. 
""" epoch = -1 num_tries = 10 for i in range(num_tries): try: if load_latest: self.load_checkpoint() for epoch in range(self.epoch+1, max_epochs+1): self.epoch = epoch self.train_epoch() if self.lr_scheduler is not None: self.lr_scheduler.step() if self._checkpoint_dir: self.save_checkpoint() except: print('Training crashed at epoch {}'.format(epoch)) if fail_safe: self.epoch -= 1 load_latest = True print('Traceback for the error!') print(traceback.format_exc()) print('Restarting training from last epoch ...') else: raise print('Finished training!') def train_epoch(self): raise NotImplementedError def save_checkpoint(self): """Saves a checkpoint of the network and other variables.""" net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net actor_type = type(self.actor).__name__ net_type = type(net).__name__ state = { 'epoch': self.epoch, 'actor_type': actor_type, 'net_type': net_type, 'net': net.state_dict(), 'net_info': getattr(net, 'info', None), 'constructor': getattr(net, 'constructor', None), 'optimizer': self.optimizer.state_dict(), 'stats': self.stats, 'settings': self.settings } directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path) if not os.path.exists(directory): os.makedirs(directory) # First save as a tmp file tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch) torch.save(state, tmp_file_path) file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch) # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure os.rename(tmp_file_path, file_path) def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False): """Loads a network checkpoint file. Can be called in three different ways: load_checkpoint(): Loads the latest epoch from the workspace. Use this to continue training. load_checkpoint(epoch_num): Loads the network at the given epoch number (int). 
load_checkpoint(path_to_checkpoint): Loads the file from the given absolute path (str). """ net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net actor_type = type(self.actor).__name__ net_type = type(net).__name__ if checkpoint is None: # Load most recent checkpoint checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir, self.settings.project_path, net_type))) if checkpoint_list: checkpoint_path = checkpoint_list[-1] else: print('No matching checkpoint file found') return elif isinstance(checkpoint, int): # Checkpoint is the epoch number checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path, net_type, checkpoint) elif isinstance(checkpoint, str): # checkpoint is the path if os.path.isdir(checkpoint): checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint))) if checkpoint_list: checkpoint_path = checkpoint_list[-1] else: raise Exception('No checkpoint found') else: checkpoint_path = os.path.expanduser(checkpoint) else: raise TypeError # Load network checkpoint_dict = loading.torch_load_legacy(checkpoint_path) assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.' if fields is None: fields = checkpoint_dict.keys() if ignore_fields is None: ignore_fields = ['settings'] # Never load the scheduler. It exists in older checkpoints. 
ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info']) # Load all fields for key in fields: if key in ignore_fields: continue if key == 'net': net.load_state_dict(checkpoint_dict[key]) elif key == 'optimizer': self.optimizer.load_state_dict(checkpoint_dict[key]) else: setattr(self, key, checkpoint_dict[key]) # Set the net info if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None: net.constructor = checkpoint_dict['constructor'] if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None: net.info = checkpoint_dict['net_info'] # Update the epoch in lr scheduler if 'epoch' in fields: self.lr_scheduler.last_epoch = self.epoch return True ================================================ FILE: external/AR/ltr/trainers/ltr_trainer.py ================================================ import os from collections import OrderedDict from ltr.trainers import BaseTrainer from ltr.admin.stats import AverageMeter, StatValue from ltr.admin.tensorboard import TensorboardWriter import torch import time class LTRTrainer(BaseTrainer): def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None): """ args: actor - The actor for training the network loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one epoch for each loader. optimizer - The optimizer used for training, e.g. 
Adam settings - Training settings lr_scheduler - Learning rate scheduler """ super().__init__(actor, loaders, optimizer, settings, lr_scheduler) self._set_default_settings() # Initialize statistics variables self.stats = OrderedDict({loader.name: None for loader in self.loaders}) # Initialize tensorboard tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path) self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders]) self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True) def _set_default_settings(self): # Dict of all default values default = {'print_interval': 10, 'print_stats': None, 'description': ''} for param, default_value in default.items(): if getattr(self.settings, param, None) is None: setattr(self.settings, param, default_value) def cycle_dataset(self, loader): """Do a cycle of training or validation.""" self.actor.train(loader.training) torch.set_grad_enabled(loader.training) self._init_timing() for i, data in enumerate(loader, 1): # get inputs if self.move_data_to_gpu: data = data.to(self.device) data['epoch'] = self.epoch data['settings'] = self.settings # forward pass loss, stats = self.actor(data) # backward pass and update weights if loader.training: self.optimizer.zero_grad() loss.backward() self.optimizer.step() # update statistics batch_size = data['train_images'].shape[loader.stack_dim] self._update_stats(stats, batch_size, loader) # print statistics self._print_stats(i, loader, batch_size) def train_epoch(self): """Do one epoch for each loader.""" for loader in self.loaders: if self.epoch % loader.epoch_interval == 0: self.cycle_dataset(loader) self._stats_new_epoch() self._write_tensorboard() def _init_timing(self): self.num_frames = 0 self.start_time = time.time() self.prev_time = self.start_time def _update_stats(self, new_stats: OrderedDict, batch_size, loader): # Initialize stats if not initialized yet if loader.name not in self.stats.keys() or 
self.stats[loader.name] is None: self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()}) for name, val in new_stats.items(): if name not in self.stats[loader.name].keys(): self.stats[loader.name][name] = AverageMeter() self.stats[loader.name][name].update(val, batch_size) def _print_stats(self, i, loader, batch_size): self.num_frames += batch_size current_time = time.time() batch_fps = batch_size / (current_time - self.prev_time) average_fps = self.num_frames / (current_time - self.start_time) self.prev_time = current_time if i % self.settings.print_interval == 0 or i == loader.__len__(): print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()) print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps) for name, val in self.stats[loader.name].items(): if (self.settings.print_stats is None or name in self.settings.print_stats) and hasattr(val, 'avg'): print_str += '%s: %.5f , ' % (name, val.avg) print(print_str[:-5]) def _stats_new_epoch(self): # Record learning rate for loader in self.loaders: if loader.training: lr_list = self.lr_scheduler.get_lr() for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for loader_stats in self.stats.values(): if loader_stats is None: continue for stat_value in loader_stats.values(): if hasattr(stat_value, 'new_epoch'): stat_value.new_epoch() def _write_tensorboard(self): if self.epoch == 1: self.tensorboard_writer.write_info(self.settings.module_name, self.settings.script_name, self.settings.description) self.tensorboard_writer.write_epoch(self.stats, self.epoch) ================================================ FILE: external/AR/pytracking/ARcm_seg.py ================================================ import os import sys import torch import numpy as np import cv2 import torch.nn as nn from 
external.AR.pytracking.utils.loading import load_network from external.AR.ltr.data.processing_utils_SE import sample_target_SE, transform_image_to_crop_SE, map_mask_back env_path = os.path.join(os.path.dirname(__file__), '..') if env_path not in sys.path: sys.path.append(env_path) def mask_torch2numpy(Pmask): Pmask_arr = np.array(Pmask.squeeze().cpu()) # (H,W) (0,1) return Pmask_arr class ARcm_seg(object): def __init__(self, refine_net_dir, search_factor=2.0, input_sz=256): self.refine_network = self.get_network(refine_net_dir) self.search_factor = search_factor self.input_sz = input_sz self.mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3)) self.std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3)) def initialize(self, frame1, bbox1): ''' :param frame1: cv array (H,W,3) :param bbox1: ndarray (4,) :return: ''' '''Step1: get cropped patch(tensor)''' patch1, h_f, w_f = sample_target_SE(frame1, bbox1, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT) patch1_tensor = self.img_preprocess(patch1) '''Step2: get GT's cooridinate on the cropped patch(tensor)''' crop_sz = torch.Tensor((self.input_sz, self.input_sz)) bbox1_tensor = self.gt_preprocess(bbox1) # (4,) bbox1_crop_tensor = transform_image_to_crop_SE(bbox1_tensor, bbox1_tensor, h_f, w_f, crop_sz).cuda() '''Step3: forward prop (reference branch)''' with torch.no_grad(): self.refine_network.forward_ref(patch1_tensor, bbox1_crop_tensor) '''refine''' def get_mask(self, Cframe, Cbbox, dtm=None, vis=False): ''' :param Cframe: Current frame(cv2 array) :param Cbbox: Current bbox (ndarray) (x1,y1,w,h) :return: mask ''' '''Step1: get cropped patch(tensor)''' Cpatch, h_f, w_f = sample_target_SE(Cframe, Cbbox, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT) Cpatch_tensor = self.img_preprocess(Cpatch) '''Step2: forward prop (test branch)''' with torch.no_grad(): if dtm is not None: '''2020.4.26 support input dtm''' pred = self.refine_network.forward_test(Cpatch_tensor, dtm, mode='mask') else: pred = 
self.refine_network.forward_test(Cpatch_tensor,mode='mask') Pmask_arr = mask_torch2numpy(pred) mask_arr = map_mask_back(Cframe, Cbbox, self.search_factor, Pmask_arr, mode=cv2.BORDER_CONSTANT) if vis: return mask_arr, Cpatch, Pmask_arr else: return mask_arr def get_network(self,checkpoint_dir): network = load_network(checkpoint_dir) network.cuda() network.eval() return network def img_preprocess(self,img_arr): '''---> Pytorch tensor(RGB),Normal(-1 to 1,subtract mean, divide std) input img_arr (H,W,3) output (1,1,3,H,W) ''' norm_img = ((img_arr/255.0) - self.mean)/(self.std) img_f32 = norm_img.astype(np.float32) img_tensor = torch.from_numpy(img_f32).cuda() img_tensor = img_tensor.permute((2,0,1)) return img_tensor.unsqueeze(dim=0).unsqueeze(dim=0) def gt_preprocess(self,gt_arr): ''' :param gt: ndarray (4,) :return: torch tensor (4,) ''' return torch.from_numpy(gt_arr.astype(np.float32)) def add_frame_mask(frame, mask, threshold=0.5): mask_new = (mask>threshold)*255 #(H,W) frame_new = frame.copy().astype(np.float) frame_new[...,1] += 0.3*mask_new frame_new = frame_new.clip(0,255).astype(np.uint8) return frame_new def add_frame_bbox(frame, refined_box, color): x1, y1, w, h = refined_box.tolist() cv2.rectangle(frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2) return frame ================================================ FILE: external/AR/pytracking/VOT/tracker_DiMP.m ================================================ % Set path to the python in the pytracking conda environment python_path = 'PATH_TO_CONDA_INSTALLATION/envs/pytracking/bin/python'; % Set path to pytracking pytracking_path = 'PATH_TO_VISIONML/pytracking'; % Set path to trax installation. 
Check % https://trax.readthedocs.io/en/latest/tutorial_compiling.html for % compilation information trax_path = 'PATH_TO_VOT_TOOLKIT/native/trax'; tracker_name = 'dimp'; % Name of the tracker to evaluate runfile_name = 'dimp18_vot'; % Name of the parameter file to use debug = 0; %% tracker_label = [tracker_name, '_', runfile_name]; % Generate python command tracker_command = sprintf(['%s -c "import sys; sys.path.append(''%s'');', ... 'sys.path.append(''%s/support/python'');', ... 'import run_vot;', ... 'run_vot.run_vot(''%s'', ''%s'', debug=%d)"'],... python_path, pytracking_path, trax_path, ... tracker_name, runfile_name, debug); tracker_interpreter = python_path; tracker_linkpath = {[trax_path, '/build'],... [trax_path, '/build/support/client'],... [trax_path, '/build/support/opencv']}; ================================================ FILE: external/AR/pytracking/VOT/trackers.ini ================================================ [DiMP] # label = DiMP protocol = traxpython command = run_vot; run_vot.run_vot2020('dimp', 'dimp50') # Set the tracker name and the parameter name # Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths) paths = PATH_TO_PYTRACKING # Additional environment paths #env_PATH = ;${PATH} ================================================ FILE: external/AR/pytracking/VOT/vot.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016, 2019 """ import sys import copy import collections try: import trax except ImportError: raise Exception('TraX support not found. 
Please add trax module to Python path.') Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format, channels=None): """ Constructor Args: region_format: Region format options """ assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON]) if channels is None: channels = ['color'] elif channels == 'rgbd': channels = ['color', 'depth'] elif channels == 'rgbt': channels = ['color', 'ir'] elif channels == 'ir': channels = ['ir'] else: raise Exception('Illegal configuration {}.'.format(channels)) self._trax = trax.Server([region_format], [trax.Image.PATH], channels) request = self._trax.wait() assert(request.type == 'initialize') if isinstance(request.region, trax.Polygon): self._region = Polygon([Point(x[0], x[1]) for x in request.region]) else: self._region = Rectangle(*request.region.bounds()) self._image = [str(x) for k, x in request.image.items()] if len(self._image) == 1: self._image = self._image[0] self._trax.status(request.region) def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = None): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, Rectangle) or isinstance(region, Polygon)) if isinstance(region, Polygon): tregion = trax.Polygon.create([(x.x, x.y) for x in region.points]) else: tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height) properties = {} if not confidence is None: properties['confidence'] = confidence self._trax.status(tregion, properties) def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if hasattr(self, 
"_image"): image = self._image del self._image return tuple(image) request = self._trax.wait() if request.type == 'frame': image = [str(x) for k, x in request.image.items()] if len(image) == 1: image = image[0] return tuple(image) else: return None def quit(self): if hasattr(self, '_trax'): self._trax.quit() def __del__(self): self.quit() ================================================ FILE: external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_065.py ================================================ from pytracking.VOT2020_super_only_mask_384_HP.dimp_alpha_seg_class import run_vot_exp import os os.environ['CUDA_VISIBLE_DEVICES'] = '1' # run_vot_exp('dimp','dimp50_vot19','SEbcm',0.60,VIS=False) run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=False) # run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=True) ================================================ FILE: external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_seg_class.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import cv2 import torch import vot import sys import time '''Refine module & Pytracking base trackers''' import os from pytracking.evaluation import Tracker from pytracking.ARcm_seg import ARcm_seg from pytracking.vot20_utils import * '''''' '''DiMP-alpha class''' class DIMP_ALPHA(object): def __init__(self, tracker_name='dimp', para_name='dimp50_vot19', refine_model_name='ARcm_coco_seg', threshold=0.15): self.THRES = threshold '''create tracker''' '''DIMP''' tracker_info = Tracker(tracker_name, para_name, None) params = tracker_info.get_parameters() params.visualization = False params.debug = False params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097} self.dimp = tracker_info.tracker_class(params) '''Alpha-Refine''' project_path = 
os.path.join(os.path.dirname(__file__), '..', '..') refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/') refine_path = os.path.join(refine_root, refine_model_name) '''2020.4.25 input size: 384x384''' self.alpha = ARcm_seg(refine_path, input_sz=384) def initialize(self, img_RGB, mask): region = rect_from_mask(mask) self.H, self.W, _ = img_RGB.shape gt_bbox_np = np.array(region).astype(np.float32) '''Initialize dimp for specific video''' gt_bbox_torch = torch.from_numpy(gt_bbox_np) init_info = {} init_info['init_bbox'] = gt_bbox_torch _ = self.dimp.initialize(img_RGB, init_info) '''initilize refinement module for specific video''' self.alpha.initialize(img_RGB, np.array(gt_bbox_np)) def track(self, img_RGB): '''TRACK''' '''base tracker''' outputs = self.dimp.track(img_RGB) pred_bbox = outputs['target_bbox'] '''Step1: Post-Process''' x1, y1, w, h = pred_bbox # add boundary and min size limit x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (self.H, self.W)) w = x2 - x1 h = y2 - y1 new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32)) new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32)) new_scale = torch.sqrt(new_target_sz.prod() / self.dimp.base_target_sz.prod()) ##### update self.dimp.pos = new_pos.clone() self.dimp.target_sz = new_target_sz self.dimp.target_scale = new_scale bbox_new = [x1, y1, w, h] '''Step2: Mask report''' pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(bbox_new), vis=True) final_mask = (pred_mask > self.THRES).astype(np.uint8) search_region = search.astype(np.uint8) search_mask = (search_mask > self.THRES).astype(np.uint8) return bbox_new, final_mask, search_region, search_mask def run_vot_exp(tracker_name, para_name, refine_model_name, threshold, VIS=False): torch.set_num_threads(1) # torch.cuda.set_device(CUDA_ID) # set GPU id save_root = os.path.join('', para_name) if VIS and (not os.path.exists(save_root)): os.mkdir(save_root) tracker = 
DIMP_ALPHA(tracker_name=tracker_name, para_name=para_name, refine_model_name=refine_model_name, threshold=threshold) handle = vot.VOT("mask") selection = handle.region() imagefile = handle.frame() if not imagefile: sys.exit(0) if VIS: '''for vis''' seq_name = imagefile.split('/')[-3] save_v_dir = os.path.join(save_root, seq_name) if not os.path.exists(save_v_dir): os.mkdir(save_v_dir) cur_time = int(time.time() % 10000) save_dir = os.path.join(save_v_dir, str(cur_time)) if not os.path.exists(save_dir): os.makedirs(save_dir) image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right # mask given by the toolkit ends with the target (zero-padding to the right and down is needed) mask = make_full_size(selection, (image.shape[1], image.shape[0])) tracker.initialize(image, mask) while True: imagefile = handle.frame() if not imagefile: break image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right b1, m, search, search_m = tracker.track(image) handle.report(m) if VIS: '''Visualization''' # original image image_ori = image[:, :, ::-1].copy() # RGB --> BGR image_name = imagefile.split('/')[-1] save_path = os.path.join(save_dir, image_name) cv2.imwrite(save_path, image_ori) # dimp box image_b = image_ori.copy() cv2.rectangle(image_b, (int(b1[0]), int(b1[1])), (int(b1[0] + b1[2]), int(b1[1] + b1[3])), (0, 0, 255), 2) image_b_name = image_name.replace('.jpg', '_bbox.jpg') save_path = os.path.join(save_dir, image_b_name) cv2.imwrite(save_path, image_b) # search region search_bgr = search[:, :, ::-1].copy() search_name = image_name.replace('.jpg', '_search.jpg') save_path = os.path.join(save_dir, search_name) cv2.imwrite(save_path, search_bgr) # search region mask search_bgr_m = search_bgr.astype(np.float32) search_bgr_m[:, :, 1] += 127.0 * search_m search_bgr_m[:, :, 2] += 127.0 * search_m contours, _ = cv2.findContours(search_m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) search_bgr_m = cv2.drawContours(search_bgr_m, contours, -1, (0, 255, 255), 4) 
search_bgr_m = search_bgr_m.clip(0, 255).astype(np.uint8) search_name_m = image_name.replace('.jpg', '_search_mask.jpg') save_path = os.path.join(save_dir, search_name_m) cv2.imwrite(save_path, search_bgr_m) # original image + mask image_m = image_ori.copy().astype(np.float32) image_m[:, :, 1] += 127.0 * m image_m[:, :, 2] += 127.0 * m contours, _ = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) image_m = cv2.drawContours(image_m, contours, -1, (0, 255, 255), 2) image_m = image_m.clip(0, 255).astype(np.uint8) image_mask_name_m = image_name.replace('.jpg', '_mask.jpg') save_path = os.path.join(save_dir, image_mask_name_m) cv2.imwrite(save_path, image_m) ================================================ FILE: external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_alpha_seg_class.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import cv2 import torch import vot import sys import time import os import numpy as np from lib.test.tracker.mixformer_online import MixFormerOnline from pytracking.ARcm_seg import ARcm_seg from pytracking.vot20_utils import * import lib.test.parameter.mixformer_online as vot_params class MIXFORMER_ALPHA_SEG(object): def __init__(self, tracker, refine_model_name='ARcm_coco_seg', threshold=0.6): self.THRES = threshold self.tracker = tracker '''create tracker''' '''Alpha-Refine''' project_path = os.path.join(os.path.dirname(__file__), '..', '..') refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/') refine_path = os.path.join(refine_root, refine_model_name) '''2020.4.25 input size: 384x384''' self.alpha = ARcm_seg(refine_path, input_sz=384) def initialize(self, image, mask): region = rect_from_mask(mask) # init_info = {'init_bbox': region} # self.tracker.initialize(image, init_info) self.H, self.W, _ = image.shape gt_bbox_np = 
np.array(region).astype(np.float32) '''Initialize STARK for specific video''' init_info = {'init_bbox': list(gt_bbox_np)} self.tracker.initialize(image, init_info) '''initilize refinement module for specific video''' self.alpha.initialize(image, np.array(gt_bbox_np)) def track(self, img_RGB): '''TRACK''' '''base tracker''' outputs = self.tracker.track(img_RGB) pred_bbox = outputs['target_bbox'] '''Step2: Mask report''' pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True) final_mask = (pred_mask > self.THRES).astype(np.uint8) return final_mask, 1 def make_full_size(x, output_sz): ''' zero-pad input x (right and down) to match output_sz x: numpy array e.g., binary mask output_sz: size of the output [width, height] ''' if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]: return x pad_x = output_sz[0] - x.shape[1] if pad_x < 0: x = x[:, :x.shape[1] + pad_x] # padding has to be set to zero, otherwise pad function fails pad_x = 0 pad_y = output_sz[1] - x.shape[0] if pad_y < 0: x = x[:x.shape[0] + pad_y, :] # padding has to be set to zero, otherwise pad function fails pad_y = 0 return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0) refine_model_name = 'ARcm_coco_seg_only_mask_384' params = vot_params.parameters("baseline", model="mixformer_online_22k.pth.tar") # params = vot_params.parameters("baseline") mixformer = MixFormerOnline(params, "VOT20") tracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name) handle = vot.VOT("mask") selection = handle.region() imagefile = handle.frame() if not imagefile: sys.exit(0) image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right # mask given by the toolkit ends with the target (zero-padding to the right and down is needed) mask = make_full_size(selection, (image.shape[1], image.shape[0])) tracker.H = image.shape[0] tracker.W = image.shape[1] tracker.initialize(image, mask) while True: imagefile = handle.frame() if not 
imagefile: break image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right region, confidence = tracker.track(image) handle.report(region, confidence) ================================================ FILE: external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_large_alpha_seg_class.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import cv2 import torch import vot import sys import time import os import numpy as np from lib.test.tracker.mixformer_online import MixFormerOnline from pytracking.ARcm_seg import ARcm_seg from pytracking.vot20_utils import * import lib.test.parameter.mixformer_online as vot_params class MIXFORMER_ALPHA_SEG(object): def __init__(self, tracker, refine_model_name='ARcm_coco_seg', threshold=0.6): self.THRES = threshold self.tracker = tracker '''create tracker''' '''Alpha-Refine''' project_path = os.path.join(os.path.dirname(__file__), '..', '..') refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/') refine_path = os.path.join(refine_root, refine_model_name) '''2020.4.25 input size: 384x384''' self.alpha = ARcm_seg(refine_path, input_sz=384) def initialize(self, image, mask): region = rect_from_mask(mask) # init_info = {'init_bbox': region} # self.tracker.initialize(image, init_info) self.H, self.W, _ = image.shape gt_bbox_np = np.array(region).astype(np.float32) '''Initialize STARK for specific video''' init_info = {'init_bbox': list(gt_bbox_np)} self.tracker.initialize(image, init_info) '''initilize refinement module for specific video''' self.alpha.initialize(image, np.array(gt_bbox_np)) def track(self, img_RGB): '''TRACK''' '''base tracker''' outputs = self.tracker.track(img_RGB) pred_bbox = outputs['target_bbox'] '''Step2: Mask report''' pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True) final_mask = 
(pred_mask > self.THRES).astype(np.uint8) return final_mask, 1 def make_full_size(x, output_sz): ''' zero-pad input x (right and down) to match output_sz x: numpy array e.g., binary mask output_sz: size of the output [width, height] ''' if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]: return x pad_x = output_sz[0] - x.shape[1] if pad_x < 0: x = x[:, :x.shape[1] + pad_x] # padding has to be set to zero, otherwise pad function fails pad_x = 0 pad_y = output_sz[1] - x.shape[0] if pad_y < 0: x = x[:x.shape[0] + pad_y, :] # padding has to be set to zero, otherwise pad function fails pad_y = 0 return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0) refine_model_name = 'ARcm_coco_seg_only_mask_384' # params = vot_params.parameters("baseline_large") params = vot_params.parameters("baseline_large", model="mixformerL_online_22k.pth.tar") mixformer = MixFormerOnline(params, "VOT20") tracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name) handle = vot.VOT("mask") selection = handle.region() imagefile = handle.frame() if not imagefile: sys.exit(0) image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right # mask given by the toolkit ends with the target (zero-padding to the right and down is needed) mask = make_full_size(selection, (image.shape[1], image.shape[0])) tracker.H = image.shape[0] tracker.W = image.shape[1] tracker.initialize(image, mask) while True: imagefile = handle.frame() if not imagefile: break image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB) # Right region, confidence = tracker.track(image) handle.report(region, confidence) ================================================ FILE: external/AR/pytracking/VOT2020_super_only_mask_384_HP/vot.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016 """ import sys import copy import collections import numpy as np try: import 
trax except ImportError: raise Exception('TraX support not found. Please add trax module to Python path.') Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format, channels=None): """ Constructor Args: region_format: Region format options """ assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK]) if channels is None: channels = ['color'] elif channels == 'rgbd': channels = ['color', 'depth'] elif channels == 'rgbt': channels = ['color', 'ir'] elif channels == 'ir': channels = ['ir'] else: raise Exception('Illegal configuration {}.'.format(channels)) self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot="python")) request = self._trax.wait() assert(request.type == 'initialize') if isinstance(request.region, trax.Polygon): self._region = Polygon([Point(x[0], x[1]) for x in request.region]) elif isinstance(request.region, trax.Mask): self._region = request.region.array(True) else: self._region = Rectangle(*request.region.bounds()) self._image = [x.path() for k, x in request.image.items()] if len(self._image) == 1: self._image = self._image[0] self._trax.status(request.region) def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = None): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, (Rectangle, Polygon, np.ndarray))) if isinstance(region, Polygon): tregion = trax.Polygon.create([(x.x, x.y) for x in region.points]) elif isinstance(region, np.ndarray): tregion = trax.Mask.create(region) else: tregion = trax.Rectangle.create(region.x, 
region.y, region.width, region.height) properties = {} if not confidence is None: properties['confidence'] = confidence self._trax.status(tregion, properties) def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if hasattr(self, "_image"): image = self._image del self._image return image request = self._trax.wait() if request.type == 'frame': image = [x.path() for k, x in request.image.items()] if len(image) == 1: return image[0] return image else: return None def quit(self): if hasattr(self, '_trax'): self._trax.quit() def __del__(self): self.quit() ================================================ FILE: external/AR/pytracking/__init__.py ================================================ from pytracking.libs import TensorList, TensorDict ================================================ FILE: external/AR/pytracking/analysis/__init__.py ================================================ ================================================ FILE: external/AR/pytracking/analysis/evaluate_vos.py ================================================ import os import numpy as np import torch import pandas as pd from collections import OrderedDict from ltr.data.image_loader import imread_indexed from pytracking.evaluation import get_dataset from pathlib import Path from pytracking.analysis.plot_results import generate_formatted_report import pytracking.analysis.vos_utils as utils # Originally db_eval_sequence() in the davis challenge toolkit: def evaluate_sequence(seq_name, segmentations, annotations, object_info, measure='J'): """ Evaluate video sequence results. Arguments: segmentations (dict of ndarray): segmentation labels. annotations (dict of ndarray): ground-truth labels. 
object_info dict: {object_id: first_frame_index} measure evaluation metric (J,F) """ results = dict(raw=OrderedDict()) _measures = {'J': utils.davis_jaccard_measure, 'F': utils.davis_f_measure} _statistics = {'decay': utils.decay, 'mean': utils.mean, 'recall': utils.recall, 'std': utils.std} for obj_id, first_frame in object_info.items(): r = np.ones((len(annotations))) * np.nan for i, (an, sg) in enumerate(zip(annotations, segmentations)): if list(annotations.keys()).index(first_frame) < i < len(annotations) - 1: r[i] = _measures[measure](annotations[an] == obj_id, segmentations[sg] == obj_id) results['raw'][obj_id] = r for stat, stat_fn in _statistics.items(): results[stat] = [float(stat_fn(r)) for r in results['raw'].values()] return results def evaluate_dataset(results_path, dset_name, measure='J', to_file=True, scores=False, sequences=None, quiet=False): dset = get_dataset(dset_name) results = OrderedDict() dset_scores = [] dset_decay = [] dset_recall = [] if to_file: f = open(results_path / ("evaluation-%s.txt" % measure), "w") def _print(msg): if not quiet: print(msg) if to_file: print(msg, file=f) if sequences is not None: sequences = [sequences] if not isinstance(sequences, (list, tuple)) else sequences target_names = [] for j, sequence in enumerate(dset): if (sequences is not None) and (sequence.name not in sequences): continue # Load all frames frames = sequence.ground_truth_seg annotations = OrderedDict() segmentations = OrderedDict() for f in frames: if f is None: continue file = Path(f) annotations[file.name] = imread_indexed(file) if not scores: segmentations[file.name] = imread_indexed(os.path.join(results_path, sequence.name, file.name)) else: raise NotImplementedError # Find object ids and starting frames object_info = dict() for f_id, d in sequence.init_data.items(): for obj_id in d['object_ids']: object_info[int(obj_id)] = Path(d['mask']).name if 0 in object_info: # Remove background object_info.pop(0) # Evaluate n_seqs = len(dset) n_objs = 
len(object_info) seq_name = sequence.name _print("%d/%d: %s: %d object%s" % (j + 1, n_seqs, seq_name, n_objs, "s" if n_objs > 1 else "")) r = evaluate_sequence(seq_name, segmentations, annotations, object_info, measure=measure) results[seq_name] = r # Print scores, per frame and object, ignoring NaNs per_obj_score = [] # Per-object accuracies, averaged over the sequence per_frame_score = [] # Per-frame accuracies, averaged over the objects for obj_id, score in r['raw'].items(): target_names.append('{}_{}'.format(seq_name, obj_id)) per_frame_score.append(score) s = utils.mean(score) # Sequence average for one object per_obj_score.append(s) if n_objs > 1: _print("joint {obj}: acc {score:.3f} ┊{apf}┊".format(obj=obj_id, score=s, apf=utils.text_bargraph(score))) # Print mean object score per frame and final score dset_decay.extend(r['decay']) dset_recall.extend(r['recall']) dset_scores.extend(per_obj_score) seq_score = utils.mean(per_obj_score) # Final score seq_mean_score = utils.nanmean(np.array(per_frame_score), axis=0) # Mean object score per frame # Print sequence results _print("final : acc {seq:.3f} ({dset:.3f}) ┊{apf}┊".format( seq=seq_score, dset=np.mean(dset_scores), apf=utils.text_bargraph(seq_mean_score))) _print("%s: %.3f, recall: %.3f, decay: %.3f" % (measure, utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay))) if to_file: f.close() return target_names, dset_scores, dset_recall, dset_decay def evaluate_vos(trackers, dataset='yt2019_jjval', force=False): """ evaluate a list of trackers on a vos dataset. args: trackers - list of trackers to evaluate dataset - name of the dataset force - Force re-evaluation. 
If False, the pre-computed results are loaded if available """ csv_name_global = f'{dataset}_global_results.csv' csv_name_per_sequence = f'{dataset}_per-sequence_results.csv' table_g_all = [] table_seq_all = [] scores = {'J-Mean': [], 'J-Recall': [], 'J-Decay': []} display_names = [] for t in trackers: if t.display_name is not None: disp_name = t.display_name elif t.run_id is not None: disp_name = '{} {}_{:03d}'.format(t.name, t.parameter_name, t.run_id) else: disp_name = '{} {}'.format(t.name, t.parameter_name) display_names.append(disp_name) results_path = t.segmentation_dir csv_name_global_path = os.path.join(results_path, csv_name_global) csv_name_per_sequence_path = os.path.join(results_path, csv_name_per_sequence) if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path) and not force: table_g = pd.read_csv(csv_name_global_path) table_seq = pd.read_csv(csv_name_per_sequence_path) else: seq_names, dset_scores, dset_recall, dset_decay = evaluate_dataset(results_path, dataset, measure='J', to_file=False, scores=False, sequences=None) g_measures = ['J-Mean', 'J-Recall', 'J-Decay'] g_res = np.array([utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)]) g_res = np.reshape(g_res, [1, len(g_res)]) table_g = pd.DataFrame(data=g_res, columns=g_measures) with open(csv_name_global_path, 'w') as f: table_g.to_csv(f, index=False, float_format="%.3f") seq_measures = ['Sequence', 'J-Mean', 'J-Recall', 'J-Decay'] table_seq = pd.DataFrame(data=list(zip(seq_names, dset_scores, dset_recall, dset_decay)), columns=seq_measures) with open(csv_name_per_sequence_path, 'w') as f: table_seq.to_csv(f, index=False, float_format="%.3f") scores['J-Mean'].append(table_g['J-Mean'].values[0]*100) scores['J-Recall'].append(table_g['J-Recall'].values[0]*100) scores['J-Decay'].append(table_g['J-Decay'].values[0]*100) table_g_all.append(table_g) table_seq_all.append(table_seq) report = generate_formatted_report(display_names, scores) print(report) 
return table_g_all, table_seq_all ================================================ FILE: external/AR/pytracking/analysis/extract_results.py ================================================ import os import sys import importlib import numpy as np from pytracking.utils.load_text import load_text import torch import pickle from tqdm import tqdm env_path = os.path.join(os.path.dirname(__file__), '../..') if env_path not in sys.path: sys.path.append(env_path) from pytracking.evaluation.environment import env_settings def calc_err_center(pred_bb, anno_bb, normalized=False): pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0) anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 1.0) if normalized: pred_center = pred_center / anno_bb[:, 2:] anno_center = anno_center / anno_bb[:, 2:] err_center = ((pred_center - anno_center)**2).sum(1).sqrt() return err_center def calc_iou_overlap(pred_bb, anno_bb): tl = torch.max(pred_bb[:, :2], anno_bb[:, :2]) br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0) sz = (br - tl + 1.0).clamp(0) # Area intersection = sz.prod(dim=1) union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection return intersection / union def calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None): pred_bb = pred_bb.clone() # Check if invalid values are present if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any(): raise Exception('Error: Invalid results') if torch.isnan(anno_bb).any(): if dataset == 'uav': pass else: raise Exception('Warning: NaNs in annotation') if (pred_bb[:, 2:] == 0.0).any(): for i in range(1, pred_bb.shape[0]): if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any(): pred_bb[i, :] = pred_bb[i-1, :] if pred_bb.shape[0] != anno_bb.shape[0]: if dataset == 'lasot': if pred_bb.shape[0] > anno_bb.shape[0]: # For monkey-17, there is a mismatch for some trackers. 
pred_bb = pred_bb[:anno_bb.shape[0], :] else: raise Exception('Mis-match in tracker prediction and GT lengths') else: # print('Warning: Mis-match in tracker prediction and GT lengths') if pred_bb.shape[0] > anno_bb.shape[0]: pred_bb = pred_bb[:anno_bb.shape[0], :] else: pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb) pred_bb = torch.cat((pred_bb, pad), dim=0) pred_bb[0, :] = anno_bb[0, :] if target_visible is not None: target_visible = target_visible.bool() valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible else: valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) err_center = calc_err_center(pred_bb, anno_bb) err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True) err_overlap = calc_iou_overlap(pred_bb, anno_bb) # handle invalid anno cases if dataset in ['uav']: err_center[~valid] = -1.0 else: err_center[~valid] = float("Inf") err_center_normalized[~valid] = -1.0 err_overlap[~valid] = -1.0 if dataset == 'lasot': err_center_normalized[~target_visible] = float("Inf") err_center[~target_visible] = float("Inf") if torch.isnan(err_overlap).any(): raise Exception('Nans in calculated overlap') return err_overlap, err_center, err_center_normalized, valid def extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05, exclude_invalid_frames=False): settings = env_settings() eps = 1e-16 result_plot_path = os.path.join(settings.result_plot_path, report_name) if not os.path.exists(result_plot_path): os.makedirs(result_plot_path) threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64) threshold_set_center = torch.arange(0, 51, dtype=torch.float64) threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0 avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64) ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()), dtype=torch.float32) 
ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()), dtype=torch.float32) ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()), dtype=torch.float32) valid_sequence = torch.ones(len(dataset), dtype=torch.uint8) for seq_id, seq in enumerate(tqdm(dataset)): # Load anno anno_bb = torch.tensor(seq.ground_truth_rect) target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None for trk_id, trk in enumerate(trackers): # Load results base_results_path = '{}/{}'.format(trk.results_dir, seq.name) results_path = '{}.txt'.format(base_results_path) if os.path.isfile(results_path): pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64)) else: if skip_missing_seq: valid_sequence[seq_id] = 0 break else: raise Exception('Result not found. {}'.format(results_path)) # Calculate measures err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust( pred_bb, anno_bb, seq.dataset, target_visible) avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean() if exclude_invalid_frames: seq_length = valid_frame.long().sum() else: seq_length = anno_bb.shape[0] if seq_length <= 0: raise Exception('Seq length zero') ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length print('\n\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) # Prepare dictionary for saving data seq_names = [s.name for s in dataset] tracker_names = [{'name': t.name, 
'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name} for t in trackers] eval_data = {'sequences': seq_names, 'trackers': tracker_names, 'valid_sequence': valid_sequence.tolist(), 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(), 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(), 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(), 'avg_overlap_all': avg_overlap_all.tolist(), 'threshold_set_overlap': threshold_set_overlap.tolist(), 'threshold_set_center': threshold_set_center.tolist(), 'threshold_set_center_norm': threshold_set_center_norm.tolist()} with open(result_plot_path + '/eval_data.pkl', 'wb') as fh: pickle.dump(eval_data, fh) return eval_data ================================================ FILE: external/AR/pytracking/analysis/playback_results.py ================================================ import os import sys import importlib import numpy as np import torch import time import matplotlib.patches as patches import cv2 as cv import matplotlib.pyplot as plt from pytracking.analysis.plot_results import get_plot_draw_styles from pytracking.utils.plotting import draw_figure from pytracking.evaluation import get_dataset, trackerlist env_path = os.path.join(os.path.dirname(__file__), '../..') if env_path not in sys.path: sys.path.append(env_path) class Display: def __init__(self, sequence_length, plot_draw_styles, sequence_name): self.active = True self.frame_number = 0 self.pause_mode = True self.step_size = 0 self.step_direction = 'forward' self.fig, self.ax = plt.subplots(1) self.fig.canvas.mpl_connect('key_press_event', self.key_callback_fn) plt.tight_layout() self.sequence_length = sequence_length self.sequence_name = sequence_name self.plot_draw_styles = plot_draw_styles def key_callback_fn(self, event): if event.key == ' ': self.pause_mode = not self.pause_mode self.step_size = 0 self.step_direction = 'forward' elif event.key == 'right': if self.pause_mode: 
self.frame_number += 1 if self.frame_number >= self.sequence_length: self.frame_number = self.sequence_length - 1 elif self.step_direction == 'stop': self.step_direction = 'forward' self.step_size = 0 elif self.step_direction == 'backward' and self.step_size == 0: self.step_direction = 'stop' else: self.step_size += 1 elif event.key == 'left': if self.pause_mode: self.frame_number -= 1 if self.frame_number < 0: self.frame_number = 0 elif self.step_direction == 'stop': self.step_direction = 'backward' self.step_size = 0 elif self.step_direction == 'forward' and self.step_size == 0: self.step_direction = 'stop' else: self.step_size -= 1 elif event.key == 'escape' or event.key == 'q': self.active = False def _get_speed(self): delta = 0 if self.step_direction == 'forward': delta = 2 ** abs(self.step_size) elif self.step_direction == 'backward': delta = -1 * 2 ** abs(self.step_size) return delta def step(self): delta = self._get_speed() self.frame_number += delta if self.frame_number < 0: self.frame_number = 0 elif self.frame_number >= self.sequence_length: self.frame_number = self.sequence_length - 1 def show(self, image, bb_list, trackers, gt=None): self.ax.cla() self.ax.imshow(image) # Draw rects rect_handles = [] for i, bb in enumerate(bb_list): rect = patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1, edgecolor=self.plot_draw_styles[i]['color'], facecolor='none') self.ax.add_patch(rect) rect_handles.append(patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1, edgecolor=self.plot_draw_styles[i]['color'], facecolor=self.plot_draw_styles[i]['color'], label=trackers[i])) if gt is not None: rect = patches.Rectangle((gt[0], gt[1]), gt[2], gt[3], linewidth=2, edgecolor='g', facecolor='none') self.ax.add_patch(rect) rect_handles.append(rect) self.ax.set_axis_off() self.ax.axis('equal') plt.legend(handles=rect_handles, loc=4, borderaxespad=0.) 
mode = 'manual' if self.pause_mode else 'auto ' speed = self._get_speed() self.fig.suptitle('Sequence: {} Mode: {} Speed: {:d}x'.format(self.sequence_name, mode, speed), fontsize=14) draw_figure(self.fig) def read_image(image_file: str): im = cv.imread(image_file) return cv.cvtColor(im, cv.COLOR_BGR2RGB) def _get_display_name(tracker): if tracker.display_name is None: if tracker.run_id is not None: return '{}_{}_{:03d}'.format(tracker.name, tracker.parameter_name, tracker.run_id) else: return '{}_{}'.format(tracker.name, tracker.parameter_name) else: return tracker.display_name def playback_results(trackers, sequence): """ Playback saved results of input trackers for a particular sequence. You can navigate the sequence using left/right arrow keys. You can also change to 'auto' mode by pressing space bar, in which case the sequence will be replayed at a particular speed. The speed for playback in 'auto' mode can be controlled using the left/right arrow keys. You can exit the application using escape or q keys. """ plot_draw_styles = get_plot_draw_styles() tracker_results = [] # Load results for trk_id, trk in enumerate(trackers): # Load results base_results_path = '{}/{}'.format(trk.results_dir, sequence.name) results_path = '{}.txt'.format(base_results_path) if os.path.isfile(results_path): try: pred_bb = torch.tensor(np.loadtxt(str(results_path), dtype=np.float64)) except: pred_bb = torch.tensor(np.loadtxt(str(results_path), delimiter=',', dtype=np.float64)) else: raise Exception('Result not found. 
{}'.format(results_path)) tracker_results.append(pred_bb) # Convert to list of shape seq_length * num_trackers * 4 tracker_results = torch.stack(tracker_results, dim=1).tolist() tracker_names = [_get_display_name(t) for t in trackers] display = Display(len(tracker_results), plot_draw_styles, sequence.name) while display.active: frame_number = display.frame_number image = read_image(sequence.frames[frame_number]) display.show(image, tracker_results[frame_number], tracker_names) time.sleep(0.01) if display.pause_mode and display.frame_number == frame_number: time.sleep(0.1) elif not display.pause_mode: display.step() ================================================ FILE: external/AR/pytracking/analysis/plot_results.py ================================================ import tikzplotlib import matplotlib import matplotlib.pyplot as plt import os import torch import pickle import json from pytracking.evaluation.environment import env_settings from pytracking.analysis.extract_results import extract_results def get_plot_draw_styles(): plot_draw_style = [{'color': (1.0, 0.0, 0.0), 'line_style': '-'}, {'color': (0.0, 1.0, 0.0), 'line_style': '-'}, {'color': (0.0, 0.0, 1.0), 'line_style': '-'}, {'color': (0.0, 0.0, 0.0), 'line_style': '-'}, {'color': (1.0, 0.0, 1.0), 'line_style': '-'}, {'color': (0.0, 1.0, 1.0), 'line_style': '-'}, {'color': (0.5, 0.5, 0.5), 'line_style': '-'}, {'color': (136.0 / 255.0, 0.0, 21.0 / 255.0), 'line_style': '-'}, {'color': (1.0, 127.0 / 255.0, 39.0 / 255.0), 'line_style': '-'}, {'color': (0.0, 162.0 / 255.0, 232.0 / 255.0), 'line_style': '-'}, {'color': (0.0, 0.5, 0.0), 'line_style': '-'}, {'color': (1.0, 0.5, 0.2), 'line_style': '-'}, {'color': (0.1, 0.4, 0.0), 'line_style': '-'}, {'color': (0.6, 0.3, 0.9), 'line_style': '-'}, {'color': (0.4, 0.7, 0.1), 'line_style': '-'}, {'color': (0.2, 0.1, 0.7), 'line_style': '-'}, {'color': (0.7, 0.6, 0.2), 'line_style': '-'}] return plot_draw_style def check_eval_data_is_valid(eval_data, trackers, 
dataset): """ Checks if the pre-computed results are valid""" seq_names = [s.name for s in dataset] seq_names_saved = eval_data['sequences'] tracker_names_f = [(t.name, t.parameter_name, t.run_id) for t in trackers] tracker_names_f_saved = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']] return seq_names == seq_names_saved and tracker_names_f == tracker_names_f_saved def merge_multiple_runs(eval_data): new_tracker_names = [] ave_success_rate_plot_overlap_merged = [] ave_success_rate_plot_center_merged = [] ave_success_rate_plot_center_norm_merged = [] avg_overlap_all_merged = [] ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm']) avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) trackers = eval_data['trackers'] merged = torch.zeros(len(trackers), dtype=torch.uint8) for i in range(len(trackers)): if merged[i]: continue base_tracker = trackers[i] new_tracker_names.append(base_tracker) match = [t['name'] == base_tracker['name'] and t['param'] == base_tracker['param'] for t in trackers] match = torch.tensor(match) ave_success_rate_plot_overlap_merged.append(ave_success_rate_plot_overlap[:, match, :].mean(1)) ave_success_rate_plot_center_merged.append(ave_success_rate_plot_center[:, match, :].mean(1)) ave_success_rate_plot_center_norm_merged.append(ave_success_rate_plot_center_norm[:, match, :].mean(1)) avg_overlap_all_merged.append(avg_overlap_all[:, match].mean(1)) merged[match] = 1 ave_success_rate_plot_overlap_merged = torch.stack(ave_success_rate_plot_overlap_merged, dim=1) ave_success_rate_plot_center_merged = torch.stack(ave_success_rate_plot_center_merged, dim=1) ave_success_rate_plot_center_norm_merged = torch.stack(ave_success_rate_plot_center_norm_merged, dim=1) avg_overlap_all_merged = 
torch.stack(avg_overlap_all_merged, dim=1) eval_data['trackers'] = new_tracker_names eval_data['ave_success_rate_plot_overlap'] = ave_success_rate_plot_overlap_merged.tolist() eval_data['ave_success_rate_plot_center'] = ave_success_rate_plot_center_merged.tolist() eval_data['ave_success_rate_plot_center_norm'] = ave_success_rate_plot_center_norm_merged.tolist() eval_data['avg_overlap_all'] = avg_overlap_all_merged.tolist() return eval_data def get_tracker_display_name(tracker): if tracker['disp_name'] is None: if tracker['run_id'] is None: disp_name = '{}_{}'.format(tracker['name'], tracker['param']) else: disp_name = '{}_{}_{:03d}'.format(tracker['name'], tracker['param'], tracker['run_id']) else: disp_name = tracker['disp_name'] return disp_name def plot_draw_save(y, x, scores, trackers, plot_draw_styles, result_plot_path, plot_opts): # Plot settings font_size = plot_opts.get('font_size', 12) font_size_axis = plot_opts.get('font_size_axis', 13) line_width = plot_opts.get('line_width', 2) font_size_legend = plot_opts.get('font_size_legend', 13) plot_type = plot_opts['plot_type'] legend_loc = plot_opts['legend_loc'] xlabel = plot_opts['xlabel'] ylabel = plot_opts['ylabel'] xlim = plot_opts['xlim'] ylim = plot_opts['ylim'] title = plot_opts['title'] matplotlib.rcParams.update({'font.size': font_size}) matplotlib.rcParams.update({'axes.titlesize': font_size_axis}) matplotlib.rcParams.update({'axes.titleweight': 'black'}) matplotlib.rcParams.update({'axes.labelsize': font_size_axis}) fig, ax = plt.subplots() index_sort = scores.argsort(descending=False) plotted_lines = [] legend_text = [] for id, id_sort in enumerate(index_sort): line = ax.plot(x.tolist(), y[id_sort, :].tolist(), linewidth=line_width, color=plot_draw_styles[index_sort.numel() - id - 1]['color'], linestyle=plot_draw_styles[index_sort.numel() - id - 1]['line_style']) plotted_lines.append(line[0]) tracker = trackers[id_sort] disp_name = get_tracker_display_name(tracker) legend_text.append('{} 
[{:.1f}]'.format(disp_name, scores[id_sort])) ax.legend(plotted_lines[::-1], legend_text[::-1], loc=legend_loc, fancybox=False, edgecolor='black', fontsize=font_size_legend, framealpha=1.0) ax.set(xlabel=xlabel, ylabel=ylabel, xlim=xlim, ylim=ylim, title=title) ax.grid(True, linestyle='-.') fig.tight_layout() tikzplotlib.save('{}/{}_plot.tex'.format(result_plot_path, plot_type)) fig.savefig('{}/{}_plot.pdf'.format(result_plot_path, plot_type), dpi=300, format='pdf', transparent=True) plt.draw() def check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs): # Load data settings = env_settings() # Load pre-computed results result_plot_path = os.path.join(settings.result_plot_path, report_name) eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl') if os.path.isfile(eval_data_path) and not force_evaluation: with open(eval_data_path, 'rb') as fh: eval_data = pickle.load(fh) else: # print('Pre-computed evaluation data not found. Computing results!') eval_data = extract_results(trackers, dataset, report_name, **kwargs) if not check_eval_data_is_valid(eval_data, trackers, dataset): # print('Pre-computed evaluation data invalid. 
Re-computing results!') eval_data = extract_results(trackers, dataset, report_name, **kwargs) else: # Update display names tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name} for t in trackers] eval_data['trackers'] = tracker_names return eval_data def get_auc_curve(ave_success_rate_plot_overlap, valid_sequence): ave_success_rate_plot_overlap = ave_success_rate_plot_overlap[valid_sequence, :, :] auc_curve = ave_success_rate_plot_overlap.mean(0) * 100.0 auc = auc_curve.mean(-1) return auc_curve, auc def get_prec_curve(ave_success_rate_plot_center, valid_sequence): ave_success_rate_plot_center = ave_success_rate_plot_center[valid_sequence, :, :] prec_curve = ave_success_rate_plot_center.mean(0) * 100.0 prec_score = prec_curve[:, 20] return prec_curve, prec_score def plot_results(trackers, dataset, report_name, merge_results=False, plot_types=('success'), force_evaluation=False, **kwargs): """ Plot results for the given trackers args: trackers - List of trackers to evaluate dataset - List of sequences to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved merge_results - If True, multiple random runs for a non-deterministic trackers are averaged plot_types - List of scores to display. 
Can contain 'success', 'prec' (precision), and 'norm_prec' (normalized precision) """ # Load data settings = env_settings() plot_draw_styles = get_plot_draw_styles() # Load pre-computed results result_plot_path = os.path.join(settings.result_plot_path, report_name) eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) print('\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) print('\nGenerating plots for: {}'.format(report_name)) # ******************************** Success Plot ************************************** if 'success' in plot_types: ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) # Index out valid sequences auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence) threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap']) success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold', 'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'} plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts) # ******************************** Precision Plot ************************************** if 'prec' in plot_types: ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence) threshold_set_center = torch.tensor(eval_data['threshold_set_center']) precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right', 'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance 
Precision [%]', 'xlim': (0, 50), 'ylim': (0, 100), 'title': 'Precision plot'} plot_draw_save(prec_curve, threshold_set_center, prec_score, tracker_names, plot_draw_styles, result_plot_path, precision_plot_opts) # ******************************** Norm Precision Plot ************************************** if 'norm_prec' in plot_types: ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence) threshold_set_center_norm = torch.tensor(eval_data['threshold_set_center_norm']) norm_precision_plot_opts = {'plot_type': 'norm_precision', 'legend_loc': 'lower right', 'xlabel': 'Location error threshold', 'ylabel': 'Distance Precision [%]', 'xlim': (0, 0.5), 'ylim': (0, 100), 'title': 'Normalized Precision plot'} plot_draw_save(prec_curve, threshold_set_center_norm, prec_score, tracker_names, plot_draw_styles, result_plot_path, norm_precision_plot_opts) plt.show() def generate_formatted_report(row_labels, scores, table_name=''): name_width = max([len(d) for d in row_labels] + [len(table_name)]) + 5 min_score_width = 10 report_text = '\n{label: <{width}} |'.format(label=table_name, width=name_width) score_widths = [max(min_score_width, len(k) + 3) for k in scores.keys()] for s, s_w in zip(scores.keys(), score_widths): report_text = '{prev} {s: <{width}} |'.format(prev=report_text, s=s, width=s_w) report_text = '{prev}\n'.format(prev=report_text) for trk_id, d_name in enumerate(row_labels): # display name report_text = '{prev}{tracker: <{width}} |'.format(prev=report_text, tracker=d_name, width=name_width) for (score_type, score_value), s_w in zip(scores.items(), score_widths): report_text = '{prev} {score: <{width}} |'.format(prev=report_text, score='{:0.2f}'.format(score_value[trk_id].item()), width=s_w) report_text = '{prev}\n'.format(prev=report_text) return report_text def print_results(trackers, dataset, report_name, 
merge_results=False, plot_types=('success'), **kwargs): """ Print the results for the given trackers in a formatted table args: trackers - List of trackers to evaluate dataset - List of sequences to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved merge_results - If True, multiple random runs for a non-deterministic trackers are averaged plot_types - List of scores to display. Can contain 'success' (prints AUC, OP50, and OP75 scores), 'prec' (prints precision score), and 'norm_prec' (prints normalized precision score) """ # Load pre-computed results eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) print('\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) scores = {} # ******************************** Success Plot ************************************** if 'success' in plot_types: threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap']) ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) # Index out valid sequences auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence) scores['AUC'] = auc scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50] scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75] # ******************************** Precision Plot ************************************** if 'prec' in plot_types: ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence) scores['Precision'] = prec_score # ******************************** Norm Precision Plot 
********************************* if 'norm_prec' in plot_types: ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm']) # Index out valid sequences norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence) scores['Norm Precision'] = norm_prec_score # Print tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names] report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name) print(report_text) def plot_got_success(trackers, report_name): """ Plot success plot for GOT-10k dataset using the json reports. Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to env_settings.got_reports_path The tracker name in the experiment file should be set to the name of the report file for that tracker, e.g. DiMP50_report_2019_09_02_15_44_25 if the report is name DiMP50_report_2019_09_02_15_44_25.json args: trackers - List of trackers to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved """ # Load data settings = env_settings() plot_draw_styles = get_plot_draw_styles() result_plot_path = os.path.join(settings.result_plot_path, report_name) auc_curve = torch.zeros((len(trackers), 101)) scores = torch.zeros(len(trackers)) # Load results tracker_names = [] for trk_id, trk in enumerate(trackers): json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name) if os.path.isfile(json_path): with open(json_path, 'r') as f: eval_data = json.load(f) else: raise Exception('Report not found {}'.format(json_path)) if len(eval_data.keys()) > 1: raise Exception # First field is the tracker name. 
Index it out eval_data = eval_data[list(eval_data.keys())[0]] if 'succ_curve' in eval_data.keys(): curve = eval_data['succ_curve'] ao = eval_data['ao'] elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys(): curve = eval_data['overall']['succ_curve'] ao = eval_data['overall']['ao'] else: raise Exception('Invalid JSON file {}'.format(json_path)) auc_curve[trk_id, :] = torch.tensor(curve) * 100.0 scores[trk_id] = ao * 100.0 tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id, 'disp_name': trk.display_name}) threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64) success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold', 'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'} plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts) plt.show() def print_per_sequence_results(trackers, dataset, report_name, merge_results=False, filter_criteria=None, **kwargs): """ Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered using the filter criteria. args: trackers - List of trackers to evaluate dataset - List of sequences to evaluate report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved merge_results - If True, multiple random runs for a non-deterministic trackers are averaged filter_criteria - Filter sequence results which are reported. Following modes are supported None: No filtering. Display results for all sequences in dataset 'ao_min': Only display sequences for which the minimum average overlap (AO) score over the trackers is less than a threshold filter_criteria['threshold']. This mode can be used to select sequences where at least one tracker performs poorly. 
'ao_max': Only display sequences for which the maximum average overlap (AO) score over the trackers is less than a threshold filter_criteria['threshold']. This mode can be used to select sequences all tracker performs poorly. 'delta_ao': Only display sequences for which the performance of different trackers vary by at least filter_criteria['threshold'] in average overlap (AO) score. This mode can be used to select sequences where the behaviour of the trackers greatly differ between each other. """ # Load pre-computed results eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) sequence_names = eval_data['sequences'] avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0 # Filter sequences if filter_criteria is not None: if filter_criteria['mode'] == 'ao_min': min_ao = avg_overlap_all.min(dim=1)[0] valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold']) elif filter_criteria['mode'] == 'ao_max': max_ao = avg_overlap_all.max(dim=1)[0] valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold']) elif filter_criteria['mode'] == 'delta_ao': min_ao = avg_overlap_all.min(dim=1)[0] max_ao = avg_overlap_all.max(dim=1)[0] valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold']) else: raise Exception avg_overlap_all = avg_overlap_all[valid_sequence, :] sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in enumerate(zip(sequence_names, valid_sequence.tolist())) if v] tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names] scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)} report_text = generate_formatted_report(sequence_names, scores_per_tracker) print(report_text) 
================================================ FILE: external/AR/pytracking/analysis/vos_utils.py ================================================ import warnings import numpy as np from skimage.morphology import binary_dilation, disk from math import floor def text_bargraph(values): blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o')) nsteps = len(blocks)-2-1 hstep = 1 / (2*nsteps) values = np.array(values) nans = np.isnan(values) values[nans] = 0 # '░' indices = ((values + hstep) * nsteps + 1).astype(np.int) indices[values < 0] = 0 indices[values > 1] = len(blocks)-1 graph = blocks[indices] graph[nans] = '░' graph = str.join('', graph) return graph # ---------------------------------------------------------------------------- # The 2017 DAVIS Challenge on Video Object Segmentation # ----------------------------------------------------------------------------- # Copyright (c) 2017 Federico Perazzi # Licensed under the BSD License [see LICENSE for details] # Written by Federico Perazzi (federico@disneyresearch.com) # Adapted from DAVIS 2016 (Federico Perazzi) # ---------------------------------------------------------------------------- # Originally db_eval_iou() in the davis challenge toolkit: def davis_jaccard_measure(fg_mask, gt_mask): """ Compute region similarity as the Jaccard Index. :param fg_mask: (ndarray): binary segmentation map. :param gt_mask: (ndarray): binary annotation map. :return: jaccard (float): region similarity """ gt_mask = gt_mask.astype(np.bool) fg_mask = fg_mask.astype(np.bool) if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0): return 1 else: return np.sum((gt_mask & fg_mask)) / \ np.sum((gt_mask | fg_mask), dtype=np.float32) def davis_jaccard_measure_torch(fg_mask, gt_mask): """ Compute region similarity as the Jaccard Index. :param fg_mask: (ndarray): binary segmentation map. :param gt_mask: (ndarray): binary annotation map. 
:return: jaccard (float): region similarity """ #gt_mask = gt_mask.astype(np.bool) #fg_mask = fg_mask.astype(np.bool) if gt_mask.sum() == 0 and fg_mask.sum() == 0: return 1 else: return (gt_mask & fg_mask).sum() / \ (gt_mask | fg_mask).sum().float() # Originally db_eval_boundary() in the davis challenge toolkit: def davis_f_measure(foreground_mask, gt_mask, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. Returns: F (float): boundaries F-measure P (float): boundaries precision R (float): boundaries recall """ assert np.atleast_3d(foreground_mask).shape[2] == 1 bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = seg2bmap(foreground_mask) gt_boundary = seg2bmap(gt_mask) fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. 
width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not (width > w | height > h | abs(ar1 - ar2) > 0.01), \ 'Can''t convert %dx%d seg to %dx%d bmap.' % (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + floor((y - 1) + height / h) i = 1 + floor((x - 1) + width / h) bmap[j, i] = 1 return bmap def nanmean(*args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) return np.nanmean(*args, **kwargs) def mean(X): """ Compute average ignoring NaN values. """ return np.nanmean(X) def recall(X, threshold=0.5): """ Fraction of values of X scoring higher than 'threshold' """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) x = X[~np.isnan(X)] x = mean(x > threshold) return x def decay(X, n_bins=4): """ Performance loss over time. """ X = X[~np.isnan(X)] ids = np.round(np.linspace(1, len(X), n_bins + 1) + 1e-10) - 1 ids = ids.astype(np.uint8) D_bins = [X[ids[i]:ids[i + 1] + 1] for i in range(0, 4)] with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[3]) return D def std(X): """ Compute standard deviation. 
""" return np.nanstd(X) ================================================ FILE: external/AR/pytracking/evaluation/__init__.py ================================================ from .data import Sequence from .tracker import Tracker, trackerlist from .datasets import get_dataset ================================================ FILE: external/AR/pytracking/evaluation/data.py ================================================ import numpy as np from pytracking.evaluation.environment import env_settings from ltr.data.image_loader import imread_indexed from collections import OrderedDict class BaseDataset: """Base class for all datasets.""" def __init__(self): self.env_settings = env_settings() def __len__(self): """Overload this function in your dataset. This should return number of sequences in the dataset.""" raise NotImplementedError def get_sequence_list(self): """Overload this in your dataset. Should return the list of sequences in the dataset.""" raise NotImplementedError class Sequence: """Class for the sequence in an evaluation.""" def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None, object_class=None, target_visible=None, object_ids=None, multiobj_mode=False): self.name = name self.frames = frames self.dataset = dataset self.ground_truth_rect = ground_truth_rect self.ground_truth_seg = ground_truth_seg self.object_class = object_class self.target_visible = target_visible self.object_ids = object_ids self.multiobj_mode = multiobj_mode self.init_data = self._construct_init_data(init_data) self._ensure_start_frame() def _ensure_start_frame(self): # Ensure start frame is 0 start_frame = min(list(self.init_data.keys())) if start_frame > 0: self.frames = self.frames[start_frame:] if self.ground_truth_rect is not None: if isinstance(self.ground_truth_rect, (dict, OrderedDict)): for obj_id, gt in self.ground_truth_rect.items(): self.ground_truth_rect[obj_id] = gt[start_frame:,:] else: self.ground_truth_rect = 
self.ground_truth_rect[start_frame:,:] if self.ground_truth_seg is not None: self.ground_truth_seg = self.ground_truth_seg[start_frame:] assert len(self.frames) == len(self.ground_truth_seg) if self.target_visible is not None: self.target_visible = self.target_visible[start_frame:] self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()} def _construct_init_data(self, init_data): if init_data is not None: if not self.multiobj_mode: assert self.object_ids is None or len(self.object_ids) == 1 for frame, init_val in init_data.items(): if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)): init_val['bbox'] = init_val['bbox'][self.object_ids[0]] # convert to list for frame, init_val in init_data.items(): if 'bbox' in init_val: if isinstance(init_val['bbox'], (dict, OrderedDict)): init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()}) else: init_val['bbox'] = list(init_val['bbox']) else: init_data = {0: dict()} # Assume start from frame 0 if self.object_ids is not None: init_data[0]['object_ids'] = self.object_ids if self.ground_truth_rect is not None: if self.multiobj_mode: assert isinstance(self.ground_truth_rect, (dict, OrderedDict)) init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()}) else: assert self.object_ids is None or len(self.object_ids) == 1 if isinstance(self.ground_truth_rect, (dict, OrderedDict)): init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :]) else: init_data[0]['bbox'] = list(self.ground_truth_rect[0,:]) if self.ground_truth_seg is not None: init_data[0]['mask'] = self.ground_truth_seg[0] return init_data def init_info(self): info = self.frame_info(frame_num=0) return info def frame_info(self, frame_num): info = self.object_init_data(frame_num=frame_num) return info def init_bbox(self, frame_num=0): return self.object_init_data(frame_num=frame_num).get('init_bbox') def 
init_mask(self, frame_num=0): return self.object_init_data(frame_num=frame_num).get('init_mask') def get_info(self, keys, frame_num=None): info = dict() for k in keys: val = self.get(k, frame_num=frame_num) if val is not None: info[k] = val return info def object_init_data(self, frame_num=None) -> dict: if frame_num is None: frame_num = 0 if frame_num not in self.init_data: return dict() init_data = dict() for key, val in self.init_data[frame_num].items(): if val is None: continue init_data['init_'+key] = val if 'init_mask' in init_data and init_data['init_mask'] is not None: anno = imread_indexed(init_data['init_mask']) if not self.multiobj_mode and self.object_ids is not None: assert len(self.object_ids) == 1 anno = (anno == int(self.object_ids[0])).astype(np.uint8) init_data['init_mask'] = anno if self.object_ids is not None: init_data['object_ids'] = self.object_ids init_data['sequence_object_ids'] = self.object_ids return init_data def target_class(self, frame_num=None): return self.object_class def get(self, name, frame_num=None): return getattr(self, name)(frame_num) def __repr__(self): return "{self.__class__.__name__} {self.name}, length={len} frames".format(self=self, len=len(self.frames)) class SequenceList(list): """List of sequences. 
Supports the addition operator to concatenate sequence lists.""" def __getitem__(self, item): if isinstance(item, str): for seq in self: if seq.name == item: return seq raise IndexError('Sequence name not in the dataset.') elif isinstance(item, int): return super(SequenceList, self).__getitem__(item) elif isinstance(item, (tuple, list)): return SequenceList([super(SequenceList, self).__getitem__(i) for i in item]) else: return SequenceList(super(SequenceList, self).__getitem__(item)) def __add__(self, other): return SequenceList(super(SequenceList, self).__add__(other)) def copy(self): return SequenceList(super(SequenceList, self).copy()) ================================================ FILE: external/AR/pytracking/evaluation/datasets.py ================================================ from collections import namedtuple import importlib from pytracking.evaluation.data import SequenceList DatasetInfo = namedtuple('DatasetInfo', ['module', 'class_name', 'kwargs']) pt = "pytracking.evaluation.%sdataset" # Useful abbreviations to reduce the clutter dataset_dict = dict( otb=DatasetInfo(module=pt % "otb", class_name="OTBDataset", kwargs=dict()), nfs=DatasetInfo(module=pt % "nfs", class_name="NFSDataset", kwargs=dict()), uav=DatasetInfo(module=pt % "uav", class_name="UAVDataset", kwargs=dict()), tpl=DatasetInfo(module=pt % "tpl", class_name="TPLDataset", kwargs=dict()), tpl_nootb=DatasetInfo(module=pt % "tpl", class_name="TPLDataset", kwargs=dict(exclude_otb=True)), vot=DatasetInfo(module=pt % "vot", class_name="VOTDataset", kwargs=dict()), trackingnet=DatasetInfo(module=pt % "trackingnet", class_name="TrackingNetDataset", kwargs=dict()), got10k_test=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='test')), got10k_val=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='val')), got10k_ltrval=DatasetInfo(module=pt % "got10k", class_name="GOT10KDataset", kwargs=dict(split='ltrval')), lasot=DatasetInfo(module=pt % 
"lasot", class_name="LaSOTDataset", kwargs=dict()), dv2017_val=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2017', split='val')), dv2016_val=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2016', split='val')), dv2017_test_dev=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2017', split='test-dev')), dv2017_test_chal=DatasetInfo(module="ltr.dataset.davis", class_name="Davis", kwargs=dict(version='2017', split='test-challenge')), yt2019_test=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2019', split='test')), yt2019_valid=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2019', split='valid')), yt2019_valid_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2019', split='valid', all_frames=True)), yt2018_valid_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2018', split='valid', all_frames=True)), yt2018_jjval=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2018', split='jjvalid')), yt2019_jjval=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2019', split='jjvalid', cleanup=['starts'])), yt2019_jjval_all=DatasetInfo(module="ltr.dataset.youtubevos", class_name="YouTubeVOS", kwargs=dict(version='2019', split='jjvalid', all_frames=True, cleanup=['starts'])), ) def load_dataset(name: str): """ Import and load a single dataset.""" name = name.lower() dset_info = dataset_dict.get(name) if dset_info is None: raise ValueError('Unknown dataset \'%s\'' % name) m = importlib.import_module(dset_info.module) dataset = getattr(m, dset_info.class_name)(**dset_info.kwargs) # Call the constructor return dataset.get_sequence_list() def get_dataset(*args): """ Get a single or set of datasets.""" dset = SequenceList() for name 
in args: dset.extend(load_dataset(name)) return dset ================================================ FILE: external/AR/pytracking/evaluation/environment.py ================================================ import importlib import os class EnvSettings: def __init__(self): pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) self.results_path = '{}/tracking_results/'.format(pytracking_path) self.segmentation_path = '{}/segmentation_results/'.format(pytracking_path) self.network_path = '{}/networks/'.format(pytracking_path) self.result_plot_path = '{}/result_plots/'.format(pytracking_path) self.otb_path = '' self.nfs_path = '' self.uav_path = '' self.tpl_path = '' self.vot_path = '' self.got10k_path = '' self.lasot_path = '' self.trackingnet_path = '' self.davis_dir = '' self.youtubevos_dir = '' self.got_packed_results_path = '' self.got_reports_path = '' self.tn_packed_results_path = '' def create_default_local_file(): comment = {'results_path': 'Where to store tracking results', 'network_path': 'Where tracking networks are stored.'} path = os.path.join(os.path.dirname(__file__), 'local.py') with open(path, 'w') as f: settings = EnvSettings() f.write('from pytracking.evaluation.environment import EnvSettings\n\n') f.write('def local_env_settings():\n') f.write(' settings = EnvSettings()\n\n') f.write(' # Set your local paths here.\n\n') for attr in dir(settings): comment_str = None if attr in comment: comment_str = comment[attr] attr_val = getattr(settings, attr) if not attr.startswith('__') and not callable(attr_val): if comment_str is None: f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val)) else: f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str)) f.write('\n return settings\n\n') def env_settings(): env_module_name = 'pytracking.evaluation.local' try: env_module = importlib.import_module(env_module_name) return env_module.local_env_settings() except: env_file = os.path.join(os.path.dirname(__file__), 
'local.py') # Create a default file create_default_local_file() raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. ' 'Then try to run again.'.format(env_file)) ================================================ FILE: external/AR/pytracking/evaluation/got10kdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text import os class GOT10KDataset(BaseDataset): """ GOT-10k dataset. Publication: GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild Lianghua Huang, Xin Zhao, and Kaiqi Huang arXiv:1810.11981, 2018 https://arxiv.org/pdf/1810.11981.pdf Download dataset from http://got-10k.aitestunion.com/downloads """ def __init__(self, split): super().__init__() # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set) if split == 'test' or split == 'val': self.base_path = os.path.join(self.env_settings.got10k_path, split) else: self.base_path = os.path.join(self.env_settings.got10k_path, 'train') self.sequence_list = self._get_sequence_list(split) self.split = split def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64) frames_path = '{}/{}'.format(self.base_path, sequence_name) frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")] frame_list.sort(key=lambda f: int(f[:-4])) frames_list = [os.path.join(frames_path, frame) for frame in frame_list] return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4)) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self, split): with 
# --- Truncated tail of the preceding GOT-10k dataset file: end of a
# _get_sequence_list(split) method (its opening `with` lies above this window). ---
open('{}/list.txt'.format(self.base_path)) as f:
    sequence_list = f.read().splitlines()
if split == 'ltrval':
    # The 'ltrval' split selects a subset of sequences by line index.
    with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f:
        seq_ids = f.read().splitlines()
    sequence_list = [sequence_list[int(x)] for x in seq_ids]
return sequence_list
================================================
FILE: external/AR/pytracking/evaluation/lasotdataset.py
================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from pytracking.utils.load_text import load_text


class LaSOTDataset(BaseDataset):
    """LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper).

    Publication:
        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking
        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai,
        Yong Xu, Chunyuan Liao and Haibin Ling
        CVPR, 2019
        https://arxiv.org/pdf/1809.07845.pdf

    Download the dataset from https://cis.temple.edu/lasot/download.html
    """
    def __init__(self):
        super().__init__()
        # Dataset root, taken from the local environment settings file.
        self.base_path = self.env_settings.lasot_path
        self.sequence_list = self._get_sequence_list()
        self.clean_list = self.clean_seq_list()

    def clean_seq_list(self):
        """Return the class name of every sequence (e.g. 'airplane-1' -> 'airplane')."""
        clean_lst = []
        for i in range(len(self.sequence_list)):
            cls, _ = self.sequence_list[i].split('-')
            clean_lst.append(cls)
        return clean_lst

    def get_sequence_list(self):
        """Build a Sequence object for every sequence name in the split."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        # Sequences are stored as <base>/<class>/<class-idx>/ on disk.
        class_name = sequence_name.split('-')[0]
        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)
        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)

        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)
        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks
        full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy')

        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name)
        out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy')

        # Target counts as visible only when neither fully occluded nor out of view.
        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)

        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name)
        # Frame files are 1-based and zero-padded to 8 digits.
        frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)]

        target_class = class_name
        return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4),
                        object_class=target_class, target_visible=target_visible)

    def __len__(self):
        return len(self.sequence_list)

    def _get_sequence_list(self):
        # Hard-coded Protocol-II test split: 4 sequences per class.
        sequence_list = ['airplane-1', 'airplane-9', 'airplane-13', 'airplane-15',
                         'basketball-1', 'basketball-6', 'basketball-7', 'basketball-11',
                         'bear-2', 'bear-4', 'bear-6', 'bear-17',
                         'bicycle-2', 'bicycle-7', 'bicycle-9', 'bicycle-18',
                         'bird-2', 'bird-3', 'bird-15', 'bird-17',
                         'boat-3', 'boat-4', 'boat-12', 'boat-17',
                         'book-3', 'book-10', 'book-11', 'book-19',
                         'bottle-1', 'bottle-12', 'bottle-14', 'bottle-18',
                         'bus-2', 'bus-5', 'bus-17', 'bus-19',
                         'car-2', 'car-6', 'car-9', 'car-17',
                         'cat-1', 'cat-3', 'cat-18', 'cat-20',
                         'cattle-2', 'cattle-7', 'cattle-12', 'cattle-13',
                         'spider-14', 'spider-16', 'spider-18', 'spider-20',
                         'coin-3', 'coin-6', 'coin-7', 'coin-18',
                         'crab-3', 'crab-6', 'crab-12', 'crab-18',
                         'surfboard-12', 'surfboard-4', 'surfboard-5', 'surfboard-8',
                         'cup-1', 'cup-4', 'cup-7', 'cup-17',
                         'deer-4', 'deer-8', 'deer-10', 'deer-14',
                         'dog-1', 'dog-7', 'dog-15', 'dog-19',
                         'guitar-3', 'guitar-8', 'guitar-10', 'guitar-16',
                         'person-1', 'person-5', 'person-10', 'person-12',
                         'pig-2', 'pig-10', 'pig-13', 'pig-18',
                         'rubicCube-1', 'rubicCube-6', 'rubicCube-14', 'rubicCube-19',
                         'swing-10', 'swing-14', 'swing-17', 'swing-20',
                         'drone-13', 'drone-15', 'drone-2', 'drone-7',
                         'pool-12', 'pool-15', 'pool-3', 'pool-7',
                         'rabbit-10', 'rabbit-13', 'rabbit-17', 'rabbit-19',
                         'racing-10', 'racing-15', 'racing-16', 'racing-20',
                         'robot-1', 'robot-19', 'robot-5', 'robot-8',
                         'sepia-13', 'sepia-16', 'sepia-6', 'sepia-8',
                         'sheep-3', 'sheep-5', 'sheep-7', 'sheep-9',
                         'skateboard-16', 'skateboard-19', 'skateboard-3', 'skateboard-8',
                         'tank-14', 'tank-16', 'tank-6', 'tank-9',
                         'tiger-12', 'tiger-18', 'tiger-4', 'tiger-6',
                         'train-1', 'train-11', 'train-20', 'train-7',
                         'truck-16', 'truck-3', 'truck-6', 'truck-7',
                         'turtle-16', 'turtle-5', 'turtle-8', 'turtle-9',
                         'umbrella-17', 'umbrella-19', 'umbrella-2', 'umbrella-9',
                         'yoyo-15', 'yoyo-17', 'yoyo-19', 'yoyo-7',
                         'zebra-10', 'zebra-14', 'zebra-16', 'zebra-17',
                         'elephant-1', 'elephant-12', 'elephant-16', 'elephant-18',
                         'goldfish-3', 'goldfish-7', 'goldfish-8', 'goldfish-10',
                         'hat-1', 'hat-2', 'hat-5', 'hat-18',
                         'kite-4', 'kite-6', 'kite-10', 'kite-15',
                         'motorcycle-1', 'motorcycle-3', 'motorcycle-9', 'motorcycle-18',
                         'mouse-1', 'mouse-8', 'mouse-9', 'mouse-17',
                         'flag-3', 'flag-9', 'flag-5', 'flag-2',
                         'frog-3', 'frog-4', 'frog-20', 'frog-9',
                         'gametarget-1', 'gametarget-2', 'gametarget-7', 'gametarget-13',
                         'hand-2', 'hand-3', 'hand-9', 'hand-16',
                         'helmet-5', 'helmet-11', 'helmet-19', 'helmet-13',
                         'licenseplate-6', 'licenseplate-12', 'licenseplate-13', 'licenseplate-15',
                         'electricfan-1', 'electricfan-10', 'electricfan-18', 'electricfan-20',
                         'chameleon-3', 'chameleon-6', 'chameleon-11', 'chameleon-20',
                         'crocodile-3', 'crocodile-4', 'crocodile-10', 'crocodile-14',
                         'gecko-1', 'gecko-5', 'gecko-16', 'gecko-19',
                         'fox-2', 'fox-3', 'fox-5', 'fox-20',
                         'giraffe-2', 'giraffe-10', 'giraffe-13', 'giraffe-15',
                         'gorilla-4', 'gorilla-6', 'gorilla-9', 'gorilla-13',
                         'hippo-1', 'hippo-7', 'hippo-9', 'hippo-20',
                         'horse-1', 'horse-4', 'horse-12', 'horse-15',
                         'kangaroo-2', 'kangaroo-5', 'kangaroo-11', 'kangaroo-14',
                         'leopard-1', 'leopard-7', 'leopard-16', 'leopard-20',
                         'lion-1', 'lion-5', 'lion-12', 'lion-20',
                         'lizard-1', 'lizard-3', 'lizard-6', 'lizard-13',
                         'microphone-2', 'microphone-6', 'microphone-14', 'microphone-16',
                         'monkey-3', 'monkey-4', 'monkey-9', 'monkey-17',
                         'shark-2', 'shark-3', 'shark-5', 'shark-6',
                         'squirrel-8', 'squirrel-11', 'squirrel-13', 'squirrel-19',
                         'volleyball-1', 'volleyball-13', 'volleyball-18', 'volleyball-19']
        return sequence_list
================================================
FILE: external/AR/pytracking/evaluation/mobifacedataset.py
================================================
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
import glob
import numpy as np
import os.path as osp
from collections import OrderedDict
import pandas as pd


class MobifaceDataset(BaseDataset):
    """ Mobiface dataset.

    Publication:
        MobiFace: A Novel Dataset for Mobile Face Tracking in the Wild
        Yiming Lin, Shiyang Cheng, Jie Shen, Maja Pantic
        arXiv:1805.09749, 2018
        https://arxiv.org/pdf/1805.09749v2

    Download dataset from https://mobiface.github.io/
    """
    def __init__(self, split):
        """
        args:
            split - Split to use. Can be i) 'train': official training set, ii) 'test': official test set,
                    iii) 'all': whole dataset.
""" super().__init__() self.base_path = self.env_settings.mobiface_path self.sequence_list = self._get_sequence_list(split) self.split = split def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _get_sequence_list(self, split): self.train_meta_fn = osp.join(self.base_path, 'train.meta.csv') self.test_meta_fn = osp.join(self.base_path, 'test.meta.csv') self.train_meta = pd.read_csv(self.train_meta_fn,index_col=0).transpose().to_dict() self.test_meta = pd.read_csv(self.test_meta_fn,index_col=0).transpose().to_dict() if split == 'train': self.meta = self.train_meta elif split == 'test': self.meta = self.test_meta else: self.meta = {**self.train_meta, **self.test_meta} # In Python 3.5 or greater self.meta = OrderedDict(sorted(self.meta.items(), key=lambda t: t[0])) self.anno_files = [] for k,v in self.meta.items(): if k in self.train_meta.keys(): self.anno_files.append(osp.abspath(osp.join(self.base_path,'train', k+'.annot.csv'))) else: self.anno_files.append(osp.abspath(osp.join(self.base_path,'test', k+'.annot.csv'))) self.seq_names = sorted(list(self.meta.keys())) self.seq_dirs = [fn[:-len('.annot.csv')] for fn in self.anno_files] return self.seq_names def _construct_sequence(self, sequence_name): index = self.seq_names.index(sequence_name) img_files = sorted(glob.glob(self.seq_dirs[index]+'/*.jpg')) if len(img_files) == 0: img_files = sorted(glob.glob(self.seq_dirs[index]+'.png')) with open(self.anno_files[index], 'r') as f: anno = np.loadtxt(f, delimiter=',', skiprows=1, dtype=int) anno = anno[:,1:] assert anno.shape[1] == 4 return Sequence(sequence_name, img_files, anno.reshape(-1, 4)) def __len__(self): return len(self.sequence_list) ================================================ FILE: external/AR/pytracking/evaluation/multi_object_wrapper.py ================================================ import numpy as np from collections import OrderedDict import time import copy class MultiObjectWrapper: def 
__init__(self, base_tracker_class, params, visdom=None, fast_load=False): self.base_tracker_class = base_tracker_class self.params = params self.visdom = visdom self.initialized_ids = [] self.trackers = OrderedDict() self.fast_load = fast_load if self.fast_load: self.tracker_copy = self.base_tracker_class(self.params) if hasattr(self.tracker_copy, 'initialize_features'): self.tracker_copy.initialize_features() def create_tracker(self): tracker = None if self.fast_load: try: tracker = copy.deepcopy(self.tracker_copy) except: pass if tracker is None: tracker = self.base_tracker_class(self.params) tracker.visdom = self.visdom return tracker def _split_info(self, info): info_split = OrderedDict() init_other = OrderedDict() # Init other contains init info for all other objects for obj_id in info['init_object_ids']: info_split[obj_id] = dict() init_other[obj_id] = dict() info_split[obj_id]['object_ids'] = [obj_id] info_split[obj_id]['sequence_object_ids'] = info['sequence_object_ids'] if 'init_bbox' in info: info_split[obj_id]['init_bbox'] = info['init_bbox'][obj_id] init_other[obj_id]['init_bbox'] = info['init_bbox'][obj_id] if 'init_mask' in info: info_split[obj_id]['init_mask'] = (info['init_mask'] == int(obj_id)).astype(np.uint8) init_other[obj_id]['init_mask'] = info_split[obj_id]['init_mask'] for obj_info in info_split.values(): obj_info['init_other'] = init_other return info_split def _set_defaults(self, tracker_out: dict, defaults=None): defaults = {} if defaults is None else defaults for key, val in defaults.items(): if tracker_out.get(key) is None: tracker_out[key] = val return tracker_out def default_merge(self, out_all): out_merged = OrderedDict() out_first = list(out_all.values())[0] out_types = out_first.keys() # Merge segmentation mask if 'segmentation' in out_types and out_first['segmentation'] is not None: # Stack all masks # If a tracker outputs soft segmentation mask, use that. 
Else use the binary segmentation segmentation_maps = [out.get('segmentation_soft', out['segmentation']) for out in out_all.values()] segmentation_maps = np.stack(segmentation_maps) obj_ids = np.array([0, *map(int, out_all.keys())], dtype=np.uint8) segm_threshold = getattr(self.params, 'segmentation_threshold', 0.5) merged_segmentation = obj_ids[np.where(segmentation_maps.max(axis=0) > segm_threshold, segmentation_maps.argmax(axis=0) + 1, 0)] out_merged['segmentation'] = merged_segmentation # Merge other fields for key in out_types: if key == 'segmentation': pass else: out_merged[key] = {obj_id: out[key] for obj_id, out in out_all.items()} return out_merged def merge_outputs(self, out_all): if hasattr(self.base_tracker_class, 'merge_results'): out_merged = self.base_tracker_class.merge_results(out_all) else: out_merged = self.default_merge(out_all) return out_merged def initialize(self, image, info: dict) -> dict: self.initialized_ids = [] self.trackers = OrderedDict() if len(info['init_object_ids']) == 0: return None object_ids = info['object_ids'] init_info_split = self._split_info(info) self.trackers = OrderedDict({obj_id: self.create_tracker() for obj_id in object_ids}) out_all = OrderedDict() # Run individual trackers for each object for obj_id in info['init_object_ids']: start_time = time.time() out = self.trackers[obj_id].initialize(image, init_info_split[obj_id]) if out is None: out = {} init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info_split[obj_id].get('init_mask')} out = self._set_defaults(out, init_default) out_all[obj_id] = out # Merge results out_merged = self.merge_outputs(out_all) self.initialized_ids = info['init_object_ids'].copy() return out_merged def track(self, image, info: dict = None) -> dict: if info is None: info = {} prev_output = info.get('previous_output', OrderedDict()) if info.get('init_object_ids', False): init_info_split = self._split_info(info) for 
obj_init_info in init_info_split.values(): obj_init_info['previous_output'] = prev_output info['init_other'] = list(init_info_split.values())[0]['init_other'] out_all = OrderedDict() for obj_id in self.initialized_ids: start_time = time.time() out = self.trackers[obj_id].track(image, info) default = {'time': time.time() - start_time} out = self._set_defaults(out, default) out_all[obj_id] = out # Initialize new if info.get('init_object_ids', False): for obj_id in info['init_object_ids']: if not obj_id in self.trackers: self.trackers[obj_id] = self.create_tracker() start_time = time.time() out = self.trackers[obj_id].initialize(image, init_info_split[obj_id]) if out is None: out = {} init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info_split[obj_id].get('init_mask')} out = self._set_defaults(out, init_default) out_all[obj_id] = out self.initialized_ids.extend(info['init_object_ids']) # Merge results out_merged = self.merge_outputs(out_all) return out_merged def visdom_draw_tracking(self, image, box, segmentation): if isinstance(box, (OrderedDict, dict)): box = [v for k, v in box.items()] else: box = (box,) if segmentation is None: self.visdom.register((image, *box), 'Tracking', 1, 'Tracking') else: self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking') ================================================ FILE: external/AR/pytracking/evaluation/nfsdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text class NFSDataset(BaseDataset): """ NFS dataset. Publication: Need for Speed: A Benchmark for Higher Frame Rate Object Tracking H. Kiani Galoogahi, A. Fagg, C. Huang, D. 
Ramanan, and S.Lucey ICCV, 2017 http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf Download the dataset from http://ci2cv.net/nfs/index.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.nfs_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter='\t', dtype=np.float64) return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "nfs_Gymnastics", "path": "sequences/Gymnastics", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Gymnastics.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_MachLoop_jet", "path": "sequences/MachLoop_jet", "startFrame": 1, "endFrame": 99, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_MachLoop_jet.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_Skiing_red", "path": "sequences/Skiing_red", "startFrame": 1, "endFrame": 69, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skiing_red.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_Skydiving", 
"path": "sequences/Skydiving", "startFrame": 1, "endFrame": 196, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skydiving.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_airboard_1", "path": "sequences/airboard_1", "startFrame": 1, "endFrame": 425, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airboard_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_airplane_landing", "path": "sequences/airplane_landing", "startFrame": 1, "endFrame": 81, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airplane_landing.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_airtable_3", "path": "sequences/airtable_3", "startFrame": 1, "endFrame": 482, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airtable_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_1", "path": "sequences/basketball_1", "startFrame": 1, "endFrame": 282, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_2", "path": "sequences/basketball_2", "startFrame": 1, "endFrame": 102, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_3", "path": "sequences/basketball_3", "startFrame": 1, "endFrame": 421, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_6", "path": "sequences/basketball_6", "startFrame": 1, "endFrame": 224, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_7", "path": "sequences/basketball_7", "startFrame": 1, "endFrame": 240, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_7.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player", "path": "sequences/basketball_player", "startFrame": 1, "endFrame": 369, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_basketball_player.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player_2", "path": "sequences/basketball_player_2", "startFrame": 1, "endFrame": 437, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_beach_flipback_person", "path": "sequences/beach_flipback_person", "startFrame": 1, "endFrame": 61, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_beach_flipback_person.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_bee", "path": "sequences/bee", "startFrame": 1, "endFrame": 45, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bee.txt", "object_class": "insect", 'occlusion': False}, {"name": "nfs_biker_acrobat", "path": "sequences/biker_acrobat", "startFrame": 1, "endFrame": 128, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_acrobat.txt", "object_class": "bicycle", 'occlusion': False}, {"name": "nfs_biker_all_1", "path": "sequences/biker_all_1", "startFrame": 1, "endFrame": 113, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_all_1.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_head_2", "path": "sequences/biker_head_2", "startFrame": 1, "endFrame": 132, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_2.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_head_3", "path": "sequences/biker_head_3", "startFrame": 1, "endFrame": 254, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_3.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_upper_body", "path": "sequences/biker_upper_body", "startFrame": 1, "endFrame": 194, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_upper_body.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_whole_body", "path": "sequences/biker_whole_body", "startFrame": 1, "endFrame": 572, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_whole_body.txt", "object_class": "person", 
'occlusion': True}, {"name": "nfs_billiard_2", "path": "sequences/billiard_2", "startFrame": 1, "endFrame": 604, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_3", "path": "sequences/billiard_3", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_6", "path": "sequences/billiard_6", "startFrame": 1, "endFrame": 771, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_7", "path": "sequences/billiard_7", "startFrame": 1, "endFrame": 724, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_8", "path": "sequences/billiard_8", "startFrame": 1, "endFrame": 778, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bird_2", "path": "sequences/bird_2", "startFrame": 1, "endFrame": 476, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bird_2.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_book", "path": "sequences/book", "startFrame": 1, "endFrame": 288, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_book.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bottle", "path": "sequences/bottle", "startFrame": 1, "endFrame": 2103, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bottle.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bowling_1", "path": "sequences/bowling_1", "startFrame": 1, "endFrame": 303, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_1.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_2", "path": "sequences/bowling_2", "startFrame": 1, "endFrame": 710, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_2.txt", "object_class": "ball", 'occlusion': True}, {"name": 
"nfs_bowling_3", "path": "sequences/bowling_3", "startFrame": 1, "endFrame": 271, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_3.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_6", "path": "sequences/bowling_6", "startFrame": 1, "endFrame": 260, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bowling_ball", "path": "sequences/bowling_ball", "startFrame": 1, "endFrame": 275, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_ball.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bunny", "path": "sequences/bunny", "startFrame": 1, "endFrame": 705, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bunny.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_car", "path": "sequences/car", "startFrame": 1, "endFrame": 2020, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car.txt", "object_class": "car", 'occlusion': True}, {"name": "nfs_car_camaro", "path": "sequences/car_camaro", "startFrame": 1, "endFrame": 36, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_camaro.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_drifting", "path": "sequences/car_drifting", "startFrame": 1, "endFrame": 173, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_drifting.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_jumping", "path": "sequences/car_jumping", "startFrame": 1, "endFrame": 22, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_jumping.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rolling", "path": "sequences/car_rc_rolling", "startFrame": 1, "endFrame": 62, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rolling.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rotating", "path": "sequences/car_rc_rotating", "startFrame": 1, "endFrame": 80, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rotating.txt", "object_class": "car", 'occlusion': False}, {"name": 
"nfs_car_side", "path": "sequences/car_side", "startFrame": 1, "endFrame": 108, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_side.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_white", "path": "sequences/car_white", "startFrame": 1, "endFrame": 2063, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_white.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_cheetah", "path": "sequences/cheetah", "startFrame": 1, "endFrame": 167, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cheetah.txt", "object_class": "mammal", 'occlusion': True}, {"name": "nfs_cup", "path": "sequences/cup", "startFrame": 1, "endFrame": 1281, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_cup_2", "path": "sequences/cup_2", "startFrame": 1, "endFrame": 182, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_dog", "path": "sequences/dog", "startFrame": 1, "endFrame": 1030, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_1", "path": "sequences/dog_1", "startFrame": 1, "endFrame": 168, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_1.txt", "object_class": "dog", 'occlusion': False}, {"name": "nfs_dog_2", "path": "sequences/dog_2", "startFrame": 1, "endFrame": 594, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_2.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_3", "path": "sequences/dog_3", "startFrame": 1, "endFrame": 200, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_3.txt", "object_class": "dog", 'occlusion': False}, {"name": "nfs_dogs", "path": "sequences/dogs", "startFrame": 1, "endFrame": 198, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dogs.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dollar", "path": "sequences/dollar", "startFrame": 1, "endFrame": 1426, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_dollar.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_drone", "path": "sequences/drone", "startFrame": 1, "endFrame": 70, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_drone.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_ducks_lake", "path": "sequences/ducks_lake", "startFrame": 1, "endFrame": 107, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ducks_lake.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_exit", "path": "sequences/exit", "startFrame": 1, "endFrame": 359, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_exit.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_first", "path": "sequences/first", "startFrame": 1, "endFrame": 435, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_first.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_flower", "path": "sequences/flower", "startFrame": 1, "endFrame": 448, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_flower.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_footbal_skill", "path": "sequences/footbal_skill", "startFrame": 1, "endFrame": 131, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_footbal_skill.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_helicopter", "path": "sequences/helicopter", "startFrame": 1, "endFrame": 310, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_helicopter.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_horse_jumping", "path": "sequences/horse_jumping", "startFrame": 1, "endFrame": 117, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_jumping.txt", "object_class": "horse", 'occlusion': True}, {"name": "nfs_horse_running", "path": "sequences/horse_running", "startFrame": 1, "endFrame": 139, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_running.txt", "object_class": "horse", 'occlusion': False}, {"name": "nfs_iceskating_6", "path": "sequences/iceskating_6", "startFrame": 1, "endFrame": 603, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_iceskating_6.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_jellyfish_5", "path": "sequences/jellyfish_5", "startFrame": 1, "endFrame": 746, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_jellyfish_5.txt", "object_class": "invertebrate", 'occlusion': False}, {"name": "nfs_kid_swing", "path": "sequences/kid_swing", "startFrame": 1, "endFrame": 169, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_kid_swing.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_motorcross", "path": "sequences/motorcross", "startFrame": 1, "endFrame": 39, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross.txt", "object_class": "vehicle", 'occlusion': True}, {"name": "nfs_motorcross_kawasaki", "path": "sequences/motorcross_kawasaki", "startFrame": 1, "endFrame": 65, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross_kawasaki.txt", "object_class": "vehicle", 'occlusion': False}, {"name": "nfs_parkour", "path": "sequences/parkour", "startFrame": 1, "endFrame": 58, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_parkour.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_person_scooter", "path": "sequences/person_scooter", "startFrame": 1, "endFrame": 413, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_person_scooter.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_pingpong_2", "path": "sequences/pingpong_2", "startFrame": 1, "endFrame": 1277, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_7", "path": "sequences/pingpong_7", "startFrame": 1, "endFrame": 1290, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_8", "path": "sequences/pingpong_8", "startFrame": 1, "endFrame": 296, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_purse", "path": "sequences/purse", "startFrame": 
1, "endFrame": 968, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_purse.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_rubber", "path": "sequences/rubber", "startFrame": 1, "endFrame": 1328, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_rubber.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_running", "path": "sequences/running", "startFrame": 1, "endFrame": 677, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_running_100_m", "path": "sequences/running_100_m", "startFrame": 1, "endFrame": 313, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_100_m_2", "path": "sequences/running_100_m_2", "startFrame": 1, "endFrame": 337, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m_2.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_2", "path": "sequences/running_2", "startFrame": 1, "endFrame": 363, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_shuffleboard_1", "path": "sequences/shuffleboard_1", "startFrame": 1, "endFrame": 42, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_1.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_2", "path": "sequences/shuffleboard_2", "startFrame": 1, "endFrame": 41, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_4", "path": "sequences/shuffleboard_4", "startFrame": 1, "endFrame": 62, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_5", "path": "sequences/shuffleboard_5", "startFrame": 1, "endFrame": 32, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_5.txt", "object_class": "other", 'occlusion': False}, {"name": 
"nfs_shuffleboard_6", "path": "sequences/shuffleboard_6", "startFrame": 1, "endFrame": 52, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_6.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_2", "path": "sequences/shuffletable_2", "startFrame": 1, "endFrame": 372, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_3", "path": "sequences/shuffletable_3", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_3.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_4", "path": "sequences/shuffletable_4", "startFrame": 1, "endFrame": 101, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_ski_long", "path": "sequences/ski_long", "startFrame": 1, "endFrame": 274, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ski_long.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_ball", "path": "sequences/soccer_ball", "startFrame": 1, "endFrame": 163, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_2", "path": "sequences/soccer_ball_2", "startFrame": 1, "endFrame": 1934, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_3", "path": "sequences/soccer_ball_3", "startFrame": 1, "endFrame": 1381, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_player_2", "path": "sequences/soccer_player_2", "startFrame": 1, "endFrame": 475, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_player_3", "path": "sequences/soccer_player_3", "startFrame": 1, "endFrame": 319, "nz": 5, 
"ext": "jpg", "anno_path": "anno/nfs_soccer_player_3.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_stop_sign", "path": "sequences/stop_sign", "startFrame": 1, "endFrame": 302, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_stop_sign.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_suv", "path": "sequences/suv", "startFrame": 1, "endFrame": 2584, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_suv.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_tiger", "path": "sequences/tiger", "startFrame": 1, "endFrame": 1556, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_tiger.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_walking", "path": "sequences/walking", "startFrame": 1, "endFrame": 555, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_walking_3", "path": "sequences/walking_3", "startFrame": 1, "endFrame": 1427, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking_3.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_water_ski_2", "path": "sequences/water_ski_2", "startFrame": 1, "endFrame": 47, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_water_ski_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_yoyo", "path": "sequences/yoyo", "startFrame": 1, "endFrame": 67, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_yoyo.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_zebra_fish", "path": "sequences/zebra_fish", "startFrame": 1, "endFrame": 671, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_zebra_fish.txt", "object_class": "fish", 'occlusion': False}, ] return sequence_info_list ================================================ FILE: external/AR/pytracking/evaluation/otbdataset.py ================================================ import numpy as np from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList from pytracking.utils.load_text import load_text class 
OTBDataset(BaseDataset):
    """OTB-2015 (OTB-100) evaluation dataset.

    Publication:
        Object Tracking Benchmark
        Wu, Yi, Jongwoo Lim, and Ming-Hsuan Yang
        TPAMI, 2015
        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf

    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
    """

    def __init__(self):
        """Read the OTB root path from the environment settings and build the static sequence table."""
        super().__init__()
        # Root directory of the extracted OTB dataset (configured in local environment settings).
        self.base_path = self.env_settings.otb_path
        # Static, hard-coded list of per-sequence metadata dicts.
        self.sequence_info_list = self._get_sequence_info_list()

    def get_sequence_list(self):
        """Return a SequenceList with one constructed Sequence per metadata entry."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])

    def _construct_sequence(self, sequence_info):
        """Build a Sequence object (frame paths + ground-truth boxes) from one metadata dict.

        args:
            sequence_info - dict with keys 'path', 'nz' (zero-pad width), 'ext',
                            'startFrame', 'endFrame', 'anno_path', 'object_class',
                            and optionally 'initOmit' (number of leading frames to drop).
        """
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        # Some sequences (e.g. Tiger1) skip their first few frames per benchmark convention.
        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        # Frame numbers are 1-indexed and zero-padded to 'nz' digits; endFrame is inclusive.
        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
                                                                           sequence_path=sequence_path, frame=frame_num,
                                                                           nz=nz, ext=ext)
                  for frame_num in range(start_frame+init_omit, end_frame+1)]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        # NOTE: OTB has some weird annos which panda cannot handle
        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')

        # Drop the same number of leading annotation rows as omitted frames so they stay aligned.
        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],
                        object_class=sequence_info['object_class'])

    def __len__(self):
        """Number of sequences in the dataset."""
        return len(self.sequence_info_list)

    def _get_sequence_info_list(self):
        # Hard-coded metadata for every OTB-2015 sequence (continues below).
        sequence_info_list = [
            {"name": "Basketball", "path": "Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "Basketball/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Biker", "path": "Biker/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "Biker/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Bird1", "path": "Bird1/img", "startFrame": 1, "endFrame": 408, "nz": 4, "ext": "jpg",
"anno_path": "Bird1/groundtruth_rect.txt", "object_class": "bird"}, {"name": "Bird2", "path": "Bird2/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "Bird2/groundtruth_rect.txt", "object_class": "bird"}, {"name": "BlurBody", "path": "BlurBody/img", "startFrame": 1, "endFrame": 334, "nz": 4, "ext": "jpg", "anno_path": "BlurBody/groundtruth_rect.txt", "object_class": "person"}, {"name": "BlurCar1", "path": "BlurCar1/img", "startFrame": 247, "endFrame": 988, "nz": 4, "ext": "jpg", "anno_path": "BlurCar1/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar2", "path": "BlurCar2/img", "startFrame": 1, "endFrame": 585, "nz": 4, "ext": "jpg", "anno_path": "BlurCar2/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar3", "path": "BlurCar3/img", "startFrame": 3, "endFrame": 359, "nz": 4, "ext": "jpg", "anno_path": "BlurCar3/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurCar4", "path": "BlurCar4/img", "startFrame": 18, "endFrame": 397, "nz": 4, "ext": "jpg", "anno_path": "BlurCar4/groundtruth_rect.txt", "object_class": "car"}, {"name": "BlurFace", "path": "BlurFace/img", "startFrame": 1, "endFrame": 493, "nz": 4, "ext": "jpg", "anno_path": "BlurFace/groundtruth_rect.txt", "object_class": "face"}, {"name": "BlurOwl", "path": "BlurOwl/img", "startFrame": 1, "endFrame": 631, "nz": 4, "ext": "jpg", "anno_path": "BlurOwl/groundtruth_rect.txt", "object_class": "other"}, {"name": "Board", "path": "Board/img", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "Board/groundtruth_rect.txt", "object_class": "other"}, {"name": "Bolt", "path": "Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "Bolt/groundtruth_rect.txt", "object_class": "person"}, {"name": "Bolt2", "path": "Bolt2/img", "startFrame": 1, "endFrame": 293, "nz": 4, "ext": "jpg", "anno_path": "Bolt2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Box", "path": "Box/img", "startFrame": 1, "endFrame": 
1161, "nz": 4, "ext": "jpg", "anno_path": "Box/groundtruth_rect.txt", "object_class": "other"}, {"name": "Boy", "path": "Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "Boy/groundtruth_rect.txt", "object_class": "face"}, {"name": "Car1", "path": "Car1/img", "startFrame": 1, "endFrame": 1020, "nz": 4, "ext": "jpg", "anno_path": "Car1/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car2", "path": "Car2/img", "startFrame": 1, "endFrame": 913, "nz": 4, "ext": "jpg", "anno_path": "Car2/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car24", "path": "Car24/img", "startFrame": 1, "endFrame": 3059, "nz": 4, "ext": "jpg", "anno_path": "Car24/groundtruth_rect.txt", "object_class": "car"}, {"name": "Car4", "path": "Car4/img", "startFrame": 1, "endFrame": 659, "nz": 4, "ext": "jpg", "anno_path": "Car4/groundtruth_rect.txt", "object_class": "car"}, {"name": "CarDark", "path": "CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "CarDark/groundtruth_rect.txt", "object_class": "car"}, {"name": "CarScale", "path": "CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "CarScale/groundtruth_rect.txt", "object_class": "car"}, {"name": "ClifBar", "path": "ClifBar/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "ClifBar/groundtruth_rect.txt", "object_class": "other"}, {"name": "Coke", "path": "Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "Coke/groundtruth_rect.txt", "object_class": "other"}, {"name": "Couple", "path": "Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "Couple/groundtruth_rect.txt", "object_class": "person"}, {"name": "Coupon", "path": "Coupon/img", "startFrame": 1, "endFrame": 327, "nz": 4, "ext": "jpg", "anno_path": "Coupon/groundtruth_rect.txt", "object_class": "other"}, {"name": "Crossing", "path": "Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, 
"ext": "jpg", "anno_path": "Crossing/groundtruth_rect.txt", "object_class": "person"}, {"name": "Crowds", "path": "Crowds/img", "startFrame": 1, "endFrame": 347, "nz": 4, "ext": "jpg", "anno_path": "Crowds/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dancer", "path": "Dancer/img", "startFrame": 1, "endFrame": 225, "nz": 4, "ext": "jpg", "anno_path": "Dancer/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dancer2", "path": "Dancer2/img", "startFrame": 1, "endFrame": 150, "nz": 4, "ext": "jpg", "anno_path": "Dancer2/groundtruth_rect.txt", "object_class": "person"}, {"name": "David", "path": "David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "David/groundtruth_rect.txt", "object_class": "face"}, {"name": "David2", "path": "David2/img", "startFrame": 1, "endFrame": 537, "nz": 4, "ext": "jpg", "anno_path": "David2/groundtruth_rect.txt", "object_class": "face"}, {"name": "David3", "path": "David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "David3/groundtruth_rect.txt", "object_class": "person"}, {"name": "Deer", "path": "Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "Deer/groundtruth_rect.txt", "object_class": "mammal"}, {"name": "Diving", "path": "Diving/img", "startFrame": 1, "endFrame": 215, "nz": 4, "ext": "jpg", "anno_path": "Diving/groundtruth_rect.txt", "object_class": "person"}, {"name": "Dog", "path": "Dog/img", "startFrame": 1, "endFrame": 127, "nz": 4, "ext": "jpg", "anno_path": "Dog/groundtruth_rect.txt", "object_class": "dog"}, {"name": "Dog1", "path": "Dog1/img", "startFrame": 1, "endFrame": 1350, "nz": 4, "ext": "jpg", "anno_path": "Dog1/groundtruth_rect.txt", "object_class": "dog"}, {"name": "Doll", "path": "Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "Doll/groundtruth_rect.txt", "object_class": "other"}, {"name": "DragonBaby", "path": "DragonBaby/img", "startFrame": 1, "endFrame": 113, "nz": 
4, "ext": "jpg", "anno_path": "DragonBaby/groundtruth_rect.txt", "object_class": "face"}, {"name": "Dudek", "path": "Dudek/img", "startFrame": 1, "endFrame": 1145, "nz": 4, "ext": "jpg", "anno_path": "Dudek/groundtruth_rect.txt", "object_class": "face"}, {"name": "FaceOcc1", "path": "FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc1/groundtruth_rect.txt", "object_class": "face"}, {"name": "FaceOcc2", "path": "FaceOcc2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc2/groundtruth_rect.txt", "object_class": "face"}, {"name": "Fish", "path": "Fish/img", "startFrame": 1, "endFrame": 476, "nz": 4, "ext": "jpg", "anno_path": "Fish/groundtruth_rect.txt", "object_class": "other"}, {"name": "FleetFace", "path": "FleetFace/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "FleetFace/groundtruth_rect.txt", "object_class": "face"}, {"name": "Football", "path": "Football/img", "startFrame": 1, "endFrame": 362, "nz": 4, "ext": "jpg", "anno_path": "Football/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Football1", "path": "Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "Football1/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman1", "path": "Freeman1/img", "startFrame": 1, "endFrame": 326, "nz": 4, "ext": "jpg", "anno_path": "Freeman1/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman3", "path": "Freeman3/img", "startFrame": 1, "endFrame": 460, "nz": 4, "ext": "jpg", "anno_path": "Freeman3/groundtruth_rect.txt", "object_class": "face"}, {"name": "Freeman4", "path": "Freeman4/img", "startFrame": 1, "endFrame": 283, "nz": 4, "ext": "jpg", "anno_path": "Freeman4/groundtruth_rect.txt", "object_class": "face"}, {"name": "Girl", "path": "Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Girl/groundtruth_rect.txt", "object_class": "face"}, {"name": "Girl2", "path": 
"Girl2/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "Girl2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Gym", "path": "Gym/img", "startFrame": 1, "endFrame": 767, "nz": 4, "ext": "jpg", "anno_path": "Gym/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human2", "path": "Human2/img", "startFrame": 1, "endFrame": 1128, "nz": 4, "ext": "jpg", "anno_path": "Human2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human3", "path": "Human3/img", "startFrame": 1, "endFrame": 1698, "nz": 4, "ext": "jpg", "anno_path": "Human3/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human4_2", "path": "Human4/img", "startFrame": 1, "endFrame": 667, "nz": 4, "ext": "jpg", "anno_path": "Human4/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Human5", "path": "Human5/img", "startFrame": 1, "endFrame": 713, "nz": 4, "ext": "jpg", "anno_path": "Human5/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human6", "path": "Human6/img", "startFrame": 1, "endFrame": 792, "nz": 4, "ext": "jpg", "anno_path": "Human6/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human7", "path": "Human7/img", "startFrame": 1, "endFrame": 250, "nz": 4, "ext": "jpg", "anno_path": "Human7/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human8", "path": "Human8/img", "startFrame": 1, "endFrame": 128, "nz": 4, "ext": "jpg", "anno_path": "Human8/groundtruth_rect.txt", "object_class": "person"}, {"name": "Human9", "path": "Human9/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "Human9/groundtruth_rect.txt", "object_class": "person"}, {"name": "Ironman", "path": "Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "Ironman/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Jogging_1", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.1.txt", 
"object_class": "person"}, {"name": "Jogging_2", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Jump", "path": "Jump/img", "startFrame": 1, "endFrame": 122, "nz": 4, "ext": "jpg", "anno_path": "Jump/groundtruth_rect.txt", "object_class": "person"}, {"name": "Jumping", "path": "Jumping/img", "startFrame": 1, "endFrame": 313, "nz": 4, "ext": "jpg", "anno_path": "Jumping/groundtruth_rect.txt", "object_class": "face"}, {"name": "KiteSurf", "path": "KiteSurf/img", "startFrame": 1, "endFrame": 84, "nz": 4, "ext": "png", "anno_path": "KiteSurf/groundtruth_rect.txt", "object_class": "face"}, {"name": "Lemming", "path": "Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "Lemming/groundtruth_rect.txt", "object_class": "other"}, {"name": "Liquor", "path": "Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "Liquor/groundtruth_rect.txt", "object_class": "other"}, {"name": "Man", "path": "Man/img", "startFrame": 1, "endFrame": 134, "nz": 4, "ext": "jpg", "anno_path": "Man/groundtruth_rect.txt", "object_class": "face"}, {"name": "Matrix", "path": "Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "Matrix/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Mhyang", "path": "Mhyang/img", "startFrame": 1, "endFrame": 1490, "nz": 4, "ext": "jpg", "anno_path": "Mhyang/groundtruth_rect.txt", "object_class": "face"}, {"name": "MotorRolling", "path": "MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "MotorRolling/groundtruth_rect.txt", "object_class": "vehicle"}, {"name": "MountainBike", "path": "MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "MountainBike/groundtruth_rect.txt", "object_class": "bicycle"}, {"name": "Panda", "path": "Panda/img", "startFrame": 1, "endFrame": 1000, 
"nz": 4, "ext": "jpg", "anno_path": "Panda/groundtruth_rect.txt", "object_class": "mammal"}, {"name": "RedTeam", "path": "RedTeam/img", "startFrame": 1, "endFrame": 1918, "nz": 4, "ext": "jpg", "anno_path": "RedTeam/groundtruth_rect.txt", "object_class": "vehicle"}, {"name": "Rubik", "path": "Rubik/img", "startFrame": 1, "endFrame": 1997, "nz": 4, "ext": "jpg", "anno_path": "Rubik/groundtruth_rect.txt", "object_class": "other"}, {"name": "Shaking", "path": "Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Shaking/groundtruth_rect.txt", "object_class": "face"}, {"name": "Singer1", "path": "Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "Singer1/groundtruth_rect.txt", "object_class": "person"}, {"name": "Singer2", "path": "Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "Singer2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skater", "path": "Skater/img", "startFrame": 1, "endFrame": 160, "nz": 4, "ext": "jpg", "anno_path": "Skater/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skater2", "path": "Skater2/img", "startFrame": 1, "endFrame": 435, "nz": 4, "ext": "jpg", "anno_path": "Skater2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skating1", "path": "Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "Skating1/groundtruth_rect.txt", "object_class": "person"}, {"name": "Skating2_1", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.1.txt", "object_class": "person"}, {"name": "Skating2_2", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.2.txt", "object_class": "person"}, {"name": "Skiing", "path": "Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "Skiing/groundtruth_rect.txt", "object_class": "person"}, {"name": 
"Soccer", "path": "Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "Soccer/groundtruth_rect.txt", "object_class": "face"}, {"name": "Subway", "path": "Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "Subway/groundtruth_rect.txt", "object_class": "person"}, {"name": "Surfer", "path": "Surfer/img", "startFrame": 1, "endFrame": 376, "nz": 4, "ext": "jpg", "anno_path": "Surfer/groundtruth_rect.txt", "object_class": "person head"}, {"name": "Suv", "path": "Suv/img", "startFrame": 1, "endFrame": 945, "nz": 4, "ext": "jpg", "anno_path": "Suv/groundtruth_rect.txt", "object_class": "car"}, {"name": "Sylvester", "path": "Sylvester/img", "startFrame": 1, "endFrame": 1345, "nz": 4, "ext": "jpg", "anno_path": "Sylvester/groundtruth_rect.txt", "object_class": "other"}, {"name": "Tiger1", "path": "Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "Tiger1/groundtruth_rect.txt", "initOmit": 5, "object_class": "other"}, {"name": "Tiger2", "path": "Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Tiger2/groundtruth_rect.txt", "object_class": "other"}, {"name": "Toy", "path": "Toy/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Toy/groundtruth_rect.txt", "object_class": "other"}, {"name": "Trans", "path": "Trans/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "Trans/groundtruth_rect.txt", "object_class": "other"}, {"name": "Trellis", "path": "Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "Trellis/groundtruth_rect.txt", "object_class": "face"}, {"name": "Twinnings", "path": "Twinnings/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "Twinnings/groundtruth_rect.txt", "object_class": "other"}, {"name": "Vase", "path": "Vase/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Vase/groundtruth_rect.txt", 
"object_class": "other"}, {"name": "Walking", "path": "Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "Walking/groundtruth_rect.txt", "object_class": "person"}, {"name": "Walking2", "path": "Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Walking2/groundtruth_rect.txt", "object_class": "person"}, {"name": "Woman", "path": "Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "Woman/groundtruth_rect.txt", "object_class": "person"} ] return sequence_info_list ================================================ FILE: external/AR/pytracking/evaluation/running.py ================================================ import numpy as np import multiprocessing import os import sys from itertools import product from collections import OrderedDict from pytracking.evaluation import Sequence, Tracker from ltr.data.image_loader import imwrite_indexed def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict): """Saves the output of the tracker.""" if not os.path.exists(tracker.results_dir): os.makedirs(tracker.results_dir) base_results_path = os.path.join(tracker.results_dir, seq.name) segmentation_path = os.path.join(tracker.segmentation_dir, seq.name) frame_names = [os.path.splitext(os.path.basename(f))[0] for f in seq.frames] def save_bb(file, data): tracked_bb = np.array(data).astype(int) np.savetxt(file, tracked_bb, delimiter='\t', fmt='%d') def save_time(file, data): exec_times = np.array(data).astype(float) np.savetxt(file, exec_times, delimiter='\t', fmt='%f') def _convert_dict(input_dict): data_dict = {} for elem in input_dict: for k, v in elem.items(): if k in data_dict.keys(): data_dict[k].append(v) else: data_dict[k] = [v, ] return data_dict for key, data in output.items(): # If data is empty if not data: continue if key == 'target_bbox': if isinstance(data[0], (dict, OrderedDict)): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): bbox_file 
= '{}_{}.txt'.format(base_results_path, obj_id) save_bb(bbox_file, d) else: # Single-object mode bbox_file = '{}.txt'.format(base_results_path) save_bb(bbox_file, data) elif key == 'time': if isinstance(data[0], dict): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id) save_time(timings_file, d) else: timings_file = '{}_time.txt'.format(base_results_path) save_time(timings_file, data) elif key == 'segmentation': assert len(frame_names) == len(data) if not os.path.exists(segmentation_path): os.makedirs(segmentation_path) for frame_name, frame_seg in zip(frame_names, data): imwrite_indexed(os.path.join(segmentation_path, '{}.png'.format(frame_name)), frame_seg) def run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None): """Runs a tracker on a sequence.""" def _results_exist(): if seq.object_ids is None: bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name) return os.path.isfile(bbox_file) else: bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids] missing = [not os.path.isfile(f) for f in bbox_files] return sum(missing) == 0 visdom_info = {} if visdom_info is None else visdom_info if _results_exist() and not debug: print('FPS: {}'.format(-1)) return print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name)) if debug: output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info) else: try: output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info) except Exception as e: print(e) return sys.stdout.flush() if isinstance(output['time'][0], (dict, OrderedDict)): exec_time = sum([sum(times.values()) for times in output['time']]) num_frames = len(output['time']) else: exec_time = sum(output['time']) num_frames = len(output['time']) print('FPS: {}'.format(num_frames / exec_time)) if not debug: _save_tracker_output(seq, tracker, output) 
def run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None):
    """Runs a list of trackers on a dataset.

    args:
        dataset: List of Sequence instances, forming a dataset.
        trackers: List of Tracker instances.
        debug: Debug level.
        threads: Number of worker processes; 0 runs everything sequentially
                 in the current process.
        visdom_info: Dict containing information about the server for visdom.
    """
    # 'spawn' gives each worker a clean interpreter (required e.g. for CUDA);
    # force=True avoids a RuntimeError if a start method was already set.
    # (Fixed: the original issued this exact call twice in a row.)
    multiprocessing.set_start_method('spawn', force=True)

    print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))

    visdom_info = {} if visdom_info is None else visdom_info

    mode = 'sequential' if threads == 0 else 'parallel'

    if mode == 'sequential':
        # Every tracker is run on every sequence, one after another.
        for seq in dataset:
            for tracker_info in trackers:
                run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info)
    elif mode == 'parallel':
        # Fan the (sequence, tracker) cross product out over a process pool.
        param_list = [(seq, tracker_info, debug, visdom_info) for seq, tracker_info in product(dataset, trackers)]
        with multiprocessing.Pool(processes=threads) as pool:
            pool.starmap(run_sequence, param_list)
    print('Done')
Ling TIP, 2015 http://www.dabi.temple.edu/~hbling/publication/TColor-128.pdf Download the dataset from http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html """ def __init__(self, exclude_otb=False): """ args: exclude_otb (bool) - If True, sequences overlapping with the OTB dataset are excluded """ super().__init__() self.base_path = self.env_settings.tpl_path self.sequence_info_list = self._get_sequence_info_list(exclude_otb) def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'tpl', ground_truth_rect[init_omit:,:]) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self, exclude_otb=False): sequence_info_list = [ {"name": "tpl_Skating2", "path": "tpl_Skating2/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating2/Skating2_gt.txt"}, {"name": "tpl_Pool_ce3", "path": "tpl_Pool_ce3/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce3/Pool_ce3_gt.txt"}, {"name": "tpl_Microphone_ce1", "path": "tpl_Microphone_ce1/img", "startFrame": 1, "endFrame": 204, "nz": 4, "ext": "jpg", "anno_path": "tpl_Microphone_ce1/Microphone_ce1_gt.txt"}, {"name": "tpl_Torus", "path": 
"tpl_Torus/img", "startFrame": 1, "endFrame": 264, "nz": 4, "ext": "jpg", "anno_path": "tpl_Torus/Torus_gt.txt"}, {"name": "tpl_Lemming", "path": "tpl_Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "tpl_Lemming/Lemming_gt.txt"}, {"name": "tpl_Eagle_ce", "path": "tpl_Eagle_ce/img", "startFrame": 1, "endFrame": 112, "nz": 4, "ext": "jpg", "anno_path": "tpl_Eagle_ce/Eagle_ce_gt.txt"}, {"name": "tpl_Skating_ce2", "path": "tpl_Skating_ce2/img", "startFrame": 1, "endFrame": 497, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating_ce2/Skating_ce2_gt.txt"}, {"name": "tpl_Yo_yos_ce3", "path": "tpl_Yo_yos_ce3/img", "startFrame": 1, "endFrame": 201, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce3/Yo-yos_ce3_gt.txt"}, {"name": "tpl_Board", "path": "tpl_Board/img", "startFrame": 1, "endFrame": 598, "nz": 4, "ext": "jpg", "anno_path": "tpl_Board/Board_gt.txt"}, {"name": "tpl_Tennis_ce3", "path": "tpl_Tennis_ce3/img", "startFrame": 1, "endFrame": 204, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tennis_ce3/Tennis_ce3_gt.txt"}, {"name": "tpl_SuperMario_ce", "path": "tpl_SuperMario_ce/img", "startFrame": 1, "endFrame": 146, "nz": 4, "ext": "jpg", "anno_path": "tpl_SuperMario_ce/SuperMario_ce_gt.txt"}, {"name": "tpl_Yo_yos_ce1", "path": "tpl_Yo_yos_ce1/img", "startFrame": 1, "endFrame": 235, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce1/Yo-yos_ce1_gt.txt"}, {"name": "tpl_Soccer", "path": "tpl_Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "tpl_Soccer/Soccer_gt.txt"}, {"name": "tpl_Fish_ce2", "path": "tpl_Fish_ce2/img", "startFrame": 1, "endFrame": 573, "nz": 4, "ext": "jpg", "anno_path": "tpl_Fish_ce2/Fish_ce2_gt.txt"}, {"name": "tpl_Liquor", "path": "tpl_Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "tpl_Liquor/Liquor_gt.txt"}, {"name": "tpl_Plane_ce2", "path": "tpl_Plane_ce2/img", "startFrame": 1, "endFrame": 653, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Plane_ce2/Plane_ce2_gt.txt"}, {"name": "tpl_Couple", "path": "tpl_Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "tpl_Couple/Couple_gt.txt"}, {"name": "tpl_Logo_ce", "path": "tpl_Logo_ce/img", "startFrame": 1, "endFrame": 610, "nz": 4, "ext": "jpg", "anno_path": "tpl_Logo_ce/Logo_ce_gt.txt"}, {"name": "tpl_Hand_ce2", "path": "tpl_Hand_ce2/img", "startFrame": 1, "endFrame": 251, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand_ce2/Hand_ce2_gt.txt"}, {"name": "tpl_Kite_ce2", "path": "tpl_Kite_ce2/img", "startFrame": 1, "endFrame": 658, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce2/Kite_ce2_gt.txt"}, {"name": "tpl_Walking", "path": "tpl_Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "tpl_Walking/Walking_gt.txt"}, {"name": "tpl_David", "path": "tpl_David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "tpl_David/David_gt.txt"}, {"name": "tpl_Boat_ce1", "path": "tpl_Boat_ce1/img", "startFrame": 1, "endFrame": 377, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boat_ce1/Boat_ce1_gt.txt"}, {"name": "tpl_Airport_ce", "path": "tpl_Airport_ce/img", "startFrame": 1, "endFrame": 148, "nz": 4, "ext": "jpg", "anno_path": "tpl_Airport_ce/Airport_ce_gt.txt"}, {"name": "tpl_Tiger2", "path": "tpl_Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tiger2/Tiger2_gt.txt"}, {"name": "tpl_Suitcase_ce", "path": "tpl_Suitcase_ce/img", "startFrame": 1, "endFrame": 184, "nz": 4, "ext": "jpg", "anno_path": "tpl_Suitcase_ce/Suitcase_ce_gt.txt"}, {"name": "tpl_TennisBall_ce", "path": "tpl_TennisBall_ce/img", "startFrame": 1, "endFrame": 288, "nz": 4, "ext": "jpg", "anno_path": "tpl_TennisBall_ce/TennisBall_ce_gt.txt"}, {"name": "tpl_Singer_ce1", "path": "tpl_Singer_ce1/img", "startFrame": 1, "endFrame": 214, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer_ce1/Singer_ce1_gt.txt"}, {"name": "tpl_Pool_ce2", "path": "tpl_Pool_ce2/img", "startFrame": 1, 
"endFrame": 133, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce2/Pool_ce2_gt.txt"}, {"name": "tpl_Surf_ce3", "path": "tpl_Surf_ce3/img", "startFrame": 1, "endFrame": 279, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce3/Surf_ce3_gt.txt"}, {"name": "tpl_Bird", "path": "tpl_Bird/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bird/Bird_gt.txt"}, {"name": "tpl_Crossing", "path": "tpl_Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, "ext": "jpg", "anno_path": "tpl_Crossing/Crossing_gt.txt"}, {"name": "tpl_Plate_ce1", "path": "tpl_Plate_ce1/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "tpl_Plate_ce1/Plate_ce1_gt.txt"}, {"name": "tpl_Cup", "path": "tpl_Cup/img", "startFrame": 1, "endFrame": 303, "nz": 4, "ext": "jpg", "anno_path": "tpl_Cup/Cup_gt.txt"}, {"name": "tpl_Surf_ce2", "path": "tpl_Surf_ce2/img", "startFrame": 1, "endFrame": 391, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce2/Surf_ce2_gt.txt"}, {"name": "tpl_Busstation_ce2", "path": "tpl_Busstation_ce2/img", "startFrame": 6, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "tpl_Busstation_ce2/Busstation_ce2_gt.txt"}, {"name": "tpl_Charger_ce", "path": "tpl_Charger_ce/img", "startFrame": 1, "endFrame": 298, "nz": 4, "ext": "jpg", "anno_path": "tpl_Charger_ce/Charger_ce_gt.txt"}, {"name": "tpl_Pool_ce1", "path": "tpl_Pool_ce1/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "tpl_Pool_ce1/Pool_ce1_gt.txt"}, {"name": "tpl_MountainBike", "path": "tpl_MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "tpl_MountainBike/MountainBike_gt.txt"}, {"name": "tpl_Guitar_ce1", "path": "tpl_Guitar_ce1/img", "startFrame": 1, "endFrame": 268, "nz": 4, "ext": "jpg", "anno_path": "tpl_Guitar_ce1/Guitar_ce1_gt.txt"}, {"name": "tpl_Busstation_ce1", "path": "tpl_Busstation_ce1/img", "startFrame": 1, "endFrame": 363, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Busstation_ce1/Busstation_ce1_gt.txt"}, {"name": "tpl_Diving", "path": "tpl_Diving/img", "startFrame": 1, "endFrame": 231, "nz": 4, "ext": "jpg", "anno_path": "tpl_Diving/Diving_gt.txt"}, {"name": "tpl_Skating_ce1", "path": "tpl_Skating_ce1/img", "startFrame": 1, "endFrame": 409, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating_ce1/Skating_ce1_gt.txt"}, {"name": "tpl_Hurdle_ce2", "path": "tpl_Hurdle_ce2/img", "startFrame": 27, "endFrame": 330, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hurdle_ce2/Hurdle_ce2_gt.txt"}, {"name": "tpl_Plate_ce2", "path": "tpl_Plate_ce2/img", "startFrame": 1, "endFrame": 181, "nz": 4, "ext": "jpg", "anno_path": "tpl_Plate_ce2/Plate_ce2_gt.txt"}, {"name": "tpl_CarDark", "path": "tpl_CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "tpl_CarDark/CarDark_gt.txt"}, {"name": "tpl_Singer_ce2", "path": "tpl_Singer_ce2/img", "startFrame": 1, "endFrame": 999, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer_ce2/Singer_ce2_gt.txt"}, {"name": "tpl_Shaking", "path": "tpl_Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "tpl_Shaking/Shaking_gt.txt"}, {"name": "tpl_Iceskater", "path": "tpl_Iceskater/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Iceskater/Iceskater_gt.txt"}, {"name": "tpl_Badminton_ce2", "path": "tpl_Badminton_ce2/img", "startFrame": 1, "endFrame": 705, "nz": 4, "ext": "jpg", "anno_path": "tpl_Badminton_ce2/Badminton_ce2_gt.txt"}, {"name": "tpl_Spiderman_ce", "path": "tpl_Spiderman_ce/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "tpl_Spiderman_ce/Spiderman_ce_gt.txt"}, {"name": "tpl_Kite_ce1", "path": "tpl_Kite_ce1/img", "startFrame": 1, "endFrame": 484, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce1/Kite_ce1_gt.txt"}, {"name": "tpl_Skyjumping_ce", "path": "tpl_Skyjumping_ce/img", "startFrame": 1, "endFrame": 938, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skyjumping_ce/Skyjumping_ce_gt.txt"}, 
{"name": "tpl_Ball_ce1", "path": "tpl_Ball_ce1/img", "startFrame": 1, "endFrame": 391, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce1/Ball_ce1_gt.txt"}, {"name": "tpl_Yo_yos_ce2", "path": "tpl_Yo_yos_ce2/img", "startFrame": 1, "endFrame": 454, "nz": 4, "ext": "jpg", "anno_path": "tpl_Yo_yos_ce2/Yo-yos_ce2_gt.txt"}, {"name": "tpl_Ironman", "path": "tpl_Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ironman/Ironman_gt.txt"}, {"name": "tpl_FaceOcc1", "path": "tpl_FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "tpl_FaceOcc1/FaceOcc1_gt.txt"}, {"name": "tpl_Surf_ce1", "path": "tpl_Surf_ce1/img", "startFrame": 1, "endFrame": 404, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce1/Surf_ce1_gt.txt"}, {"name": "tpl_Ring_ce", "path": "tpl_Ring_ce/img", "startFrame": 1, "endFrame": 201, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ring_ce/Ring_ce_gt.txt"}, {"name": "tpl_Surf_ce4", "path": "tpl_Surf_ce4/img", "startFrame": 1, "endFrame": 135, "nz": 4, "ext": "jpg", "anno_path": "tpl_Surf_ce4/Surf_ce4_gt.txt"}, {"name": "tpl_Ball_ce4", "path": "tpl_Ball_ce4/img", "startFrame": 1, "endFrame": 538, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce4/Ball_ce4_gt.txt"}, {"name": "tpl_Bikeshow_ce", "path": "tpl_Bikeshow_ce/img", "startFrame": 1, "endFrame": 361, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bikeshow_ce/Bikeshow_ce_gt.txt"}, {"name": "tpl_Kobe_ce", "path": "tpl_Kobe_ce/img", "startFrame": 1, "endFrame": 582, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kobe_ce/Kobe_ce_gt.txt"}, {"name": "tpl_Tiger1", "path": "tpl_Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tiger1/Tiger1_gt.txt"}, {"name": "tpl_Skiing", "path": "tpl_Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skiing/Skiing_gt.txt"}, {"name": "tpl_Tennis_ce1", "path": "tpl_Tennis_ce1/img", "startFrame": 1, "endFrame": 454, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_Tennis_ce1/Tennis_ce1_gt.txt"}, {"name": "tpl_Carchasing_ce4", "path": "tpl_Carchasing_ce4/img", "startFrame": 1, "endFrame": 442, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce4/Carchasing_ce4_gt.txt"}, {"name": "tpl_Walking2", "path": "tpl_Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Walking2/Walking2_gt.txt"}, {"name": "tpl_Sailor_ce", "path": "tpl_Sailor_ce/img", "startFrame": 1, "endFrame": 402, "nz": 4, "ext": "jpg", "anno_path": "tpl_Sailor_ce/Sailor_ce_gt.txt"}, {"name": "tpl_Railwaystation_ce", "path": "tpl_Railwaystation_ce/img", "startFrame": 1, "endFrame": 413, "nz": 4, "ext": "jpg", "anno_path": "tpl_Railwaystation_ce/Railwaystation_ce_gt.txt"}, {"name": "tpl_Bee_ce", "path": "tpl_Bee_ce/img", "startFrame": 1, "endFrame": 90, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bee_ce/Bee_ce_gt.txt"}, {"name": "tpl_Girl", "path": "tpl_Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Girl/Girl_gt.txt"}, {"name": "tpl_Subway", "path": "tpl_Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "tpl_Subway/Subway_gt.txt"}, {"name": "tpl_David3", "path": "tpl_David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "tpl_David3/David3_gt.txt"}, {"name": "tpl_Electricalbike_ce", "path": "tpl_Electricalbike_ce/img", "startFrame": 1, "endFrame": 818, "nz": 4, "ext": "jpg", "anno_path": "tpl_Electricalbike_ce/Electricalbike_ce_gt.txt"}, {"name": "tpl_Michaeljackson_ce", "path": "tpl_Michaeljackson_ce/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "tpl_Michaeljackson_ce/Michaeljackson_ce_gt.txt"}, {"name": "tpl_Woman", "path": "tpl_Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "tpl_Woman/Woman_gt.txt"}, {"name": "tpl_TableTennis_ce", "path": "tpl_TableTennis_ce/img", "startFrame": 1, "endFrame": 198, "nz": 4, "ext": "jpg", "anno_path": 
"tpl_TableTennis_ce/TableTennis_ce_gt.txt"}, {"name": "tpl_Motorbike_ce", "path": "tpl_Motorbike_ce/img", "startFrame": 1, "endFrame": 563, "nz": 4, "ext": "jpg", "anno_path": "tpl_Motorbike_ce/Motorbike_ce_gt.txt"}, {"name": "tpl_Baby_ce", "path": "tpl_Baby_ce/img", "startFrame": 1, "endFrame": 296, "nz": 4, "ext": "jpg", "anno_path": "tpl_Baby_ce/Baby_ce_gt.txt"}, {"name": "tpl_Gym", "path": "tpl_Gym/img", "startFrame": 1, "endFrame": 766, "nz": 4, "ext": "jpg", "anno_path": "tpl_Gym/Gym_gt.txt"}, {"name": "tpl_Matrix", "path": "tpl_Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "tpl_Matrix/Matrix_gt.txt"}, {"name": "tpl_Kite_ce3", "path": "tpl_Kite_ce3/img", "startFrame": 1, "endFrame": 528, "nz": 4, "ext": "jpg", "anno_path": "tpl_Kite_ce3/Kite_ce3_gt.txt"}, {"name": "tpl_Fish_ce1", "path": "tpl_Fish_ce1/img", "startFrame": 1, "endFrame": 401, "nz": 4, "ext": "jpg", "anno_path": "tpl_Fish_ce1/Fish_ce1_gt.txt"}, {"name": "tpl_Hand_ce1", "path": "tpl_Hand_ce1/img", "startFrame": 1, "endFrame": 401, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand_ce1/Hand_ce1_gt.txt"}, {"name": "tpl_Doll", "path": "tpl_Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "tpl_Doll/Doll_gt.txt"}, {"name": "tpl_Carchasing_ce3", "path": "tpl_Carchasing_ce3/img", "startFrame": 1, "endFrame": 572, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce3/Carchasing_ce3_gt.txt"}, {"name": "tpl_Thunder_ce", "path": "tpl_Thunder_ce/img", "startFrame": 1, "endFrame": 375, "nz": 4, "ext": "jpg", "anno_path": "tpl_Thunder_ce/Thunder_ce_gt.txt"}, {"name": "tpl_Singer2", "path": "tpl_Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer2/Singer2_gt.txt"}, {"name": "tpl_Basketball", "path": "tpl_Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball/Basketball_gt.txt"}, {"name": "tpl_Hand", "path": "tpl_Hand/img", "startFrame": 1, "endFrame": 
244, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hand/Hand_gt.txt"}, {"name": "tpl_Cup_ce", "path": "tpl_Cup_ce/img", "startFrame": 1, "endFrame": 338, "nz": 4, "ext": "jpg", "anno_path": "tpl_Cup_ce/Cup_ce_gt.txt"}, {"name": "tpl_MotorRolling", "path": "tpl_MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "tpl_MotorRolling/MotorRolling_gt.txt"}, {"name": "tpl_Boat_ce2", "path": "tpl_Boat_ce2/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boat_ce2/Boat_ce2_gt.txt"}, {"name": "tpl_CarScale", "path": "tpl_CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "tpl_CarScale/CarScale_gt.txt"}, {"name": "tpl_Sunshade", "path": "tpl_Sunshade/img", "startFrame": 1, "endFrame": 172, "nz": 4, "ext": "jpg", "anno_path": "tpl_Sunshade/Sunshade_gt.txt"}, {"name": "tpl_Football1", "path": "tpl_Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "tpl_Football1/Football1_gt.txt"}, {"name": "tpl_Singer1", "path": "tpl_Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "tpl_Singer1/Singer1_gt.txt"}, {"name": "tpl_Hurdle_ce1", "path": "tpl_Hurdle_ce1/img", "startFrame": 1, "endFrame": 300, "nz": 4, "ext": "jpg", "anno_path": "tpl_Hurdle_ce1/Hurdle_ce1_gt.txt"}, {"name": "tpl_Basketball_ce3", "path": "tpl_Basketball_ce3/img", "startFrame": 1, "endFrame": 441, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce3/Basketball_ce3_gt.txt"}, {"name": "tpl_Toyplane_ce", "path": "tpl_Toyplane_ce/img", "startFrame": 1, "endFrame": 405, "nz": 4, "ext": "jpg", "anno_path": "tpl_Toyplane_ce/Toyplane_ce_gt.txt"}, {"name": "tpl_Skating1", "path": "tpl_Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skating1/Skating1_gt.txt"}, {"name": "tpl_Juice", "path": "tpl_Juice/img", "startFrame": 1, "endFrame": 404, "nz": 4, "ext": "jpg", "anno_path": "tpl_Juice/Juice_gt.txt"}, {"name": "tpl_Biker", 
"path": "tpl_Biker/img", "startFrame": 1, "endFrame": 180, "nz": 4, "ext": "jpg", "anno_path": "tpl_Biker/Biker_gt.txt"}, {"name": "tpl_Boy", "path": "tpl_Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "tpl_Boy/Boy_gt.txt"}, {"name": "tpl_Jogging1", "path": "tpl_Jogging1/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "tpl_Jogging1/Jogging1_gt.txt"}, {"name": "tpl_Deer", "path": "tpl_Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "tpl_Deer/Deer_gt.txt"}, {"name": "tpl_Panda", "path": "tpl_Panda/img", "startFrame": 1, "endFrame": 241, "nz": 4, "ext": "jpg", "anno_path": "tpl_Panda/Panda_gt.txt"}, {"name": "tpl_Coke", "path": "tpl_Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "tpl_Coke/Coke_gt.txt"}, {"name": "tpl_Carchasing_ce1", "path": "tpl_Carchasing_ce1/img", "startFrame": 1, "endFrame": 501, "nz": 4, "ext": "jpg", "anno_path": "tpl_Carchasing_ce1/Carchasing_ce1_gt.txt"}, {"name": "tpl_Badminton_ce1", "path": "tpl_Badminton_ce1/img", "startFrame": 1, "endFrame": 579, "nz": 4, "ext": "jpg", "anno_path": "tpl_Badminton_ce1/Badminton_ce1_gt.txt"}, {"name": "tpl_Trellis", "path": "tpl_Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "tpl_Trellis/Trellis_gt.txt"}, {"name": "tpl_Face_ce2", "path": "tpl_Face_ce2/img", "startFrame": 1, "endFrame": 148, "nz": 4, "ext": "jpg", "anno_path": "tpl_Face_ce2/Face_ce2_gt.txt"}, {"name": "tpl_Ball_ce2", "path": "tpl_Ball_ce2/img", "startFrame": 1, "endFrame": 603, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce2/Ball_ce2_gt.txt"}, {"name": "tpl_Skiing_ce", "path": "tpl_Skiing_ce/img", "startFrame": 1, "endFrame": 511, "nz": 4, "ext": "jpg", "anno_path": "tpl_Skiing_ce/Skiing_ce_gt.txt"}, {"name": "tpl_Jogging2", "path": "tpl_Jogging2/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "tpl_Jogging2/Jogging2_gt.txt"}, {"name": "tpl_Bike_ce1", 
"path": "tpl_Bike_ce1/img", "startFrame": 1, "endFrame": 801, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bike_ce1/Bike_ce1_gt.txt"}, {"name": "tpl_Bike_ce2", "path": "tpl_Bike_ce2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bike_ce2/Bike_ce2_gt.txt"}, {"name": "tpl_Ball_ce3", "path": "tpl_Ball_ce3/img", "startFrame": 1, "endFrame": 273, "nz": 4, "ext": "jpg", "anno_path": "tpl_Ball_ce3/Ball_ce3_gt.txt"}, {"name": "tpl_Girlmov", "path": "tpl_Girlmov/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "tpl_Girlmov/Girlmov_gt.txt"}, {"name": "tpl_Bolt", "path": "tpl_Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bolt/Bolt_gt.txt"}, {"name": "tpl_Basketball_ce2", "path": "tpl_Basketball_ce2/img", "startFrame": 1, "endFrame": 455, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce2/Basketball_ce2_gt.txt"}, {"name": "tpl_Bicycle", "path": "tpl_Bicycle/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "tpl_Bicycle/Bicycle_gt.txt"}, {"name": "tpl_Face_ce", "path": "tpl_Face_ce/img", "startFrame": 1, "endFrame": 620, "nz": 4, "ext": "jpg", "anno_path": "tpl_Face_ce/Face_ce_gt.txt"}, {"name": "tpl_Basketball_ce1", "path": "tpl_Basketball_ce1/img", "startFrame": 1, "endFrame": 496, "nz": 4, "ext": "jpg", "anno_path": "tpl_Basketball_ce1/Basketball_ce1_gt.txt"}, {"name": "tpl_Messi_ce", "path": "tpl_Messi_ce/img", "startFrame": 1, "endFrame": 272, "nz": 4, "ext": "jpg", "anno_path": "tpl_Messi_ce/Messi_ce_gt.txt"}, {"name": "tpl_Tennis_ce2", "path": "tpl_Tennis_ce2/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "tpl_Tennis_ce2/Tennis_ce2_gt.txt"}, {"name": "tpl_Microphone_ce2", "path": "tpl_Microphone_ce2/img", "startFrame": 1, "endFrame": 103, "nz": 4, "ext": "jpg", "anno_path": "tpl_Microphone_ce2/Microphone_ce2_gt.txt"}, {"name": "tpl_Guitar_ce2", "path": "tpl_Guitar_ce2/img", "startFrame": 1, "endFrame": 313, "nz": 4, 
"ext": "jpg", "anno_path": "tpl_Guitar_ce2/Guitar_ce2_gt.txt"} ] otb_sequences = ['tpl_Skating2', 'tpl_Lemming', 'tpl_Board', 'tpl_Soccer', 'tpl_Liquor', 'tpl_Couple', 'tpl_Walking', 'tpl_David', 'tpl_Tiger2', 'tpl_Bird', 'tpl_Crossing', 'tpl_MountainBike', 'tpl_Diving', 'tpl_CarDark', 'tpl_Shaking', 'tpl_Ironman', 'tpl_FaceOcc1', 'tpl_Tiger1', 'tpl_Skiing', 'tpl_Walking2', 'tpl_Girl', 'tpl_Girlmov', 'tpl_Subway', 'tpl_David3', 'tpl_Woman', 'tpl_Gym', 'tpl_Matrix', 'tpl_Doll', 'tpl_Singer2', 'tpl_Basketball', 'tpl_MotorRolling', 'tpl_CarScale', 'tpl_Football1', 'tpl_Singer1', 'tpl_Skating1', 'tpl_Biker', 'tpl_Boy', 'tpl_Jogging1', 'tpl_Deer', 'tpl_Panda', 'tpl_Coke', 'tpl_Trellis', 'tpl_Jogging2', 'tpl_Bolt', ] if exclude_otb: sequence_info_list_nootb = [] for seq in sequence_info_list: if seq['name'] not in otb_sequences: sequence_info_list_nootb.append(seq) sequence_info_list = sequence_info_list_nootb return sequence_info_list ================================================ FILE: external/AR/pytracking/evaluation/tracker.py ================================================ import importlib import os import numpy as np from collections import OrderedDict from pytracking.evaluation.environment import env_settings import time import cv2 as cv from pytracking.utils.visdom import Visdom import matplotlib.pyplot as plt import matplotlib.patches as patches from pytracking.utils.plotting import draw_figure, overlay_mask from pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect from ltr.data.bounding_box_utils import masks_to_bboxes from pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper import torch _tracker_disp_colors = {1: (0, 255, 0), 2: (0, 0, 255), 3: (255, 0, 0), 4: (255, 255, 255), 5: (0, 0, 0), 6: (0, 255, 128), 7: (123, 123, 123), 8: (255, 128, 0), 9: (128, 0, 255)} def trackerlist(name: str, parameter_name: str, run_ids = None, display_name: str = None): """Generate list of trackers. args: name: Name of tracking method. 
class Tracker:
    """Wraps the tracker for evaluation and running purposes.
    args:
        name: Name of tracking method.
        parameter_name: Name of parameter file.
        run_id: The run id.
        display_name: Name to be displayed in the result plots.
    """

    def __init__(self, name: str, parameter_name: str, run_id: int = None, display_name: str = None):
        assert run_id is None or isinstance(run_id, int)

        self.name = name
        self.parameter_name = parameter_name
        self.run_id = run_id
        self.display_name = display_name

        # Result/segmentation directories come from the local environment
        # settings; a run id, when given, is appended zero-padded to 3 digits.
        env = env_settings()
        if self.run_id is None:
            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)
            self.segmentation_dir = '{}/{}/{}'.format(env.segmentation_path, self.name, self.parameter_name)
        else:
            self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)
            self.segmentation_dir = '{}/{}/{}_{:03d}'.format(env.segmentation_path, self.name, self.parameter_name, self.run_id)

        # Import the tracker implementation package lazily by name; if no
        # such package directory exists, tracker_class stays None and only
        # evaluation-side utilities of this wrapper remain usable.
        tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', self.name))
        if os.path.isdir(tracker_module_abspath):
            tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))
            self.tracker_class = tracker_module.get_tracker_class()
        else:
            self.tracker_class = None

        self.visdom = None
    def _init_visdom(self, visdom_info, debug):
        """Start a Visdom connection for debug visualization.

        Only attempted when debug > 0 and 'use_visdom' is not disabled in
        visdom_info; on failure the tracker falls back to matplotlib.
        """
        visdom_info = {} if visdom_info is None else visdom_info
        self.pause_mode = False
        self.step = False
        if debug > 0 and visdom_info.get('use_visdom', True):
            try:
                self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},
                                     visdom_info=visdom_info)

                # Show help
                help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \
                            'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \
                            'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \
                            'block list.'
                self.visdom.register(help_text, 'text', 1, 'Help')
            except:
                # NOTE(review): bare except is deliberate best-effort — any
                # Visdom startup failure falls back to matplotlib display.
                time.sleep(0.5)
                print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n'
                      '!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!')

    def _visdom_ui_handler(self, data):
        """Keyboard handler for the Visdom UI: space toggles pause, right arrow single-steps while paused."""
        if data['event_type'] == 'KeyPress':
            if data['key'] == ' ':
                self.pause_mode = not self.pause_mode
            elif data['key'] == 'ArrowRight' and self.pause_mode:
                self.step = True

    def create_tracker(self, params):
        """Instantiate the underlying tracker class and attach the shared Visdom handle."""
        tracker = self.tracker_class(params)
        tracker.visdom = self.visdom
        return tracker
""" params = self.get_parameters() visualization_ = visualization debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) if visualization is None: if debug is None: visualization_ = getattr(params, 'visualization', False) else: visualization_ = True if debug else False params.visualization = visualization_ params.debug = debug_ self._init_visdom(visdom_info, debug_) if visualization_ and self.visdom is None: self.init_visualization() # Get init information init_info = seq.init_info() is_single_object = not seq.multiobj_mode if multiobj_mode is None: multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default' or is_single_object: tracker = self.create_tracker(params) elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) output = self._track_sequence(tracker, seq, init_info) return output def _track_sequence(self, tracker, seq, init_info): # Define outputs # Each field in output is a list containing tracker prediction for each frame. 
# In case of single object tracking mode: # target_bbox[i] is the predicted bounding box for frame i # time[i] is the processing time for frame i # segmentation[i] is the segmentation mask for frame i (numpy array) # In case of multi object tracking mode: # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in # frame i # time[i] is either the processing time for frame i, or an OrderedDict containing processing times for each # object in frame i # segmentation[i] is the multi-label segmentation mask for frame i (numpy array) output = {'target_bbox': [], 'time': [], 'segmentation': []} def _store_outputs(tracker_out: dict, defaults=None): defaults = {} if defaults is None else defaults for key in output.keys(): val = tracker_out.get(key, defaults.get(key, None)) if key in tracker_out or val is not None: output[key].append(val) # Initialize image = self._read_image(seq.frames[0]) if tracker.params.visualization and self.visdom is None: self.visualize(image, init_info.get('init_bbox')) start_time = time.time() out = tracker.initialize(image, init_info) if out is None: out = {} prev_output = OrderedDict(out) init_default = {'target_bbox': init_info.get('init_bbox'), 'time': time.time() - start_time, 'segmentation': init_info.get('init_mask')} _store_outputs(out, init_default) for frame_num, frame_path in enumerate(seq.frames[1:], start=1): while True: if not self.pause_mode: break elif self.step: self.step = False break else: time.sleep(0.1) image = self._read_image(frame_path) start_time = time.time() info = seq.frame_info(frame_num) info['previous_output'] = prev_output out = tracker.track(image, info) prev_output = OrderedDict(out) _store_outputs(out, {'time': time.time() - start_time}) segmentation = out['segmentation'] if 'segmentation' in out else None if self.visdom is not None: tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation) elif tracker.params.visualization: self.visualize(image, 
out['target_bbox'], segmentation) for key in ['target_bbox', 'segmentation']: if key in output and len(output[key]) <= 1: output.pop(key) return output def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None): """Run the tracker with the vieofile. args: debug: Debug level. """ params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ params.tracker_name = self.name params.param_name = self.parameter_name self._init_visdom(visdom_info, debug_) multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default': tracker = self.create_tracker(params) if hasattr(tracker, 'initialize_features'): tracker.initialize_features() elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) assert os.path.isfile(videofilepath), "Invalid param {}".format(videofilepath) ", videofilepath must be a valid videofile" cap = cv.VideoCapture(videofilepath) display_name = 'Display: ' + tracker.params.tracker_name cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) cv.resizeWindow(display_name, 960, 720) success, frame = cap.read() cv.imshow(display_name, frame) def _build_init_info(box): return {'init_bbox': OrderedDict({1: box}), 'init_object_ids': [1, ], 'object_ids': [1, ], 'sequence_object_ids': [1, ]} if success is not True: print("Read frame from {} failed.".format(videofilepath)) exit(-1) if optional_box is not None: assert isinstance(optional_box, list, tuple) assert len(optional_box) == 4, "valid box's foramt is [x,y,w,h]" tracker.initialize(frame, _build_init_info(optional_box)) else: while True: # cv.waitKey() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) x, y, w, h = 
cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) break while True: ret, frame = cap.read() if frame is None: return frame_disp = frame.copy() # Draw box out = tracker.track(frame) state = [int(s) for s in out['target_bbox'][1]] cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]), (0, 255, 0), 5) font_color = (0, 0, 0) cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) # Display the resulting frame cv.imshow(display_name, frame_disp) key = cv.waitKey(1) if key == ord('q'): break elif key == ord('r'): ret, frame = cap.read() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) cv.imshow(display_name, frame_disp) x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) # When everything done, release the capture cap.release() cv.destroyAllWindows() def run_webcam(self, debug=None, visdom_info=None): """Run the tracker with the webcam. args: debug: Debug level. 
""" params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ params.tracker_name = self.name params.param_name = self.parameter_name self._init_visdom(visdom_info, debug_) multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default': tracker = self.create_tracker(params) elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) class UIControl: def __init__(self): self.mode = 'init' # init, select, track self.target_tl = (-1, -1) self.target_br = (-1, -1) self.new_init = False def mouse_callback(self, event, x, y, flags, param): if event == cv.EVENT_LBUTTONDOWN and self.mode == 'init': self.target_tl = (x, y) self.target_br = (x, y) self.mode = 'select' elif event == cv.EVENT_MOUSEMOVE and self.mode == 'select': self.target_br = (x, y) elif event == cv.EVENT_LBUTTONDOWN and self.mode == 'select': self.target_br = (x, y) self.mode = 'init' self.new_init = True def get_tl(self): return self.target_tl if self.target_tl[0] < self.target_br[0] else self.target_br def get_br(self): return self.target_br if self.target_tl[0] < self.target_br[0] else self.target_tl def get_bb(self): tl = self.get_tl() br = self.get_br() bb = [min(tl[0], br[0]), min(tl[1], br[1]), abs(br[0] - tl[0]), abs(br[1] - tl[1])] return bb ui_control = UIControl() cap = cv.VideoCapture(0) display_name = 'Display: ' + self.name cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) cv.resizeWindow(display_name, 960, 720) cv.setMouseCallback(display_name, ui_control.mouse_callback) next_object_id = 1 sequence_object_ids = [] prev_output = OrderedDict() while True: # Capture frame-by-frame ret, frame = cap.read() frame_disp = frame.copy() info = OrderedDict() info['previous_output'] = prev_output if 
ui_control.new_init: ui_control.new_init = False init_state = ui_control.get_bb() info['init_object_ids'] = [next_object_id, ] info['init_bbox'] = OrderedDict({next_object_id: init_state}) sequence_object_ids.append(next_object_id) next_object_id += 1 # Draw box if ui_control.mode == 'select': cv.rectangle(frame_disp, ui_control.get_tl(), ui_control.get_br(), (255, 0, 0), 2) if len(sequence_object_ids) > 0: info['sequence_object_ids'] = sequence_object_ids out = tracker.track(frame, info) prev_output = OrderedDict(out) if 'segmentation' in out: frame_disp = overlay_mask(frame_disp, out['segmentation']) if 'target_bbox' in out: for obj_id, state in out['target_bbox'].items(): state = [int(s) for s in state] cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]), _tracker_disp_colors[obj_id], 5) # Put text font_color = (0, 0, 0) cv.putText(frame_disp, 'Select target', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press q to quit', (20, 85), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) # Display the resulting frame cv.imshow(display_name, frame_disp) key = cv.waitKey(1) if key == ord('q'): break elif key == ord('r'): next_object_id = 1 sequence_object_ids = [] prev_output = OrderedDict() info = OrderedDict() info['object_ids'] = [] info['init_object_ids'] = [] info['init_bbox'] = OrderedDict() tracker.initialize(frame, info) ui_control.mode = 'init' # When everything done, release the capture cap.release() cv.destroyAllWindows() def run_vot2020(self, debug=None, visdom_info=None): params = self.get_parameters() params.tracker_name = self.name params.param_name = self.parameter_name params.run_id = self.run_id debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) if debug is None: visualization_ = getattr(params, 'visualization', False) else: visualization_ = True if debug else 
    def run_vot2020(self, debug=None, visdom_info=None):
        """Run the tracker under the VOT2020 toolkit protocol (mask or rectangle)."""
        params = self.get_parameters()
        params.tracker_name = self.name
        params.param_name = self.parameter_name
        params.run_id = self.run_id

        debug_ = debug
        if debug is None:
            debug_ = getattr(params, 'debug', 0)

        if debug is None:
            visualization_ = getattr(params, 'visualization', False)
        else:
            visualization_ = True if debug else False

        params.visualization = visualization_
        params.debug = debug_

        self._init_visdom(visdom_info, debug_)

        tracker = self.create_tracker(params)
        tracker.initialize_features()
        # Whether the tracker reports masks determines the VOT handle type below.
        output_segmentation = tracker.predicts_segmentation_mask()

        # Imported here because the vot2020 module talks to the toolkit's
        # TraX protocol and is only usable inside a VOT run.
        import pytracking.evaluation.vot2020 as vot

        def _convert_anno_to_list(vot_anno):
            vot_anno = [vot_anno[0], vot_anno[1], vot_anno[2], vot_anno[3]]
            return vot_anno

        def _convert_image_path(image_path):
            return image_path

        """Run tracker on VOT."""

        if output_segmentation:
            handle = vot.VOT("mask")
        else:
            handle = vot.VOT("rectangle")

        vot_anno = handle.region()

        image_path = handle.frame()
        if not image_path:
            return
        image_path = _convert_image_path(image_path)

        image = self._read_image(image_path)

        if output_segmentation:
            # Pad the toolkit's cropped init mask up to full image size, then
            # derive the init box from it.
            vot_anno_mask = vot.make_full_size(vot_anno, (image.shape[1], image.shape[0]))
            bbox = masks_to_bboxes(torch.from_numpy(vot_anno_mask), fmt='t').squeeze().tolist()
        else:
            bbox = _convert_anno_to_list(vot_anno)
            vot_anno_mask = None

        out = tracker.initialize(image, {'init_mask': vot_anno_mask, 'init_bbox': bbox})

        if out is None:
            out = {}
        prev_output = OrderedDict(out)

        # Track
        while True:
            image_path = handle.frame()
            if not image_path:
                break
            image_path = _convert_image_path(image_path)

            image = self._read_image(image_path)

            info = OrderedDict()
            info['previous_output'] = prev_output

            out = tracker.track(image, info)
            prev_output = OrderedDict(out)

            if output_segmentation:
                pred = out['segmentation'].astype(np.uint8)
            else:
                state = out['target_bbox']
                pred = vot.Rectangle(*state)
            handle.report(pred, 1.0)

            segmentation = out['segmentation'] if 'segmentation' in out else None
            if self.visdom is not None:
                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)
            elif tracker.params.visualization:
                self.visualize(image, out['target_bbox'], segmentation)
    def run_vot(self, debug=None, visdom_info=None):
        """Run the tracker under the legacy VOT (polygon annotation) protocol."""
        params = self.get_parameters()
        params.tracker_name = self.name
        params.param_name = self.parameter_name
        params.run_id = self.run_id

        debug_ = debug
        if debug is None:
            debug_ = getattr(params, 'debug', 0)

        if debug is None:
            visualization_ = getattr(params, 'visualization', False)
        else:
            visualization_ = True if debug else False

        params.visualization = visualization_
        params.debug = debug_

        self._init_visdom(visdom_info, debug_)

        tracker = self.create_tracker(params)
        tracker.initialize_features()

        # Imported here because the vot module talks to the toolkit's TraX
        # protocol and is only usable inside a VOT run.
        import pytracking.evaluation.vot as vot

        def _convert_anno_to_list(vot_anno):
            # Flatten the 4-point polygon into [x1, y1, x2, y2, x3, y3, x4, y4].
            vot_anno = [vot_anno[0][0][0], vot_anno[0][0][1], vot_anno[0][1][0], vot_anno[0][1][1],
                        vot_anno[0][2][0], vot_anno[0][2][1], vot_anno[0][3][0], vot_anno[0][3][1]]
            return vot_anno

        def _convert_image_path(image_path):
            # NOTE(review): slices a fixed prefix/suffix off the toolkit-provided
            # path string — presumably strips TraX wrapping; confirm against the
            # vot integration module before changing.
            image_path_new = image_path[20:-2]
            return "".join(image_path_new)

        """Run tracker on VOT."""

        handle = vot.VOT("polygon")

        vot_anno_polygon = handle.region()
        vot_anno_polygon = _convert_anno_to_list(vot_anno_polygon)

        init_state = convert_vot_anno_to_rect(vot_anno_polygon, tracker.params.vot_anno_conversion_type)

        image_path = handle.frame()
        if not image_path:
            return
        image_path = _convert_image_path(image_path)

        image = self._read_image(image_path)
        tracker.initialize(image, {'init_bbox': init_state})

        # Track
        while True:
            image_path = handle.frame()
            if not image_path:
                break
            image_path = _convert_image_path(image_path)

            image = self._read_image(image_path)
            out = tracker.track(image)
            state = out['target_bbox']

            handle.report(vot.Rectangle(state[0], state[1], state[2], state[3]))

            segmentation = out['segmentation'] if 'segmentation' in out else None
            if self.visdom is not None:
                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)
            elif tracker.params.visualization:
                self.visualize(image, out['target_bbox'], segmentation)
    def get_parameters(self):
        """Get parameters."""
        # Parameters live in a per-tracker module: pytracking.parameter.<name>.<parameter_name>.
        param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, self.parameter_name))
        params = param_module.parameters()
        return params

    def init_visualization(self):
        """Create the matplotlib figure used as fallback visualization."""
        self.pause_mode = False
        self.fig, self.ax = plt.subplots(1)
        self.fig.canvas.mpl_connect('key_press_event', self.press)
        plt.tight_layout()

    def visualize(self, image, state, segmentation=None):
        """Draw the frame, predicted box(es), optional mask and optional gt box."""
        self.ax.cla()
        self.ax.imshow(image)
        if segmentation is not None:
            self.ax.imshow(segmentation, alpha=0.5)

        # Multi-object mode passes a dict of boxes keyed by object id.
        if isinstance(state, (OrderedDict, dict)):
            boxes = [v for k, v in state.items()]
        else:
            boxes = (state,)

        for i, box in enumerate(boxes, start=1):
            # Matplotlib expects colors in [0, 1].
            col = _tracker_disp_colors[i]
            col = [float(c) / 255.0 for c in col]
            rect = patches.Rectangle((box[0], box[1]), box[2], box[3], linewidth=1, edgecolor=col, facecolor='none')
            self.ax.add_patch(rect)

        if getattr(self, 'gt_state', None) is not None:
            gt_state = self.gt_state
            rect = patches.Rectangle((gt_state[0], gt_state[1]), gt_state[2], gt_state[3], linewidth=1, edgecolor='g',
                                     facecolor='none')
            self.ax.add_patch(rect)
        self.ax.set_axis_off()
        self.ax.axis('equal')
        draw_figure(self.fig)

        if self.pause_mode:
            # Block until any key/mouse press while paused.
            keypress = False
            while not keypress:
                keypress = plt.waitforbuttonpress()

    def reset_tracker(self):
        # Hook for subclasses/UI; intentionally a no-op here.
        pass

    def press(self, event):
        """Matplotlib key handler: 'p' toggles pause, 'r' resets the tracker."""
        if event.key == 'p':
            self.pause_mode = not self.pause_mode
            print("Switching pause mode!")
        elif event.key == 'r':
            self.reset_tracker()
            print("Resetting target pos to gt!")

    def _read_image(self, image_file: str):
        """Load an image from disk as an RGB numpy array (OpenCV loads BGR)."""
        im = cv.imread(image_file)
        return cv.cvtColor(im, cv.COLOR_BGR2RGB)
# ================ FILE: external/AR/pytracking/evaluation/trackingnetdataset.py ================
import numpy as np
import os

from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from pytracking.utils.load_text import load_text


class TrackingNetDataset(BaseDataset):
    """ TrackingNet test set.

    Publication:
        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.
        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
        ECCV, 2018
        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf

    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
    """
    def __init__(self, sets='TEST'):
        """args:
            sets: 'TEST' (default, backward compatible), 'TRAIN', or an explicit
                list/tuple of split directory names. The previous version
                hard-coded 'TEST', leaving the TRAIN branch unreachable.
        """
        super().__init__()
        self.base_path = self.env_settings.trackingnet_path

        if not isinstance(sets, (list, tuple)):
            if sets == 'TEST':
                sets = ['TEST']
            elif sets == 'TRAIN':
                # TrackingNet training data is shipped as 5 chunks.
                sets = ['TRAIN_{}'.format(i) for i in range(5)]

        self.sequence_list = self._list_sequences(self.base_path, sets)

    def get_sequence_list(self):
        """Return all sequences of the configured splits as a SequenceList."""
        # Renamed loop variable: the original shadowed the builtin `set`.
        return SequenceList([self._construct_sequence(set_name, seq_name)
                             for set_name, seq_name in self.sequence_list])

    def _construct_sequence(self, set_name, sequence_name):
        """Build a Sequence from one (split, name) pair: sorted frame paths + Nx4 gt boxes."""
        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set_name, sequence_name)
        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')

        frames_path = '{}/{}/frames/{}'.format(self.base_path, set_name, sequence_name)
        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")]
        # Frames are named <index>.jpg; sort numerically, not lexically.
        frame_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]

        return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4))

    def __len__(self):
        return len(self.sequence_list)

    def _list_sequences(self, root, set_ids):
        """Collect (set, sequence_name) pairs from each split's anno directory."""
        sequence_list = []

        for s in set_ids:
            anno_dir = os.path.join(root, s, "anno")
            sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]

            sequence_list += sequences_cur_set

        return sequence_list
import load_text class UAVDataset(BaseDataset): """ UAV123 dataset. Publication: A Benchmark and Simulator for UAV Tracking. Matthias Mueller, Neil Smith and Bernard Ghanem ECCV, 2016 https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx """ def __init__(self): super().__init__() self.base_path = self.env_settings.uav_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "uav_bike1", "path": "data_seq/UAV123/bike1", "startFrame": 1, "endFrame": 3085, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike1.txt", "object_class": "vehicle"}, {"name": "uav_bike2", "path": "data_seq/UAV123/bike2", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike2.txt", "object_class": "vehicle"}, {"name": "uav_bike3", "path": "data_seq/UAV123/bike3", "startFrame": 1, 
"endFrame": 433, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike3.txt", "object_class": "vehicle"}, {"name": "uav_bird1_1", "path": "data_seq/UAV123/bird1", "startFrame": 1, "endFrame": 253, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_1.txt", "object_class": "bird"}, {"name": "uav_bird1_2", "path": "data_seq/UAV123/bird1", "startFrame": 775, "endFrame": 1477, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_2.txt", "object_class": "bird"}, {"name": "uav_bird1_3", "path": "data_seq/UAV123/bird1", "startFrame": 1573, "endFrame": 2437, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_3.txt", "object_class": "bird"}, {"name": "uav_boat1", "path": "data_seq/UAV123/boat1", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat1.txt", "object_class": "vessel"}, {"name": "uav_boat2", "path": "data_seq/UAV123/boat2", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat2.txt", "object_class": "vessel"}, {"name": "uav_boat3", "path": "data_seq/UAV123/boat3", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat3.txt", "object_class": "vessel"}, {"name": "uav_boat4", "path": "data_seq/UAV123/boat4", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat4.txt", "object_class": "vessel"}, {"name": "uav_boat5", "path": "data_seq/UAV123/boat5", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat5.txt", "object_class": "vessel"}, {"name": "uav_boat6", "path": "data_seq/UAV123/boat6", "startFrame": 1, "endFrame": 805, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat6.txt", "object_class": "vessel"}, {"name": "uav_boat7", "path": "data_seq/UAV123/boat7", "startFrame": 1, "endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat7.txt", "object_class": "vessel"}, {"name": "uav_boat8", "path": "data_seq/UAV123/boat8", "startFrame": 1, "endFrame": 685, "nz": 6, "ext": 
"jpg", "anno_path": "anno/UAV123/boat8.txt", "object_class": "vessel"}, {"name": "uav_boat9", "path": "data_seq/UAV123/boat9", "startFrame": 1, "endFrame": 1399, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat9.txt", "object_class": "vessel"}, {"name": "uav_building1", "path": "data_seq/UAV123/building1", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building1.txt", "object_class": "other"}, {"name": "uav_building2", "path": "data_seq/UAV123/building2", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building2.txt", "object_class": "other"}, {"name": "uav_building3", "path": "data_seq/UAV123/building3", "startFrame": 1, "endFrame": 829, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building3.txt", "object_class": "other"}, {"name": "uav_building4", "path": "data_seq/UAV123/building4", "startFrame": 1, "endFrame": 787, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building4.txt", "object_class": "other"}, {"name": "uav_building5", "path": "data_seq/UAV123/building5", "startFrame": 1, "endFrame": 481, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building5.txt", "object_class": "other"}, {"name": "uav_car1_1", "path": "data_seq/UAV123/car1", "startFrame": 1, "endFrame": 751, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_1.txt", "object_class": "car"}, {"name": "uav_car1_2", "path": "data_seq/UAV123/car1", "startFrame": 751, "endFrame": 1627, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_2.txt", "object_class": "car"}, {"name": "uav_car1_3", "path": "data_seq/UAV123/car1", "startFrame": 1627, "endFrame": 2629, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_3.txt", "object_class": "car"}, {"name": "uav_car10", "path": "data_seq/UAV123/car10", "startFrame": 1, "endFrame": 1405, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car10.txt", "object_class": "car"}, {"name": "uav_car11", "path": "data_seq/UAV123/car11", "startFrame": 1, "endFrame": 337, "nz": 6, 
"ext": "jpg", "anno_path": "anno/UAV123/car11.txt", "object_class": "car"}, {"name": "uav_car12", "path": "data_seq/UAV123/car12", "startFrame": 1, "endFrame": 499, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car12.txt", "object_class": "car"}, {"name": "uav_car13", "path": "data_seq/UAV123/car13", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car13.txt", "object_class": "car"}, {"name": "uav_car14", "path": "data_seq/UAV123/car14", "startFrame": 1, "endFrame": 1327, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car14.txt", "object_class": "car"}, {"name": "uav_car15", "path": "data_seq/UAV123/car15", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car15.txt", "object_class": "car"}, {"name": "uav_car16_1", "path": "data_seq/UAV123/car16", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_1.txt", "object_class": "car"}, {"name": "uav_car16_2", "path": "data_seq/UAV123/car16", "startFrame": 415, "endFrame": 1993, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_2.txt", "object_class": "car"}, {"name": "uav_car17", "path": "data_seq/UAV123/car17", "startFrame": 1, "endFrame": 1057, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car17.txt", "object_class": "car"}, {"name": "uav_car18", "path": "data_seq/UAV123/car18", "startFrame": 1, "endFrame": 1207, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car18.txt", "object_class": "car"}, {"name": "uav_car1_s", "path": "data_seq/UAV123/car1_s", "startFrame": 1, "endFrame": 1475, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_s.txt", "object_class": "car"}, {"name": "uav_car2", "path": "data_seq/UAV123/car2", "startFrame": 1, "endFrame": 1321, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car2.txt", "object_class": "car"}, {"name": "uav_car2_s", "path": "data_seq/UAV123/car2_s", "startFrame": 1, "endFrame": 320, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car2_s.txt", 
"object_class": "car"}, {"name": "uav_car3", "path": "data_seq/UAV123/car3", "startFrame": 1, "endFrame": 1717, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car3.txt", "object_class": "car"}, {"name": "uav_car3_s", "path": "data_seq/UAV123/car3_s", "startFrame": 1, "endFrame": 1300, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car3_s.txt", "object_class": "car"}, {"name": "uav_car4", "path": "data_seq/UAV123/car4", "startFrame": 1, "endFrame": 1345, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4.txt", "object_class": "car"}, {"name": "uav_car4_s", "path": "data_seq/UAV123/car4_s", "startFrame": 1, "endFrame": 830, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4_s.txt", "object_class": "car"}, {"name": "uav_car5", "path": "data_seq/UAV123/car5", "startFrame": 1, "endFrame": 745, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car5.txt", "object_class": "car"}, {"name": "uav_car6_1", "path": "data_seq/UAV123/car6", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_1.txt", "object_class": "car"}, {"name": "uav_car6_2", "path": "data_seq/UAV123/car6", "startFrame": 487, "endFrame": 1807, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_2.txt", "object_class": "car"}, {"name": "uav_car6_3", "path": "data_seq/UAV123/car6", "startFrame": 1807, "endFrame": 2953, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_3.txt", "object_class": "car"}, {"name": "uav_car6_4", "path": "data_seq/UAV123/car6", "startFrame": 2953, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_4.txt", "object_class": "car"}, {"name": "uav_car6_5", "path": "data_seq/UAV123/car6", "startFrame": 3925, "endFrame": 4861, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_5.txt", "object_class": "car"}, {"name": "uav_car7", "path": "data_seq/UAV123/car7", "startFrame": 1, "endFrame": 1033, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car7.txt", "object_class": "car"}, {"name": "uav_car8_1", "path": 
"data_seq/UAV123/car8", "startFrame": 1, "endFrame": 1357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_1.txt", "object_class": "car"}, {"name": "uav_car8_2", "path": "data_seq/UAV123/car8", "startFrame": 1357, "endFrame": 2575, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_2.txt", "object_class": "car"}, {"name": "uav_car9", "path": "data_seq/UAV123/car9", "startFrame": 1, "endFrame": 1879, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car9.txt", "object_class": "car"}, {"name": "uav_group1_1", "path": "data_seq/UAV123/group1", "startFrame": 1, "endFrame": 1333, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_1.txt", "object_class": "person"}, {"name": "uav_group1_2", "path": "data_seq/UAV123/group1", "startFrame": 1333, "endFrame": 2515, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_2.txt", "object_class": "person"}, {"name": "uav_group1_3", "path": "data_seq/UAV123/group1", "startFrame": 2515, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_3.txt", "object_class": "person"}, {"name": "uav_group1_4", "path": "data_seq/UAV123/group1", "startFrame": 3925, "endFrame": 4873, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_4.txt", "object_class": "person"}, {"name": "uav_group2_1", "path": "data_seq/UAV123/group2", "startFrame": 1, "endFrame": 907, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_1.txt", "object_class": "person"}, {"name": "uav_group2_2", "path": "data_seq/UAV123/group2", "startFrame": 907, "endFrame": 1771, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_2.txt", "object_class": "person"}, {"name": "uav_group2_3", "path": "data_seq/UAV123/group2", "startFrame": 1771, "endFrame": 2683, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_3.txt", "object_class": "person"}, {"name": "uav_group3_1", "path": "data_seq/UAV123/group3", "startFrame": 1, "endFrame": 1567, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_1.txt", "object_class": "person"}, {"name": 
"uav_group3_2", "path": "data_seq/UAV123/group3", "startFrame": 1567, "endFrame": 2827, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_2.txt", "object_class": "person"}, {"name": "uav_group3_3", "path": "data_seq/UAV123/group3", "startFrame": 2827, "endFrame": 4369, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_3.txt", "object_class": "person"}, {"name": "uav_group3_4", "path": "data_seq/UAV123/group3", "startFrame": 4369, "endFrame": 5527, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_4.txt", "object_class": "person"}, {"name": "uav_person1", "path": "data_seq/UAV123/person1", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1.txt", "object_class": "person"}, {"name": "uav_person10", "path": "data_seq/UAV123/person10", "startFrame": 1, "endFrame": 1021, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person10.txt", "object_class": "person"}, {"name": "uav_person11", "path": "data_seq/UAV123/person11", "startFrame": 1, "endFrame": 721, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person11.txt", "object_class": "person"}, {"name": "uav_person12_1", "path": "data_seq/UAV123/person12", "startFrame": 1, "endFrame": 601, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_1.txt", "object_class": "person"}, {"name": "uav_person12_2", "path": "data_seq/UAV123/person12", "startFrame": 601, "endFrame": 1621, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_2.txt", "object_class": "person"}, {"name": "uav_person13", "path": "data_seq/UAV123/person13", "startFrame": 1, "endFrame": 883, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person13.txt", "object_class": "person"}, {"name": "uav_person14_1", "path": "data_seq/UAV123/person14", "startFrame": 1, "endFrame": 847, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_1.txt", "object_class": "person"}, {"name": "uav_person14_2", "path": "data_seq/UAV123/person14", "startFrame": 847, "endFrame": 1813, "nz": 6, "ext": "jpg", 
"anno_path": "anno/UAV123/person14_2.txt", "object_class": "person"}, {"name": "uav_person14_3", "path": "data_seq/UAV123/person14", "startFrame": 1813, "endFrame": 2923, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_3.txt", "object_class": "person"}, {"name": "uav_person15", "path": "data_seq/UAV123/person15", "startFrame": 1, "endFrame": 1339, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person15.txt", "object_class": "person"}, {"name": "uav_person16", "path": "data_seq/UAV123/person16", "startFrame": 1, "endFrame": 1147, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person16.txt", "object_class": "person"}, {"name": "uav_person17_1", "path": "data_seq/UAV123/person17", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_1.txt", "object_class": "person"}, {"name": "uav_person17_2", "path": "data_seq/UAV123/person17", "startFrame": 1501, "endFrame": 2347, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_2.txt", "object_class": "person"}, {"name": "uav_person18", "path": "data_seq/UAV123/person18", "startFrame": 1, "endFrame": 1393, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person18.txt", "object_class": "person"}, {"name": "uav_person19_1", "path": "data_seq/UAV123/person19", "startFrame": 1, "endFrame": 1243, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_1.txt", "object_class": "person"}, {"name": "uav_person19_2", "path": "data_seq/UAV123/person19", "startFrame": 1243, "endFrame": 2791, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_2.txt", "object_class": "person"}, {"name": "uav_person19_3", "path": "data_seq/UAV123/person19", "startFrame": 2791, "endFrame": 4357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_3.txt", "object_class": "person"}, {"name": "uav_person1_s", "path": "data_seq/UAV123/person1_s", "startFrame": 1, "endFrame": 1600, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1_s.txt", "object_class": "person"}, {"name": 
"uav_person2_1", "path": "data_seq/UAV123/person2", "startFrame": 1, "endFrame": 1189, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_1.txt", "object_class": "person"}, {"name": "uav_person2_2", "path": "data_seq/UAV123/person2", "startFrame": 1189, "endFrame": 2623, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_2.txt", "object_class": "person"}, {"name": "uav_person20", "path": "data_seq/UAV123/person20", "startFrame": 1, "endFrame": 1783, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person20.txt", "object_class": "person"}, {"name": "uav_person21", "path": "data_seq/UAV123/person21", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person21.txt", "object_class": "person"}, {"name": "uav_person22", "path": "data_seq/UAV123/person22", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person22.txt", "object_class": "person"}, {"name": "uav_person23", "path": "data_seq/UAV123/person23", "startFrame": 1, "endFrame": 397, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person23.txt", "object_class": "person"}, {"name": "uav_person2_s", "path": "data_seq/UAV123/person2_s", "startFrame": 1, "endFrame": 250, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_s.txt", "object_class": "person"}, {"name": "uav_person3", "path": "data_seq/UAV123/person3", "startFrame": 1, "endFrame": 643, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3.txt", "object_class": "person"}, {"name": "uav_person3_s", "path": "data_seq/UAV123/person3_s", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3_s.txt", "object_class": "person"}, {"name": "uav_person4_1", "path": "data_seq/UAV123/person4", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person4_1.txt", "object_class": "person"}, {"name": "uav_person4_2", "path": "data_seq/UAV123/person4", "startFrame": 1501, "endFrame": 2743, "nz": 6, "ext": "jpg", "anno_path": 
"anno/UAV123/person4_2.txt", "object_class": "person"}, {"name": "uav_person5_1", "path": "data_seq/UAV123/person5", "startFrame": 1, "endFrame": 877, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_1.txt", "object_class": "person"}, {"name": "uav_person5_2", "path": "data_seq/UAV123/person5", "startFrame": 877, "endFrame": 2101, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_2.txt", "object_class": "person"}, {"name": "uav_person6", "path": "data_seq/UAV123/person6", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person6.txt", "object_class": "person"}, {"name": "uav_person7_1", "path": "data_seq/UAV123/person7", "startFrame": 1, "endFrame": 1249, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_1.txt", "object_class": "person"}, {"name": "uav_person7_2", "path": "data_seq/UAV123/person7", "startFrame": 1249, "endFrame": 2065, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_2.txt", "object_class": "person"}, {"name": "uav_person8_1", "path": "data_seq/UAV123/person8", "startFrame": 1, "endFrame": 1075, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_1.txt", "object_class": "person"}, {"name": "uav_person8_2", "path": "data_seq/UAV123/person8", "startFrame": 1075, "endFrame": 1525, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_2.txt", "object_class": "person"}, {"name": "uav_person9", "path": "data_seq/UAV123/person9", "startFrame": 1, "endFrame": 661, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person9.txt", "object_class": "person"}, {"name": "uav_truck1", "path": "data_seq/UAV123/truck1", "startFrame": 1, "endFrame": 463, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck1.txt", "object_class": "truck"}, {"name": "uav_truck2", "path": "data_seq/UAV123/truck2", "startFrame": 1, "endFrame": 385, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck2.txt", "object_class": "truck"}, {"name": "uav_truck3", "path": "data_seq/UAV123/truck3", "startFrame": 1, 
"endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck3.txt", "object_class": "truck"}, {"name": "uav_truck4_1", "path": "data_seq/UAV123/truck4", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_1.txt", "object_class": "truck"}, {"name": "uav_truck4_2", "path": "data_seq/UAV123/truck4", "startFrame": 577, "endFrame": 1261, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_2.txt", "object_class": "truck"}, {"name": "uav_uav1_1", "path": "data_seq/UAV123/uav1", "startFrame": 1, "endFrame": 1555, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_1.txt", "object_class": "aircraft"}, {"name": "uav_uav1_2", "path": "data_seq/UAV123/uav1", "startFrame": 1555, "endFrame": 2377, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_2.txt", "object_class": "aircraft"}, {"name": "uav_uav1_3", "path": "data_seq/UAV123/uav1", "startFrame": 2473, "endFrame": 3469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_3.txt", "object_class": "aircraft"}, {"name": "uav_uav2", "path": "data_seq/UAV123/uav2", "startFrame": 1, "endFrame": 133, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav2.txt", "object_class": "aircraft"}, {"name": "uav_uav3", "path": "data_seq/UAV123/uav3", "startFrame": 1, "endFrame": 265, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav3.txt", "object_class": "aircraft"}, {"name": "uav_uav4", "path": "data_seq/UAV123/uav4", "startFrame": 1, "endFrame": 157, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav4.txt", "object_class": "aircraft"}, {"name": "uav_uav5", "path": "data_seq/UAV123/uav5", "startFrame": 1, "endFrame": 139, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav5.txt", "object_class": "aircraft"}, {"name": "uav_uav6", "path": "data_seq/UAV123/uav6", "startFrame": 1, "endFrame": 109, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav6.txt", "object_class": "aircraft"}, {"name": "uav_uav7", "path": "data_seq/UAV123/uav7", "startFrame": 1, "endFrame": 373, "nz": 6, 
"ext": "jpg", "anno_path": "anno/UAV123/uav7.txt", "object_class": "aircraft"}, {"name": "uav_uav8", "path": "data_seq/UAV123/uav8", "startFrame": 1, "endFrame": 301, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav8.txt", "object_class": "aircraft"}, {"name": "uav_wakeboard1", "path": "data_seq/UAV123/wakeboard1", "startFrame": 1, "endFrame": 421, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard1.txt", "object_class": "person"}, {"name": "uav_wakeboard10", "path": "data_seq/UAV123/wakeboard10", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard10.txt", "object_class": "person"}, {"name": "uav_wakeboard2", "path": "data_seq/UAV123/wakeboard2", "startFrame": 1, "endFrame": 733, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard2.txt", "object_class": "person"}, {"name": "uav_wakeboard3", "path": "data_seq/UAV123/wakeboard3", "startFrame": 1, "endFrame": 823, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard3.txt", "object_class": "person"}, {"name": "uav_wakeboard4", "path": "data_seq/UAV123/wakeboard4", "startFrame": 1, "endFrame": 697, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard4.txt", "object_class": "person"}, {"name": "uav_wakeboard5", "path": "data_seq/UAV123/wakeboard5", "startFrame": 1, "endFrame": 1675, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard5.txt", "object_class": "person"}, {"name": "uav_wakeboard6", "path": "data_seq/UAV123/wakeboard6", "startFrame": 1, "endFrame": 1165, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard6.txt", "object_class": "person"}, {"name": "uav_wakeboard7", "path": "data_seq/UAV123/wakeboard7", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard7.txt", "object_class": "person"}, {"name": "uav_wakeboard8", "path": "data_seq/UAV123/wakeboard8", "startFrame": 1, "endFrame": 1543, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard8.txt", "object_class": "person"}, {"name": 
"uav_wakeboard9", "path": "data_seq/UAV123/wakeboard9", "startFrame": 1, "endFrame": 355, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard9.txt", "object_class": "person"} ] return sequence_info_list ================================================ FILE: external/AR/pytracking/evaluation/vot.py ================================================ """ \file vot.py @brief Python utility functions for VOT integration @author Luka Cehovin, Alessio Dore @date 2016 """ import sys import copy import collections try: import trax import trax.server TRAX = True except ImportError: TRAX = False Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height']) Point = collections.namedtuple('Point', ['x', 'y']) Polygon = collections.namedtuple('Polygon', ['points']) def parse_region(string): tokens = map(float, string.split(',')) if len(tokens) == 4: return Rectangle(tokens[0], tokens[1], tokens[2], tokens[3]) elif len(tokens) % 2 == 0 and len(tokens) > 4: return Polygon([Point(tokens[i],tokens[i+1]) for i in xrange(0,len(tokens),2)]) return None def encode_region(region): if isinstance(region, Polygon): return ','.join(['{},{}'.format(p.x,p.y) for p in region.points]) elif isinstance(region, Rectangle): return '{},{},{},{}'.format(region.x, region.y, region.width, region.height) else: return "" def convert_region(region, to): if to == 'rectangle': if isinstance(region, Rectangle): return copy.copy(region) elif isinstance(region, Polygon): top = sys.float_info.max bottom = sys.float_info.min left = sys.float_info.max right = sys.float_info.min for point in region.points: top = min(top, point.y) bottom = max(bottom, point.y) left = min(left, point.x) right = max(right, point.x) return Rectangle(left, top, right - left, bottom - top) else: return None if to == 'polygon': if isinstance(region, Rectangle): points = [] points.append((region.x, region.y)) points.append((region.x + region.width, region.y)) points.append((region.x + region.width, region.y + 
region.height)) points.append((region.x, region.y + region.height)) return Polygon(points) elif isinstance(region, Polygon): return copy.copy(region) else: return None return None class VOT(object): """ Base class for Python VOT integration """ def __init__(self, region_format): """ Constructor Args: region_format: Region format options """ assert(region_format in ['rectangle', 'polygon']) if TRAX: options = trax.server.ServerOptions(region_format, trax.image.PATH) self._trax = trax.server.Server(options) request = self._trax.wait() assert(request.type == 'initialize') if request.region.type == 'polygon': self._region = Polygon([Point(x[0], x[1]) for x in request.region.points]) else: self._region = Rectangle(request.region.x, request.region.y, request.region.width, request.region.height) self._image = str(request.image) self._trax.status(request.region) else: self._files = [x.strip('\n') for x in open('images.txt', 'r').readlines()] self._frame = 0 self._region = convert_region(parse_region(open('region.txt', 'r').readline()), region_format) self._result = [] def region(self): """ Send configuration message to the client and receive the initialization region and the path of the first image Returns: initialization region """ return self._region def report(self, region, confidence = 0): """ Report the tracking results to the client Arguments: region: region for the frame """ assert(isinstance(region, Rectangle) or isinstance(region, Polygon)) if TRAX: if isinstance(region, Polygon): tregion = trax.region.Polygon([(x.x, x.y) for x in region.points]) else: tregion = trax.region.Rectangle(region.x, region.y, region.width, region.height) self._trax.status(tregion, {"confidence" : confidence}) else: self._result.append(region) self._frame += 1 def frame(self): """ Get a frame (image path) from client Returns: absolute path of the image """ if TRAX: if hasattr(self, "_image"): image = str(self._image) del self._image return image request = self._trax.wait() if 
# NOTE(extraction artifact): the tail of vot.py (VOT.frame/quit/__del__) that
# was collapsed onto the start of this boundary belongs to the previous file.

# ================================================
# FILE: external/AR/pytracking/evaluation/vot2020.py
# ================================================
"""
\file vot.py

@brief Python utility functions for VOT integration

@author Luka Cehovin, Alessio Dore

@date 2016
"""

import sys
import copy
import collections

import numpy as np

try:
    import trax
except ImportError:
    raise Exception('TraX support not found. Please add trax module to Python path.')


def make_full_size(x, output_sz):
    """
    zero-pad input x (right and down) to match output_sz
    x: numpy array e.g., binary mask
    output_sz: size of the output [width, height]
    """
    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:
        return x
    pad_x = output_sz[0] - x.shape[1]
    if pad_x < 0:
        x = x[:, :x.shape[1] + pad_x]
        # padding has to be set to zero, otherwise pad function fails
        pad_x = 0
    pad_y = output_sz[1] - x.shape[0]
    if pad_y < 0:
        x = x[:x.shape[0] + pad_y, :]
        # padding has to be set to zero, otherwise pad function fails
        pad_y = 0
    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)


Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
Point = collections.namedtuple('Point', ['x', 'y'])
Polygon = collections.namedtuple('Polygon', ['points'])


class VOT(object):
    """ Base class for Python VOT integration """
    def __init__(self, region_format, channels=None):
        """ Constructor

        Args:
            region_format: Region format options
            channels: input channel configuration (None, 'rgbd', 'rgbt' or 'ir')
        """
        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK])

        if channels is None:
            channels = ['color']
        elif channels == 'rgbd':
            channels = ['color', 'depth']
        elif channels == 'rgbt':
            channels = ['color', 'ir']
        elif channels == 'ir':
            channels = ['ir']
        else:
            raise Exception('Illegal configuration {}.'.format(channels))

        self._trax = trax.Server([region_format], [trax.Image.PATH], channels,
                                 customMetadata=dict(vot="python"))

        request = self._trax.wait()
        assert(request.type == 'initialize')
        # Bug fix: the original used two independent `if` statements here, so
        # a polygon region also fell through into the `else` branch and was
        # overwritten by its bounding rectangle. Use a proper if/elif/else.
        if isinstance(request.region, trax.Polygon):
            self._region = Polygon([Point(x[0], x[1]) for x in request.region])
        elif isinstance(request.region, trax.Mask):
            self._region = request.region.array(True)
        else:
            self._region = Rectangle(*request.region.bounds())
        self._image = [x.path() for k, x in request.image.items()]
        if len(self._image) == 1:
            self._image = self._image[0]

        self._trax.status(request.region)

    def region(self):
        """ Send configuration message to the client and receive the initialization
        region and the path of the first image

        Returns:
            initialization region
        """
        return self._region

    def report(self, region, confidence = None):
        """ Report the tracking results to the client

        Arguments:
            region: region for the frame
        """
        assert(isinstance(region, (Rectangle, Polygon, np.ndarray)))
        # Bug fix: the original used `if ... if ... else`, so a Polygon fell
        # through to the Rectangle branch and crashed on `region.x`.
        if isinstance(region, Polygon):
            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])
        elif isinstance(region, np.ndarray):
            tregion = trax.Mask.create(region)
        else:
            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)
        properties = {}
        if not confidence is None:
            properties['confidence'] = confidence
        self._trax.status(tregion, properties)

    def frame(self):
        """ Get a frame (image path) from client

        Returns:
            absolute path of the image
        """
        if hasattr(self, "_image"):
            image = self._image
            del self._image
            return image

        request = self._trax.wait()

        if request.type == 'frame':
            image = [x.path() for k, x in request.image.items()]
            if len(image) == 1:
                return image[0]
            return image
        else:
            return None

    def quit(self):
        if hasattr(self, '_trax'):
            self._trax.quit()

    def __del__(self):
        self.quit()
# ================================================
# FILE: external/AR/pytracking/evaluation/votdataset.py
# ================================================
import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList


class VOTDataset(BaseDataset):
    """ VOT2018 dataset

    Publication:
        The sixth Visual Object Tracking VOT2018 challenge results.
        Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder,
        Luka Cehovin Zajc, Tomas Vojir, Goutam Bhat, Alan Lukezic et al.
        ECCV, 2018
        https://prints.vicos.si/publications/365

    Download the dataset from http://www.votchallenge.net/vot2018/dataset.html
    """
    def __init__(self):
        super().__init__()
        self.base_path = self.env_settings.vot_path
        self.sequence_list = self._get_sequence_list()

    def get_sequence_list(self):
        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])

    def _construct_sequence(self, sequence_name):
        """Build a Sequence; 8-value polygon annotations are converted to x,y,w,h."""
        sequence_path = sequence_name
        nz = 8
        ext = 'jpg'
        start_frame = 1

        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)
        # Annotations may be whitespace- or comma-separated; try both.
        # Fix: catch only ValueError instead of the original bare `except:`,
        # which also swallowed KeyboardInterrupt and file-system errors.
        try:
            ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
        except ValueError:
            ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)

        end_frame = ground_truth_rect.shape[0]

        frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
                                                                                 sequence_path=sequence_path,
                                                                                 frame=frame_num, nz=nz, ext=ext)
                  for frame_num in range(start_frame, end_frame + 1)]

        # Convert gt: reduce 4-corner polygons to their axis-aligned bounding box.
        if ground_truth_rect.shape[1] > 4:
            gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]
            gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]

            x1 = np.amin(gt_x_all, 1).reshape(-1, 1)
            y1 = np.amin(gt_y_all, 1).reshape(-1, 1)
            x2 = np.amax(gt_x_all, 1).reshape(-1, 1)
            y2 = np.amax(gt_y_all, 1).reshape(-1, 1)

            ground_truth_rect = np.concatenate((x1, y1, x2 - x1, y2 - y1), 1)

        return Sequence(sequence_name, frames, 'vot', ground_truth_rect)

    def __len__(self):
        return len(self.sequence_list)

    def _get_sequence_list(self):
        sequence_list = ['ants1', 'ants3', 'bag', 'ball1', 'ball2', 'basketball', 'birds1',
                         'blanket', 'bmx', 'bolt1', 'bolt2', 'book', 'butterfly', 'car1',
                         'conduction1', 'crabs1', 'crossing', 'dinosaur', 'drone_across',
                         'drone_flip', 'drone1', 'fernando', 'fish1', 'fish2', 'fish3',
                         'flamingo1', 'frisbee', 'girl', 'glove', 'godfather', 'graduate',
                         'gymnastics1', 'gymnastics2', 'gymnastics3', 'hand', 'handball1',
                         'handball2', 'helicopter', 'iceskater1', 'iceskater2', 'leaves',
                         'matrix', 'motocross1', 'motocross2', 'nature', 'pedestrian1',
                         'rabbit', 'racing', 'road', 'shaking', 'sheep', 'singer2', 'singer3',
                         'soccer1', 'soccer2', 'soldier', 'tiger', 'traffic', 'wiper',
                         'zebrafish1']
        return sequence_list


# ================================================
# FILE: external/AR/pytracking/experiments/__init__.py
# ================================================


# ================================================
# FILE: external/AR/pytracking/experiments/myexperiments.py
# ================================================
from pytracking.evaluation import Tracker, get_dataset, trackerlist


def atom_nfs_uav():
    # Run three runs of ATOM on NFS and UAV datasets
    trackers = trackerlist('atom', 'default', range(3))

    dataset = get_dataset('nfs', 'uav')
    return trackers, dataset


def uav_test():
    # Run DiMP18, ATOM and ECO on the UAV dataset
    trackers = trackerlist('dimp', 'dimp18', range(1)) + \
               trackerlist('atom', 'default', range(1)) + \
               trackerlist('eco', 'default', range(1))

    dataset = get_dataset('uav')
    return trackers, dataset


# ================================================
# FILE: external/AR/pytracking/features/__init__.py
# ================================================


# ================================================
# FILE: external/AR/pytracking/features/augmentation.py
# ================================================
import numpy as np
import math
import torch
import torch.nn.functional as F
import cv2 as cv
import random

from pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy
class Transform:
    """Base class for data-augmentation transforms.

    A transform maps an image ((1,C,H,W) torch tensor or a numpy array) to an
    augmented version, optionally padded/cropped to `output_sz` and shifted by
    `shift` (row, col).
    """

    def __init__(self, output_sz=None, shift=None):
        self.output_sz = output_sz
        self.shift = (0, 0) if shift is None else shift

    def __call__(self, image, is_mask=False):
        raise NotImplementedError

    def crop_to_output(self, image):
        """Pad/crop a (1,C,H,W) tensor to self.output_sz, applying self.shift."""
        if not isinstance(image, torch.Tensor):
            raise NotImplementedError
        if self.output_sz is None:
            pad_h = pad_w = 0
        else:
            h, w = image.shape[2:]
            pad_h = (self.output_sz[0] - h) / 2
            pad_w = (self.output_sz[1] - w) / 2
        dy, dx = self.shift
        padding = (math.floor(pad_w) + dx, math.ceil(pad_w) - dx,
                   math.floor(pad_h) + dy, math.ceil(pad_h) - dy)
        return F.pad(image, padding, 'replicate')


class Identity(Transform):
    """Pass the image through unchanged (apart from the output crop/pad)."""

    def __call__(self, image, is_mask=False):
        return self.crop_to_output(image)


class FlipHorizontal(Transform):
    """Mirror the image left-right."""

    def __call__(self, image, is_mask=False):
        if isinstance(image, torch.Tensor):
            return self.crop_to_output(image.flip((3,)))
        return np.fliplr(image)


class FlipVertical(Transform):
    """Mirror the image top-bottom."""

    def __call__(self, image: torch.Tensor, is_mask=False):
        if isinstance(image, torch.Tensor):
            return self.crop_to_output(image.flip((2,)))
        return np.flipud(image)


class Translation(Transform):
    """Translate the image by a fixed offset, folded into the crop shift."""

    def __init__(self, translation, output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        dy, dx = translation
        self.shift = (self.shift[0] + dy, self.shift[1] + dx)

    def __call__(self, image, is_mask=False):
        if not isinstance(image, torch.Tensor):
            raise NotImplementedError
        return self.crop_to_output(image)


class Scale(Transform):
    """Rescale a square image by a fixed factor."""

    def __init__(self, scale_factor, output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        self.scale_factor = scale_factor

    def __call__(self, image, is_mask=False):
        if not isinstance(image, torch.Tensor):
            raise NotImplementedError
        h, w = image.shape[2:]
        # Only square inputs are supported.
        if h != w:
            raise NotImplementedError
        # Keep the size difference even so crop/pad splits symmetrically.
        new_h = round(h / self.scale_factor)
        new_h += (new_h - h) % 2
        new_w = round(w / self.scale_factor)
        new_w += (new_w - w) % 2
        resized = F.interpolate(image, [new_h, new_w], mode='bilinear')
        return self.crop_to_output(resized)


class Affine(Transform):
    """Apply a fixed 2x3 affine warp."""

    def __init__(self, transform_matrix, output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        self.transform_matrix = transform_matrix

    def __call__(self, image, is_mask=False):
        if isinstance(image, torch.Tensor):
            # Round-trip through numpy so cv.warpAffine does the work.
            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))
        return cv.warpAffine(image, self.transform_matrix, image.shape[1::-1],
                             borderMode=cv.BORDER_REPLICATE)


class Rotate(Transform):
    """Rotate the image about its center by a fixed angle (given in degrees)."""

    def __init__(self, angle, output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        self.angle = math.pi * angle / 180

    def __call__(self, image, is_mask=False):
        if isinstance(image, torch.Tensor):
            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))
        center = (np.expand_dims(np.array(image.shape[:2]), 1) - 1) / 2
        cs, sn = math.cos(self.angle), math.sin(self.angle)
        rot = np.array([[cs, sn], [-sn, cs]])
        warp = np.concatenate([rot, center - rot @ center], 1)
        return cv.warpAffine(image, warp, image.shape[1::-1],
                             borderMode=cv.BORDER_REPLICATE)
class RandomAffine(Transform):
    """Random affine augmentation: optional horizontal flip plus rotation,
    shear, scale and aspect-ratio jitter. The random parameters are rolled
    once at construction (self.roll_values) and reused for every call."""

    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,
                 border_mode='constant', output_sz=None, shift=None):
        super().__init__(output_sz, shift)
        self.p_flip = p_flip
        self.max_rotation = max_rotation
        self.max_shear = max_shear
        self.max_scale = max_scale
        self.max_ar_factor = max_ar_factor

        self.pad_amount = 0
        if border_mode == 'constant':
            self.border_flag = cv.BORDER_CONSTANT
        elif border_mode == 'replicate':
            # Fix: this line was `self.border_flag == cv.BORDER_REPLICATE`
            # (a comparison, not an assignment), leaving border_flag unset
            # and crashing with AttributeError in __call__.
            self.border_flag = cv.BORDER_REPLICATE
        else:
            raise ValueError('Unknown border mode {}'.format(border_mode))

        self.roll_values = self.roll()

    def roll(self):
        """Draw one set of random transform parameters."""
        do_flip = random.random() < self.p_flip
        theta = random.uniform(-self.max_rotation, self.max_rotation)

        shear_x = random.uniform(-self.max_shear, self.max_shear)
        shear_y = random.uniform(-self.max_shear, self.max_shear)

        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))
        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))

        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)

    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):
        """Compose flip, rotation, shear and scale into a single 2x3 warp matrix."""
        im_h, im_w = image_shape
        t_mat = np.identity(3)

        if do_flip:  # previously duplicated as a nested `if do_flip:` twice
            t_mat[0, 0] = -1.0
            t_mat[0, 2] = im_w

        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)
        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))

        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],
                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],
                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],
                            [0.0, 0.0, 1.0]])

        t_mat = t_scale @ t_rot @ t_shear @ t_mat

        t_mat[0, 2] += self.pad_amount
        t_mat[1, 2] += self.pad_amount

        return t_mat[:2, :]

    def __call__(self, image, is_mask=False):
        input_tensor = torch.is_tensor(image)
        if input_tensor:
            image = torch_to_numpy(image)

        do_flip, theta, shear_values, scale_factors = self.roll_values
        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)
        output_sz = (image.shape[1] + 2 * self.pad_amount, image.shape[0] + 2 * self.pad_amount)

        # Nearest-neighbor interpolation for masks keeps labels discrete.
        interp = cv.INTER_NEAREST if is_mask else cv.INTER_LINEAR
        image_t = cv.warpAffine(image, t_mat, output_sz, flags=interp,
                                borderMode=self.border_flag)

        image_t = image_t.reshape(image.shape)

        if input_tensor:
            image_t = numpy_to_torch(image_t)

        return self.crop_to_output(image_t)
class ResNet18m1(MultiFeatureBase):
    """ResNet18 feature together with the VGG-m conv1 layer.

    args:
        output_layers: List of layers to output.
        net_path: Relative or absolute net path (default should be fine).
        use_gpu: Use GPU or CPU.
    """
    def __init__(self, output_layers, net_path=None, use_gpu=True, *args, **kwargs):
        super(ResNet18m1, self).__init__(*args, **kwargs)

        for l in output_layers:
            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:
                raise ValueError('Unknown layer')

        self.output_layers = list(output_layers)
        self.use_gpu = use_gpu
        self.net_path = 'resnet18_vggmconv1/resnet18_vggmconv1.pth' if net_path is None else net_path

    def initialize(self):
        """Load the network weights, trying each configured network root in turn."""
        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        self.layer_stride = {'vggconv1': 2, 'conv1': 2, 'layer1': 4, 'layer2': 8,
                             'layer3': 16, 'layer4': 32, 'fc': None}
        self.layer_dim = {'vggconv1': 96, 'conv1': 64, 'layer1': 64, 'layer2': 128,
                          'layer3': 256, 'layer4': 512, 'fc': None}

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)

        if os.path.isabs(self.net_path):
            net_path_full = [self.net_path]
        else:
            root_paths = env_settings().network_path
            if isinstance(root_paths, str):
                root_paths = [root_paths]
            net_path_full = [os.path.join(root, self.net_path) for root in root_paths]

        self.net = None
        for net_path in net_path_full:
            try:
                self.net = resnet18_vggmconv1(self.output_layers, path=net_path)
                break
            except Exception:
                # Best-effort probing of candidate paths: a failed load just
                # moves on to the next root. The previous bare `except:` also
                # swallowed KeyboardInterrupt/SystemExit.
                pass
        if self.net is None:
            raise Exception('Did not find network file {}'.format(self.net_path))

        if self.use_gpu:
            self.net.cuda()
        self.net.eval()

    def dim(self):
        return TensorList([self.layer_dim[l] for l in self.output_layers])

    def stride(self):
        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])

    def extract(self, im: torch.Tensor):
        """Normalize a [0,255] image tensor and return the requested layer outputs."""
        im = im / 255
        im -= self.mean
        im /= self.std

        if self.use_gpu:
            im = im.cuda()

        with torch.no_grad():
            return TensorList(self.net(im).values())
class ExtractorBase:
    """Base feature extractor: holds a list of features and initializes them.

    args:
        features: List of features.
    """

    def __init__(self, features):
        self.features = features

    def initialize(self):
        for feature in self.features:
            feature.initialize()


class SingleResolutionExtractor(ExtractorBase):
    """Feature extractor where every feature shares a single stride.

    args:
        features: List of features.
    """

    def __init__(self, features):
        super().__init__(features)

        stride = self.features[0].stride()
        if isinstance(stride, (list, TensorList)):
            stride = stride[0]
        self.feature_stride = stride

    def stride(self):
        return self.feature_stride

    def size(self, input_sz):
        return input_sz // self.stride()

    def extract(self, im, pos, scales, image_sz):
        if isinstance(scales, (int, float)):
            scales = [scales]

        # NOTE(review): sample_patch returns a (patch, coord) tuple; this path
        # looks like it predates that API -- verify before relying on it.
        im_patches = torch.cat([sample_patch(im, pos, s * image_sz, image_sz) for s in scales])

        feature_map = torch.cat(TensorList([f.get_feature(im_patches) for f in self.features]).unroll(), dim=1)
        return feature_map


class MultiResolutionExtractor(ExtractorBase):
    """Feature extractor supporting features with different strides.

    args:
        features: List of features.
    """

    def __init__(self, features, patch_mode='replicate', max_scale_change=None):
        super().__init__(features)
        self.patch_mode = patch_mode
        self.max_scale_change = max_scale_change
        # None means the color mode is not decided yet; every feature is active.
        self.is_color = None

    def _return_feature(self, f):
        if self.is_color is None:
            return True
        return f.use_for_color if self.is_color else f.use_for_gray

    def stride(self):
        active = [f.stride() for f in self.features if self._return_feature(f)]
        return torch.Tensor(TensorList(active).unroll().list())

    def size(self, input_sz):
        return TensorList([f.size(input_sz) for f in self.features
                           if self._return_feature(f)]).unroll()

    def dim(self):
        return TensorList([f.dim() for f in self.features
                           if self._return_feature(f)]).unroll()

    def get_fparams(self, name: str = None):
        if name is None:
            return [f.fparams for f in self.features if self._return_feature(f)]
        return TensorList([getattr(f.fparams, name) for f in self.features
                           if self._return_feature(f)]).unroll()

    def get_attribute(self, name: str, ignore_missing: bool = False):
        if ignore_missing:
            return TensorList([getattr(f, name) for f in self.features
                               if self._return_feature(f) and hasattr(f, name)])
        return TensorList([getattr(f, name, None) for f in self.features
                           if self._return_feature(f)])

    def get_unique_attribute(self, name: str):
        owner = None
        for f in self.features:
            if self._return_feature(f) and hasattr(f, name):
                if owner is not None:
                    raise RuntimeError('The attribute was not unique.')
                owner = f
        if owner is None:
            raise RuntimeError('The attribute did not exist')
        return getattr(owner, name)

    def set_is_color(self, is_color: bool):
        self.is_color = is_color

    def extract(self, im, pos, scales, image_sz, return_patches=False):
        """Extract features.

        args:
            im: Image.
            pos: Center position for extraction.
            scales: Image scales to extract features from.
            image_sz: Size to resize the image samples to before extraction.
            return_patches: Also return the raw image patches.
        """
        if isinstance(scales, (int, float)):
            scales = [scales]

        patch_iter, coord_iter = zip(*(sample_patch(im, pos, s * image_sz, image_sz,
                                                    mode=self.patch_mode,
                                                    max_scale_change=self.max_scale_change)
                                       for s in scales))
        im_patches = torch.cat(list(patch_iter))
        patch_coords = torch.cat(list(coord_iter))

        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()

        if return_patches:
            return feature_map, patch_coords, im_patches
        return feature_map, patch_coords

    def extract_transformed(self, im, pos, scale, image_sz, transforms):
        """Extract features from a set of transformed copies of one image patch.

        args:
            im: Image.
            pos: Center position for extraction.
            scale: Image scale to extract features from.
            image_sz: Size to resize the image samples to before extraction.
            transforms: A set of image transforms to apply.
        """
        im_patch, _ = sample_patch(im, pos, scale * image_sz, image_sz)
        im_patches = torch.cat([T(im_patch) for T in transforms])
        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()
        return feature_map
class MultiFeatureBase(FeatureBase):
    """Base class for features potentially having multiple feature blocks as
    output (like CNNs). See FeatureBase for more info.
    """
    def size(self, im_sz):
        """Output spatial size for each feature block given input size im_sz.

        Mirrors FeatureBase.size: blocks with an explicit output size return
        it, the others return im_sz divided by their stride.
        """
        if self.output_size is None:
            return TensorList([im_sz // s for s in self.stride()])
        if isinstance(im_sz, torch.Tensor):
            return TensorList([im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]])
                               for sz, s in zip(self.output_size, self.stride())])
        # Fix: this case previously fell off the end and returned None.
        return TensorList([im_sz // s if sz is None else sz
                           for sz, s in zip(self.output_size, self.stride())])

    def get_feature(self, im: torch.Tensor):
        """Get the feature. Generally, call this function.

        args:
            im: image patch as a torch.Tensor.
        """
        # Return an empty tensor when this feature does not apply to the
        # image's color mode.
        is_color = im.shape[1] == 3
        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:
            return torch.Tensor([])

        feat_list = self.extract(im)

        output_sz = [None]*len(feat_list) if self.output_size is None else self.output_size

        # Pool/downsample each block independently.
        for i, (sz, s) in enumerate(zip(output_sz, self.pool_stride)):
            if sz is not None:
                feat_list[i] = F.adaptive_avg_pool2d(feat_list[i], sz)
            elif s != 1:
                feat_list[i] = F.avg_pool2d(feat_list[i], s, s)

        # Normalize each block in place with the configured power norm.
        if self.normalize_power is not None:
            for feat in feat_list:
                feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power,
                                   dim=3, keepdim=True) /
                         (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)

        return feat_list
class NetWrapper:
    """Used for wrapping networks in pytracking.
    Network modules and functions can be accessed directly as if they were
    members of this class."""

    _rec_iter = 0  # guards __getattr__ against unbounded recursion

    def __init__(self, net_path, use_gpu=True, initialize=False, **kwargs):
        self.net_path = net_path
        self.use_gpu = use_gpu
        self.net = None
        self.net_kwargs = kwargs
        if initialize:
            self.initialize()

    def __getattr__(self, name):
        # A nested lookup means we are recursing (e.g. self.net itself is
        # missing); bail out with None instead of looping forever.
        if self._rec_iter > 0:
            self._rec_iter = 0
            return None
        self._rec_iter += 1
        try:
            return getattr(self.net, name)
        finally:
            self._rec_iter = 0

    def load_network(self):
        self.net = load_network(self.net_path, **self.net_kwargs)
        if self.use_gpu:
            self.cuda()
        self.eval()

    def initialize(self):
        self.load_network()


class NetWithBackbone(NetWrapper):
    """Wraps a network with a common backbone.
    Assumes the network has an 'extract_backbone_features(image)' function."""

    def __init__(self, net_path, use_gpu=True, initialize=False, image_format='rgb',
                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), **kwargs):
        super().__init__(net_path, use_gpu, initialize, **kwargs)

        self.image_format = image_format
        self._mean = torch.Tensor(mean).view(1, -1, 1, 1)
        self._std = torch.Tensor(std).view(1, -1, 1, 1)

    def initialize(self, image_format='rgb', mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        # NOTE(review): the arguments are accepted but unused here; the
        # normalization configured in __init__ stays in effect.
        super().initialize()

    def preprocess_image(self, im: torch.Tensor):
        """Normalize a [0,255] image tensor with the network's mean/std
        (modifies the scaled tensor in place) and move it to the GPU if used."""
        if self.image_format in ['rgb', 'bgr']:
            im = im / 255

        if self.image_format in ['bgr', 'bgr255']:
            im = im[:, [2, 1, 0], :, :]
        im -= self._mean
        im /= self._std

        if self.use_gpu:
            im = im.cuda()

        return im

    def extract_backbone(self, im: torch.Tensor):
        """Extract backbone features from the network.
        Expects a float tensor image with pixel range [0, 255]."""
        im = self.preprocess_image(im)
        return self.net.extract_backbone_features(im)
def sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None,
                 mode: str = 'replicate', max_scale_change=None, is_mask=False):
    """Sample an image patch.

    args:
        im: Image
        pos: center position of crop
        sample_sz: size to crop
        output_sz: size to resize to
        mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'
        max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode
        is_mask: use nearest-neighbor resampling and zero padding (for label masks)
    """

    # copy and convert
    posl = pos.long().clone()

    pad_mode = mode

    # Get new sample size if forced inside the image
    if mode == 'inside' or mode == 'inside_major':
        pad_mode = 'replicate'
        im_sz = torch.Tensor([im.shape[2], im.shape[3]])
        shrink_factor = (sample_sz.float() / im_sz)
        if mode == 'inside':
            shrink_factor = shrink_factor.max()
        elif mode == 'inside_major':
            shrink_factor = shrink_factor.min()
        shrink_factor.clamp_(min=1, max=max_scale_change)
        sample_sz = (sample_sz.float() / shrink_factor).long()

    # Compute pre-downsampling factor
    if output_sz is not None:
        resize_factor = torch.min(sample_sz.float() / output_sz.float()).item()
        df = int(max(int(resize_factor - 0.1), 1))
    else:
        df = int(1)

    sz = sample_sz.float() / df     # new size

    # Do downsampling
    if df > 1:
        os = posl % df                # offset
        # Floor division: with `/` these become float tensors on recent
        # PyTorch (true division), which breaks the integer indexing and
        # F.pad below. `//` matches the old integer-tensor semantics.
        posl = (posl - os) // df      # new position
        im2 = im[..., os[0].item()::df, os[1].item()::df]   # downsample
    else:
        im2 = im

    # compute size to crop
    szl = torch.max(sz.round(), torch.Tensor([2])).long()

    # Extract top and bottom coordinates (floor division, see note above)
    tl = posl - (szl - 1) // 2
    br = posl + szl // 2 + 1

    # Shift the crop to inside the image
    if mode == 'inside' or mode == 'inside_major':
        im2_sz = torch.LongTensor([im2.shape[2], im2.shape[3]])
        shift = (-tl).clamp(0) - (br - im2_sz).clamp(0)
        tl += shift
        br += shift

        outside = ((-tl).clamp(0) + (br - im2_sz).clamp(0)) // 2
        shift = (-tl - outside) * (outside > 0).long()
        tl += shift
        br += shift

    # Get image patch: negative pads crop, positive pads replicate/zero-fill.
    if not is_mask:
        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3],
                               -tl[0].item(), br[0].item() - im2.shape[2]), pad_mode)
    else:
        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3],
                               -tl[0].item(), br[0].item() - im2.shape[2]))

    # Patch coordinates in the original (pre-downsampling) image frame
    patch_coord = df * torch.cat((tl, br)).view(1, 4)

    if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]):
        return im_patch.clone(), patch_coord

    # Resample to the requested output size
    if not is_mask:
        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear')
    else:
        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='nearest')

    return im_patch, patch_coord
class Concatenate(FeatureBase):
    """A feature that concatenates the outputs of other features.

    args:
        features: List of features to concatenate. All must share the same stride.
    """
    def __init__(self, features, pool_stride=None, normalize_power=None,
                 use_for_color=True, use_for_gray=True):
        # Pass by keyword: FeatureBase.__init__ is
        # (fparams, pool_stride, output_size, normalize_power, ...), so the
        # previous positional call bound pool_stride to fparams,
        # normalize_power to pool_stride and use_for_color to output_size.
        super(Concatenate, self).__init__(pool_stride=pool_stride,
                                          normalize_power=normalize_power,
                                          use_for_color=use_for_color,
                                          use_for_gray=use_for_gray)
        self.features = features

        self.input_stride = self.features[0].stride()

        for feat in self.features:
            if self.input_stride != feat.stride():
                raise ValueError('Strides for the features must be the same for a multiresolution feature.')

    def dim(self):
        return sum([f.dim() for f in self.features])

    def stride(self):
        return self.pool_stride * self.input_stride

    def extract(self, im: torch.Tensor):
        return torch.cat([f.get_feature(im) for f in self.features], 1)
@tensor_operation
def mult_real_cplx(a: torch.Tensor, b: torch.Tensor):
    """Pointwise product of real tensor a with complex tensor b."""
    if is_real(b):
        raise ValueError('Last dimension must have length 2.')
    return b * a.unsqueeze(-1)


@tensor_operation
def div(a: torch.Tensor, b: torch.Tensor):
    """Pointwise complex division a / b."""
    if is_real(b):
        # Real denominator: must have strictly fewer dims than the numerator.
        if b.dim() >= a.dim():
            raise ValueError('Incorrect dimensions.')
        return div_cplx_real(a, b)
    # a / b = a * conj(b) / |b|^2
    return div_cplx_real(mult_conj(a, b), abs_sqr(b))


@tensor_operation
def div_cplx_real(a: torch.Tensor, b: torch.Tensor):
    """Pointwise division of complex tensor a by real tensor b."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return a / b.unsqueeze(-1)


@tensor_operation
def abs_sqr(a: torch.Tensor):
    """Squared magnitude |a|^2; the result is a real tensor."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return (a * a).sum(-1)


@tensor_operation
def abs(a: torch.Tensor):
    """Magnitude |a|; the result is a real tensor."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return torch.sqrt(abs_sqr(a))


@tensor_operation
def conj(a: torch.Tensor):
    """Complex conjugate."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return complex(a[..., 0], a[..., 1].neg())


@tensor_operation
def real(a: torch.Tensor):
    """Real part."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return a[..., 0]


@tensor_operation
def imag(a: torch.Tensor):
    """Imaginary part."""
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    return a[..., 1]
complex tensor from real and imaginary part.""" if b is None: b = a.new_zeros(a.shape) elif a is None: a = b.new_zeros(b.shape) return torch.cat((a.unsqueeze(-1), b.unsqueeze(-1)), -1) @tensor_operation def mtimes(a: torch.Tensor, b: torch.Tensor, conj_a=False, conj_b=False): """Complex matrix multiplication of complex tensors. The dimensions (-3, -2) are matrix multiplied. -1 is the complex dimension.""" if is_real(a): if a.dim() >= b.dim(): raise ValueError('Incorrect dimensions.') return mtimes_real_complex(a, b, conj_b=conj_b) if is_real(b): if b.dim() >= a.dim(): raise ValueError('Incorrect dimensions.') return mtimes_complex_real(a, b, conj_a=conj_a) if not conj_a and not conj_b: return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]), torch.matmul(a[..., 0], b[..., 1]) + torch.matmul(a[..., 1], b[..., 0])) if conj_a and not conj_b: return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]), torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0])) if not conj_a and conj_b: return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]), torch.matmul(a[..., 1], b[..., 0]) - torch.matmul(a[..., 0], b[..., 1])) if conj_a and conj_b: return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]), -torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0])) @tensor_operation def mtimes_real_complex(a: torch.Tensor, b: torch.Tensor, conj_b=False): if is_real(b): raise ValueError('Incorrect dimensions.') if not conj_b: return complex(torch.matmul(a, b[..., 0]), torch.matmul(a, b[..., 1])) if conj_b: return complex(torch.matmul(a, b[..., 0]), -torch.matmul(a, b[..., 1])) @tensor_operation def mtimes_complex_real(a: torch.Tensor, b: torch.Tensor, conj_a=False): if is_real(a): raise ValueError('Incorrect dimensions.') if not conj_a: return complex(torch.matmul(a[..., 0], b), torch.matmul(a[..., 1], b)) if conj_a: return 
complex(torch.matmul(a[..., 0], b), -torch.matmul(a[..., 1], b)) @tensor_operation def exp_imag(a: torch.Tensor): """Complex exponential with imaginary input: e^(i*a)""" a = a.unsqueeze(-1) return torch.cat((torch.cos(a), torch.sin(a)), -1) ================================================ FILE: external/AR/pytracking/libs/dcf.py ================================================ import torch import math from pytracking import fourier from pytracking import complex import torch.nn.functional as F def hann1d(sz: int, centered = True) -> torch.Tensor: """1D cosine window.""" if centered: return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float())) w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz//2 + 1).float())) return torch.cat([w, w[1:sz-sz//2].flip((0,))]) def hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor: """2D cosine window.""" return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1) def hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor: """1D clipped cosine window.""" # Ensure that the difference is even effective_sz += (effective_sz - sz) % 2 effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1) pad = (sz - effective_sz) / 2 window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate') if centered: return window else: mid = (sz / 2).int() window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3) return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2) def gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor: if half: k = torch.arange(0, int(sz/2+1)) else: k = torch.arange(-int((sz-1)/2), int(sz/2+1)) return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 * (math.pi * sigma * k.float() / 
sz)**2) def gauss_spatial(sz, sigma, center=0, end_pad=0): k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad) return torch.exp(-1.0/(2*sigma**2) * (k - center)**2) def label_function(sz: torch.Tensor, sigma: torch.Tensor): return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1) def label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)): """The origin is in the middle of the image.""" return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \ gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1) def cubic_spline_fourier(f, a): """The continuous Fourier transform of a cubic spline kernel.""" bf = (6*(1 - torch.cos(2 * math.pi * f)) + 3*a*(1 - torch.cos(4 * math.pi * f)) - (6 + 8*a)*math.pi*f*torch.sin(2 * math.pi * f) - 2*a*math.pi*f*torch.sin(4 * math.pi * f)) \ / (4 * math.pi**4 * f**4) bf[f == 0] = 1 return bf def get_interp_fourier(sz: torch.Tensor, method='ideal', bicubic_param=0.5, centering=True, windowing=False, device='cpu'): ky, kx = fourier.get_frequency_coord(sz) if method=='ideal': interp_y = torch.ones(ky.shape) / sz[0] interp_x = torch.ones(kx.shape) / sz[1] elif method=='bicubic': interp_y = cubic_spline_fourier(ky / sz[0], bicubic_param) / sz[0] interp_x = cubic_spline_fourier(kx / sz[1], bicubic_param) / sz[1] else: raise ValueError('Unknown method.') if centering: interp_y = complex.mult(interp_y, complex.exp_imag((-math.pi/sz[0]) * ky)) interp_x = complex.mult(interp_x, complex.exp_imag((-math.pi/sz[1]) * kx)) if windowing: raise NotImplementedError return interp_y.to(device), interp_x.to(device) def interpolate_dft(a: torch.Tensor, interp_fs) -> torch.Tensor: if isinstance(interp_fs, torch.Tensor): return complex.mult(a, interp_fs) if isinstance(interp_fs, (tuple, list)): return 
def get_reg_filter(sz: torch.Tensor, target_sz: torch.Tensor, params):
    """Computes the spatial regularization filter in CCOT and ECO.

    Builds a polynomial spatial penalty window, transforms it to the Fourier
    domain, sparsifies it, corrects its minimum value, and crops away zeros.

    NOTE(review): uses torch.rfft/torch.irfft, which were removed in torch >= 1.8;
    this code requires an older PyTorch (or a compatibility shim) — verify.
    """
    if not params.use_reg_window:
        # Constant regularization: a single-tap filter.
        return params.reg_window_min * torch.ones(1,1,1,1)

    if getattr(params, 'reg_window_square', False):
        # Use a square window whose side is the geometric mean of the target size.
        target_sz = target_sz.prod().sqrt() * torch.ones(2)

    # Normalization factor
    reg_scale = 0.5 * target_sz

    # Construct grid
    if getattr(params, 'reg_window_centered', True):
        wrg = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32).view(1,1,-1,1)
        wcg = torch.arange(-int((sz[1]-1)/2), int(sz[1]/2+1), dtype=torch.float32).view(1,1,1,-1)
    else:
        # FFT-shifted (origin-at-zero) coordinate layout.
        wrg = torch.cat([torch.arange(0, int(sz[0]/2+1), dtype=torch.float32),
                         torch.arange(-int((sz[0] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,-1,1)
        wcg = torch.cat([torch.arange(0, int(sz[1]/2+1), dtype=torch.float32),
                         torch.arange(-int((sz[1] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,1,-1)

    # Construct regularization window: min at the target, rising as |x/scale|^power.
    reg_window = (params.reg_window_edge - params.reg_window_min) * \
                 (torch.abs(wrg/reg_scale[0])**params.reg_window_power +
                  torch.abs(wcg/reg_scale[1])**params.reg_window_power) + params.reg_window_min

    # Compute DFT and enforce sparsity: zero all coefficients below the threshold.
    reg_window_dft = torch.rfft(reg_window, 2) / sz.prod()
    reg_window_dft_abs = complex.abs(reg_window_dft)
    reg_window_dft[reg_window_dft_abs < params.reg_sparsity_threshold * reg_window_dft_abs.max(), :] = 0

    # Do the inverse transform to correct for the window minimum
    reg_window_sparse = torch.irfft(reg_window_dft, 2, signal_sizes=sz.long().tolist())
    # Adjust the DC coefficient (index [0,0,0,0] of the complex pair) so the
    # spatial minimum equals reg_window_min again after sparsification.
    reg_window_dft[0,0,0,0,0] += params.reg_window_min - sz.prod() * reg_window_sparse.min()
    reg_window_dft = complex.real(fourier.rfftshift2(reg_window_dft))

    # Remove zeros: crop to the tight bounding box of the nonzero coefficients.
    max_inds,_ = reg_window_dft.nonzero().max(dim=0)
    mid_ind = int((reg_window_dft.shape[2]-1)/2)
    top = max_inds[-2].item() + 1
    bottom = 2*mid_ind - max_inds[-2].item()
    right = max_inds[-1].item() + 1
    reg_window_dft = reg_window_dft[..., bottom:top, :right]

    if reg_window_dft.shape[-1] > 1:
        # Mirror the positive-frequency columns to recover the full symmetric filter.
        reg_window_dft = torch.cat([reg_window_dft[..., 1:].flip((2, 3)), reg_window_dft], -1)
    return reg_window_dft


def max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):
    """Computes maximum and argmax in the last two dimensions.

    Returns (max_val, argmax) where argmax stacks (row, col) indices in the last dim.
    """
    max_val_row, argmax_row = torch.max(a, dim=-2)
    max_val, argmax_col = torch.max(max_val_row, dim=-1)
    # Pick, for each batch element, the row index at the winning column.
    argmax_row = argmax_row.view(argmax_col.numel(),-1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]
    argmax_row = argmax_row.reshape(argmax_col.shape)
    argmax = torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)
    return max_val, argmax


================================================
FILE: external/AR/pytracking/libs/fourier.py
================================================
import torch
import torch.nn.functional as F
from pytracking import complex, TensorList
from pytracking.libs.tensorlist import tensor_operation


@tensor_operation
def rfftshift2(a: torch.Tensor):
    """Shift dim 2 of an rfft output so the DC row is centered."""
    h = a.shape[2] + 2
    return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2)


@tensor_operation
def irfftshift2(a: torch.Tensor):
    """Inverse of rfftshift2: move the centered DC row back to index 0."""
    mid = int((a.shape[2]-1)/2)
    return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2)
Always produces odd (full) output sizes.""" return rfftshift2(torch.rfft(a, 2)) @tensor_operation def cifft2(a, signal_sizes=None): """Do inverse FFT corresponding to cfft2.""" return torch.irfft(irfftshift2(a), 2, signal_sizes=signal_sizes) @tensor_operation def sample_fs(a: torch.Tensor, grid_sz: torch.Tensor = None, rescale = True): """Samples the Fourier series.""" # Size of the fourier series sz = torch.Tensor([a.shape[2], 2*a.shape[3]-1]).float() # Default grid if grid_sz is None or sz[0] == grid_sz[0] and sz[1] == grid_sz[1]: if rescale: return sz.prod().item() * cifft2(a) return cifft2(a) if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]: raise ValueError("Only grid sizes that are smaller than the Fourier series size are supported.") tot_pad = (grid_sz - sz).tolist() is_even = [s.item() % 2 == 0 for s in sz] # Compute paddings pad_top = int((tot_pad[0]+1)/2) if is_even[0] else int(tot_pad[0]/2) pad_bottom = int(tot_pad[0] - pad_top) pad_right = int((tot_pad[1]+1)/2) if rescale: return grid_sz.prod().item() * cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist()) else: return cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist()) def get_frequency_coord(sz, add_complex_dim = False, device='cpu'): """Frequency coordinates.""" ky = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32, device=device).view(1,1,-1,1) kx = torch.arange(0, int(sz[1]/2+1), dtype=torch.float32, device=device).view(1,1,1,-1) if add_complex_dim: ky = ky.unsqueeze(-1) kx = kx.unsqueeze(-1) return ky, kx @tensor_operation def shift_fs(a: torch.Tensor, shift: torch.Tensor): """Shift a sample a in the Fourier domain. Params: a : The fourier coefficiens of the sample. 
shift : The shift to be performed normalized to the range [-pi, pi].""" if a.dim() != 5: raise ValueError('a must be the Fourier coefficients, a 5-dimensional tensor.') if shift[0] == 0 and shift[1] == 0: return a ky, kx = get_frequency_coord((a.shape[2], 2*a.shape[3]-1), device=a.device) return complex.mult(complex.mult(a, complex.exp_imag(shift[0].item()*ky)), complex.exp_imag(shift[1].item()*kx)) def sum_fs(a: TensorList) -> torch.Tensor: """Sum a list of Fourier series expansions.""" s = None mid = None for e in sorted(a, key=lambda elem: elem.shape[-3], reverse=True): if s is None: s = e.clone() mid = int((s.shape[-3] - 1) / 2) else: # Compute coordinates top = mid - int((e.shape[-3] - 1) / 2) bottom = mid + int(e.shape[-3] / 2) + 1 right = e.shape[-2] # Add the data s[..., top:bottom, :right, :] += e return s def sum_fs12(a: TensorList) -> torch.Tensor: """Sum a list of Fourier series expansions.""" s = None mid = None for e in sorted(a, key=lambda elem: elem.shape[0], reverse=True): if s is None: s = e.clone() mid = int((s.shape[0] - 1) / 2) else: # Compute coordinates top = mid - int((e.shape[0] - 1) / 2) bottom = mid + int(e.shape[0] / 2) + 1 right = e.shape[1] # Add the data s[top:bottom, :right, ...] 
+= e return s @tensor_operation def inner_prod_fs(a: torch.Tensor, b: torch.Tensor): if complex.is_complex(a) and complex.is_complex(b): return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0, :].reshape(-1) @ b[:, :, :, 0, :].reshape(-1) elif complex.is_real(a) and complex.is_real(b): return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0].reshape(-1) @ b[:, :, :, 0].reshape(-1) else: raise NotImplementedError('Not implemented for mixed real and complex.') ================================================ FILE: external/AR/pytracking/libs/operation.py ================================================ import torch import torch.nn.functional as F from pytracking.libs.tensorlist import tensor_operation, TensorList @tensor_operation def conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None): """Standard conv2d. Returns the input if weight=None.""" if weight is None: return input ind = None if mode is not None: if padding != 0: raise ValueError('Cannot input both padding and mode.') if mode == 'same': padding = (weight.shape[2]//2, weight.shape[3]//2) if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0: ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None), slice(-1) if weight.shape[3] % 2 == 0 else slice(None)) elif mode == 'valid': padding = (0, 0) elif mode == 'full': padding = (weight.shape[2]-1, weight.shape[3]-1) else: raise ValueError('Unknown mode for padding.') out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) if ind is None: return out return out[:,:,ind[0],ind[1]] @tensor_operation def conv1x1(input: torch.Tensor, weight: torch.Tensor): """Do a convolution with a 1x1 kernel weights. 
@tensor_operation
def conv1x1(input: torch.Tensor, weight: torch.Tensor):
    """Do a convolution with a 1x1 kernel weights.
    Implemented with matmul, which can be faster than using conv."""

    if weight is None:
        return input

    return torch.conv2d(input, weight)


================================================
FILE: external/AR/pytracking/libs/optimization.py
================================================
import torch
import torch.autograd
import math
from pytracking.libs import TensorList
from pytracking.utils.plotting import plot_graph
from ltr.models.layers.activation import softmax_reg


class L2Problem:
    """Base class for representing an L2 optimization problem."""
    def __call__(self, x: TensorList) -> TensorList:
        """Shall compute the residuals of the problem."""
        raise NotImplementedError

    def ip_input(self, a, b):
        """Inner product of the input space."""
        return sum(a.view(-1) @ b.view(-1))

    def ip_output(self, a, b):
        """Inner product of the output space."""
        return sum(a.view(-1) @ b.view(-1))

    def M1(self, x):
        """M1 preconditioner."""
        return x

    def M2(self, x):
        """M2 preconditioner."""
        return x


class MinimizationProblem:
    """General minimization problem."""
    def __call__(self, x: TensorList) -> TensorList:
        """Shall compute the loss."""
        raise NotImplementedError

    def ip_input(self, a, b):
        """Inner product of the input space."""
        return sum(a.view(-1) @ b.view(-1))

    def M1(self, x):
        return x

    def M2(self, x):
        return x


class ConjugateGradientBase:
    """Conjugate Gradient optimizer base class. Implements the CG loop.

    Subclasses implement A (the left-hand operator) and typically the
    preconditioners M1/M2 and the inner product ip.
    """
    def __init__(self, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0, debug = False):
        # fletcher_reeves=False selects the Polak-Ribiere beta formula instead.
        self.fletcher_reeves = fletcher_reeves
        self.standard_alpha = standard_alpha
        # 0 disables direction reuse between calls (state reset each run_CG).
        self.direction_forget_factor = direction_forget_factor
        self.debug = debug

        # State
        self.p = None
        self.rho = torch.ones(1)
        self.r_prev = None

        # Right hand side
        self.b = None

    def reset_state(self):
        self.p = None
        self.rho = torch.ones(1)
        self.r_prev = None

    def run_CG(self, num_iter, x=None, eps=0.0):
        """Main conjugate gradient method.

        args:
            num_iter: Number of iterations.
            x: Initial guess. Assumed zero if None.
            eps: Stop if the residual norm gets smaller than this.
        """

        # Apply forgetting factor
        if self.direction_forget_factor == 0:
            self.reset_state()
        elif self.p is not None:
            self.rho /= self.direction_forget_factor

        if x is None:
            r = self.b.clone()
        else:
            r = self.b - self.A(x)

        # Norms of residuals etc for debugging
        resvec = None
        if self.debug:
            normr = self.residual_norm(r)
            resvec = torch.zeros(num_iter+1)
            resvec[0] = normr

        # Loop over iterations
        for ii in range(num_iter):
            # Preconditioners
            y = self.M1(r)
            z = self.M2(y)

            rho1 = self.rho
            self.rho = self.ip(r, z)

            if self.check_zero(self.rho):
                if self.debug:
                    print('Stopped CG since rho = 0')
                    if resvec is not None:
                        resvec = resvec[:ii+1]
                return x, resvec

            if self.p is None:
                self.p = z.clone()
            else:
                if self.fletcher_reeves:
                    beta = self.rho / rho1
                else:
                    # Polak-Ribiere: uses the previous residual; clamped to stay >= 0.
                    rho2 = self.ip(self.r_prev, z)
                    beta = (self.rho - rho2) / rho1
                    beta = beta.clamp(0)

                self.p = z + self.p * beta

            q = self.A(self.p)
            pq = self.ip(self.p, q)

            if self.standard_alpha:
                alpha = self.rho / pq
            else:
                alpha = self.ip(self.p, r) / pq

            # Save old r for PR formula
            if not self.fletcher_reeves:
                self.r_prev = r.clone()

            # Form new iterate
            if x is None:
                x = self.p * alpha
            else:
                x += self.p * alpha

            if ii < num_iter - 1 or self.debug:
                r -= q * alpha

            if eps > 0.0 or self.debug:
                normr = self.residual_norm(r)

            if self.debug:
                self.evaluate_CG_iteration(x)
                resvec[ii+1] = normr

            if eps > 0 and normr <= eps:
                if self.debug:
                    print('Stopped CG since norm smaller than eps')
                break

        if resvec is not None:
            resvec = resvec[:ii+2]

        return x, resvec

    def A(self, x):
        # Implements the left hand operation
        raise NotImplementedError

    def ip(self, a, b):
        # Implements the inner product
        return a.view(-1) @ b.view(-1)

    def residual_norm(self, r):
        res = self.ip(r, r).sum()
        if isinstance(res, (TensorList, list, tuple)):
            res = sum(res)
        return res.sqrt()

    def check_zero(self, s, eps = 0.0):
        ss = s.abs() <= eps
        if isinstance(ss, (TensorList, list, tuple)):
            ss = sum(ss)
        return ss.item() > 0

    def M1(self, x):
        # M1 preconditioner
        return x

    def M2(self, x):
        # M2 preconditioner
        return x

    def evaluate_CG_iteration(self, x):
        # Hook for convergence analysis; no-op by default.
        pass
class ConjugateGradient(ConjugateGradientBase):
    """Conjugate Gradient optimizer, performing single linearization of the residuals in the start.

    The Jacobian-vector products needed by A() are realized with two
    torch.autograd.grad calls through the initial linearization point.
    """
    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps = 0.0, fletcher_reeves = True,
                 standard_alpha = True, direction_forget_factor = 0, debug = False, plotting = False, visdom=None):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or plotting)

        self.problem = problem
        self.x = variable

        self.plotting = plotting
        self.fig_num = (10,11)
        self.visdom = visdom

        self.cg_eps = cg_eps
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)

    def clear_temp(self):
        # Drop the linearization graph so it can be freed.
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

    def run(self, num_cg_iter):
        """Run the oprimizer with the provided number of iterations."""

        if num_cg_iter == 0:
            return

        lossvec = None
        if self.debug:
            lossvec = torch.zeros(2)

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        # Create copy with graph detached
        self.g = self.f0.detach()

        if self.debug:
            lossvec[0] = self.problem.ip_output(self.g, self.g)

        # g requires grad so A() can differentiate through it (double-backward trick).
        self.g.requires_grad_(True)

        # Get df/dx^t @ f0
        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))

        # Get the right hand side
        self.b = - self.dfdxt_g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.f0 = self.problem(self.x)
            lossvec[-1] = self.problem.ip_output(self.f0, self.f0)
            self.residuals = torch.cat((self.residuals, res))
            self.losses = torch.cat((self.losses, lossvec))
            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')

        self.x.detach_()
        self.clear_temp()

    def A(self, x):
        # Gauss-Newton operator: J^T J x via two grad calls through the linearization.
        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)
        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))

    def ip(self, a, b):
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)
class GaussNewtonCG(ConjugateGradientBase):
    """Gauss-Newton with Conjugate Gradient optimizer.

    Re-linearizes the residuals every outer (GN) iteration and solves the
    normal equations with CG in the inner loop.
    """

    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps = 0.0, fletcher_reeves = True,
                 standard_alpha = True, direction_forget_factor = 0, debug = False, analyze = False, plotting = False,
                 visdom=None):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)

        self.problem = problem
        self.x = variable

        self.analyze_convergence = analyze
        self.plotting = plotting
        self.fig_num = (10,11,12)
        self.visdom = visdom

        self.cg_eps = cg_eps
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)

    def clear_temp(self):
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

    def run_GN(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, num_cg_iter, num_gn_iter=None):
        """Run the optimizer.
        args:
            num_cg_iter: Number of CG iterations per GN iter. If list, then each entry specifies number of CG iterations
                         and number of GN iterations is given by the length of the list.
            num_gn_iter: Number of GN iterations. Shall only be given if num_cg_iter is an integer.
        """

        if isinstance(num_cg_iter, int):
            if num_gn_iter is None:
                raise ValueError('Must specify number of GN iter if CG iter is constant')
            num_cg_iter = [num_cg_iter]*num_gn_iter

        num_gn_iter = len(num_cg_iter)
        if num_gn_iter == 0:
            return

        if self.analyze_convergence:
            # Record metrics of the starting point (zero step).
            self.evaluate_CG_iteration(0)

        # Outer loop for running the GN iterations.
        for cg_iter in num_cg_iter:
            self.run_GN_iter(cg_iter)

        if self.debug:
            if not self.analyze_convergence:
                self.f0 = self.problem(self.x)
                loss = self.problem.ip_output(self.f0, self.f0)
                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')
                if self.analyze_convergence:
                    self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')
                if self.analyze_convergence:
                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')

        self.x.detach_()
        self.clear_temp()

        return self.losses, self.residuals

    def run_GN_iter(self, num_cg_iter):
        """Runs a single GN iteration."""

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        # Create copy with graph detached
        self.g = self.f0.detach()

        if self.debug and not self.analyze_convergence:
            loss = self.problem.ip_output(self.g, self.g)
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

        # Needed so A() can differentiate w.r.t. g (double-backward trick).
        self.g.requires_grad_(True)

        # Get df/dx^t @ f0
        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))

        # Get the right hand side
        self.b = - self.dfdxt_g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.residuals = torch.cat((self.residuals, res))

    def A(self, x):
        # Gauss-Newton operator J^T J x for the current linearization.
        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)
        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))

    def ip(self, a, b):
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)

    def evaluate_CG_iteration(self, delta_x):
        if self.analyze_convergence:
            x = (self.x + delta_x).detach()
            x.requires_grad_(True)

            # compute loss and gradient
            f = self.problem(x)
            loss = self.problem.ip_output(f, f)
            grad = TensorList(torch.autograd.grad(loss, x))

            # store in the vectors
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))
            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))
class GradientDescentL2:
    """Gradient descent with momentum for L2 problems.

    NOTE(review): the attribute name `step_legnth` is misspelled in the original;
    kept as-is since external code may read it.
    """

    def __init__(self, problem: L2Problem, variable: TensorList, step_length: float, momentum: float = 0.0,
                 debug = False, plotting = False, visdom=None):

        self.problem = problem
        self.x = variable

        self.step_legnth = step_length
        self.momentum = momentum

        self.debug = debug or plotting
        self.plotting = plotting
        self.fig_num = (10,11)
        self.visdom = visdom

        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)
        self.residuals = None

        self.clear_temp()

    def clear_temp(self):
        self.f0 = None
        self.dir = None

    def run(self, num_iter, dummy = None):
        # `dummy` keeps the signature interchangeable with the CG optimizers.

        if num_iter == 0:
            return

        lossvec = None
        if self.debug:
            lossvec = torch.zeros(num_iter+1)
            grad_mags = torch.zeros(num_iter+1)

        for i in range(num_iter):
            self.x.requires_grad_(True)

            # Evaluate function at current estimate
            self.f0 = self.problem(self.x)

            # Compute loss
            loss = self.problem.ip_output(self.f0, self.f0)

            # Compute grad
            grad = TensorList(torch.autograd.grad(loss, self.x))

            # Update direction (momentum accumulates previous directions).
            if self.dir is None:
                self.dir = grad
            else:
                self.dir = grad + self.momentum * self.dir

            self.x.detach_()
            self.x -= self.step_legnth * self.dir

            if self.debug:
                lossvec[i] = loss.item()
                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()

        if self.debug:
            # Record the final loss/gradient after the last step.
            self.x.requires_grad_(True)
            self.f0 = self.problem(self.x)
            loss = self.problem.ip_output(self.f0, self.f0)
            grad = TensorList(torch.autograd.grad(loss, self.x))
            lossvec[-1] = self.problem.ip_output(self.f0, self.f0).item()
            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()
            self.losses = torch.cat((self.losses, lossvec))
            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))
            if self.visdom is not None:
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')

        self.x.detach_()
        self.clear_temp()


class NewtonCG(ConjugateGradientBase):
    """Newton with Conjugate Gradient. Handels general minimization problems.

    The Hessian-vector product is obtained by differentiating the gradient;
    hessian_reg adds Tikhonov damping that decays by hessian_reg_factor per iter.
    """

    def __init__(self, problem: MinimizationProblem, variable: TensorList, init_hessian_reg = 0.0, hessian_reg_factor = 1.0,
                 cg_eps = 0.0, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0,
                 debug = False, analyze = False, plotting = False, fig_num=(10, 11, 12)):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)

        self.problem = problem
        self.x = variable

        self.analyze_convergence = analyze
        self.plotting = plotting
        self.fig_num = fig_num

        self.hessian_reg = init_hessian_reg
        self.hessian_reg_factor = hessian_reg_factor
        self.cg_eps = cg_eps
        self.f0 = None
        self.g = None

        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)

    def clear_temp(self):
        self.f0 = None
        self.g = None

    def run(self, num_cg_iter, num_newton_iter=None):

        if isinstance(num_cg_iter, int):
            if num_cg_iter == 0:
                return
            if num_newton_iter is None:
                num_newton_iter = 1
            num_cg_iter = [num_cg_iter] * num_newton_iter

        num_newton_iter = len(num_cg_iter)
        if num_newton_iter == 0:
            return

        if self.analyze_convergence:
            self.evaluate_CG_iteration(0)

        for cg_iter in num_cg_iter:
            self.run_newton_iter(cg_iter)
            self.hessian_reg *= self.hessian_reg_factor

        if self.debug:
            if not self.analyze_convergence:
                loss = self.problem(self.x)
                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))

            if self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')
                if self.analyze_convergence:
                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')

        self.x.detach_()
        self.clear_temp()

        return self.losses, self.residuals

    def run_newton_iter(self, num_cg_iter):

        self.x.requires_grad_(True)

        # Evaluate function at current estimate
        self.f0 = self.problem(self.x)

        if self.debug and not self.analyze_convergence:
            self.losses = torch.cat((self.losses, self.f0.detach().cpu().view(-1)))

        # Gradient of loss (create_graph=True so A() can take a second derivative).
        self.g = TensorList(torch.autograd.grad(self.f0, self.x, create_graph=True))

        # Get the right hand side
        self.b = - self.g.detach()

        # Run CG
        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)

        self.x.detach_()
        self.x += delta_x

        if self.debug:
            self.residuals = torch.cat((self.residuals, res))

    def A(self, x):
        # Damped Hessian-vector product: H x + reg * x.
        return TensorList(torch.autograd.grad(self.g, self.x, x, retain_graph=True)) + self.hessian_reg * x

    def ip(self, a, b):
        # Implements the inner product
        return self.problem.ip_input(a, b)

    def M1(self, x):
        return self.problem.M1(x)

    def M2(self, x):
        return self.problem.M2(x)

    def evaluate_CG_iteration(self, delta_x):
        if self.analyze_convergence:
            x = (self.x + delta_x).detach()
            x.requires_grad_(True)

            # compute loss and gradient
            loss = self.problem(x)
            grad = TensorList(torch.autograd.grad(loss, x))

            # store in the vectors
            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))
            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))
self.residuals = None self.clear_temp() def clear_temp(self): self.dir = None def run(self, num_iter, dummy = None): if num_iter == 0: return lossvec = None if self.debug: lossvec = torch.zeros(num_iter+1) grad_mags = torch.zeros(num_iter+1) for i in range(num_iter): self.x.requires_grad_(True) # Evaluate function at current estimate loss = self.problem(self.x) # Compute grad grad = TensorList(torch.autograd.grad(loss, self.x)) # Update direction if self.dir is None: self.dir = grad else: self.dir = grad + self.momentum * self.dir self.x.detach_() self.x -= self.step_legnth * self.dir if self.debug: lossvec[i] = loss.item() grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item() if self.debug: self.x.requires_grad_(True) loss = self.problem(self.x) grad = TensorList(torch.autograd.grad(loss, self.x)) lossvec[-1] = loss.item() grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item() self.losses = torch.cat((self.losses, lossvec)) self.gradient_mags = torch.cat((self.gradient_mags, grad_mags)) if self.plotting: plot_graph(self.losses, self.fig_num[0], title='Loss') plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude') self.x.detach_() self.clear_temp() ================================================ FILE: external/AR/pytracking/libs/tensordict.py ================================================ from collections import OrderedDict import torch import copy class TensorDict(OrderedDict): """Container mainly used for dicts of torch tensors. 
Extends OrderedDict with pytorch functionality.""" def concat(self, other): """Concatenates two dicts without copying internal data.""" return TensorDict(self, **other) def copy(self): return TensorDict(super(TensorDict, self).copy()) def __deepcopy__(self, memodict={}): return TensorDict(copy.deepcopy(list(self), memodict)) def __getattr__(self, name): if not hasattr(torch.Tensor, name): raise AttributeError('\'TensorDict\' object has not attribute \'{}\''.format(name)) def apply_attr(*args, **kwargs): return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()}) return apply_attr def attribute(self, attr: str, *args): return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()}) def apply(self, fn, *args, **kwargs): return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()}) @staticmethod def _iterable(a): return isinstance(a, (TensorDict, list)) ================================================ FILE: external/AR/pytracking/libs/tensorlist.py ================================================ import functools import torch import copy class TensorList(list): """Container mainly used for lists of torch tensors. 
class TensorList(list):
    """Container mainly used for lists of torch tensors.

    Extends lists with pytorch functionality: arithmetic and comparison
    operators apply element-wise. When the other operand is itself a
    TensorList/list, elements are paired with zip; otherwise the scalar
    operand is broadcast to every element.
    """

    def __init__(self, list_of_tensors = None):
        if list_of_tensors is None:
            list_of_tensors = list()
        super(TensorList, self).__init__(list_of_tensors)

    def __deepcopy__(self, memodict=None):
        # Bug fix: avoid a mutable default memo dict. copy.deepcopy mutates
        # the memo, so a shared `{}` default would leak state between calls.
        if memodict is None:
            memodict = {}
        return TensorList(copy.deepcopy(list(self), memodict))

    def __getitem__(self, item):
        """int -> single element; tuple/list of indices -> TensorList of those
        elements; anything else (e.g. slice) -> TensorList of the selection."""
        if isinstance(item, int):
            return super(TensorList, self).__getitem__(item)
        elif isinstance(item, (tuple, list)):
            return TensorList([super(TensorList, self).__getitem__(i) for i in item])
        else:
            return TensorList(super(TensorList, self).__getitem__(item))

    # ---- element-wise arithmetic -------------------------------------------

    def __add__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])
        return TensorList([e + other for e in self])

    def __radd__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])
        return TensorList([other + e for e in self])

    def __iadd__(self, other):
        # In-place on each element (keeps tensor in-place semantics of +=).
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] += e2
        else:
            for i in range(len(self)):
                self[i] += other
        return self

    def __sub__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])
        return TensorList([e - other for e in self])

    def __rsub__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])
        return TensorList([other - e for e in self])

    def __isub__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] -= e2
        else:
            for i in range(len(self)):
                self[i] -= other
        return self

    def __mul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 * e2 for e1, e2 in zip(self, other)])
        return TensorList([e * other for e in self])

    def __rmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])
        return TensorList([other * e for e in self])

    def __imul__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] *= e2
        else:
            for i in range(len(self)):
                self[i] *= other
        return self

    def __truediv__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])
        return TensorList([e / other for e in self])

    def __rtruediv__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])
        return TensorList([other / e for e in self])

    def __itruediv__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] /= e2
        else:
            for i in range(len(self)):
                self[i] /= other
        return self

    def __matmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])
        return TensorList([e @ other for e in self])

    def __rmatmul__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])
        return TensorList([other @ e for e in self])

    def __imatmul__(self, other):
        if TensorList._iterable(other):
            for i, e2 in enumerate(other):
                self[i] @= e2
        else:
            for i in range(len(self)):
                self[i] @= other
        return self

    def __mod__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])
        return TensorList([e % other for e in self])

    def __rmod__(self, other):
        if TensorList._iterable(other):
            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])
        return TensorList([other % e for e in self])

    def __pos__(self):
        return TensorList([+e for e in self])

    def __neg__(self):
        return TensorList([-e for e in self])

    def __le__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])
        return TensorList([e <= other for e in self])

    def __ge__(self, other):
        if TensorList._iterable(other):
            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])
        return TensorList([e >= other for e in self])

    # ---- list utilities ----------------------------------------------------

    def concat(self, other):
        # List concatenation (not element-wise add) that keeps the type.
        return TensorList(super(TensorList, self).__add__(other))

    def copy(self):
        # Shallow copy that preserves the TensorList type.
        return TensorList(super(TensorList, self).copy())

    def unroll(self):
        """Recursively flatten nested TensorLists into a single flat TensorList."""
        if not any(isinstance(t, TensorList) for t in self):
            return self
        new_list = TensorList()
        for t in self:
            if isinstance(t, TensorList):
                new_list.extend(t.unroll())
            else:
                new_list.append(t)
        return new_list

    def list(self):
        # Convert back to a plain list.
        return list(self)

    def attribute(self, attr: str, *args):
        # Fetch attribute `attr` from every element (optional getattr default
        # may be passed through *args).
        return TensorList([getattr(e, attr, *args) for e in self])

    def apply(self, fn):
        # Apply `fn` to every element.
        return TensorList([fn(e) for e in self])

    def __getattr__(self, name):
        """Forward unknown attributes to the contained tensors, e.g.
        tl.clone() calls .clone() on every element.

        Raises:
            AttributeError: if torch.Tensor itself has no attribute `name`.
        """
        if not hasattr(torch.Tensor, name):
            # Typo fix in message: 'has not attribute' -> 'has no attribute'.
            raise AttributeError('\'TensorList\' object has no attribute \'{}\''.format(name))

        def apply_attr(*args, **kwargs):
            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])
        return apply_attr

    @staticmethod
    def _iterable(a):
        # Decides between element-wise (zip) and broadcast semantics.
        return isinstance(a, (TensorList, list))


def tensor_operation(op):
    """Decorator that lifts a (max two operand) tensor operation so it also
    works element-wise when one or both operands are TensorLists."""

    def islist(a):
        return isinstance(a, TensorList)

    @functools.wraps(op)
    def oplist(*args, **kwargs):
        if len(args) == 0:
            raise ValueError('Must be at least one argument without keyword (i.e. operand).')

        if len(args) == 1:
            if islist(args[0]):
                return TensorList([op(a, **kwargs) for a in args[0]])
        else:
            # Multiple operands, assume max two
            if islist(args[0]) and islist(args[1]):
                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])
            if islist(args[0]):
                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])
            if islist(args[1]):
                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])

        # None of the operands are lists
        return op(*args, **kwargs)

    return oplist
def parameters():
    """Build TrackerParams for the ATOM tracker, 'atom_gmm_sampl' variant
    (GMM-sampled IoUNet refinement with a tuple of gradient step lengths)."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                        # Debug level
    params.visualization = False            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2   # Maximum image sample size
    params.min_image_sample_size = (18*16)**2   # Minimum image sample size
    params.search_area_scale = 5                # Scale relative to target size
    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0        # CG iterations to run after GN
    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None    # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                # Learning rate
    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250     # Memory size
    params.train_skipping = 10          # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4,4)     # Kernel size of filter
    deep_params.compressed_dim = 64     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1       # Filter regularization factor
    deep_params.projection_reg = 1e-4   # Projection regularization factor

    # Windowing
    params.feature_window = False       # Perform windowing of features
    params.window_output = False        # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)    # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1        # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'           # Method for initializing the projection matrix
    params.filter_init_method = 'randn'         # Method for initializing the spatial filter
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use this or not
    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.8             # Dispacement to consider for distractors (NOTE: attribute name misspelled upstream; kept for compatibility with code that reads it)
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True                   # Use IoU net or not
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False          # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                         # Top-k average to estimate final box
    params.num_init_random_boxes = 9            # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                 # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6             # Limit on the aspect ratio
    params.box_refinement_iter = 10             # Number of iterations for refining the boxes
    params.box_refinement_step_length = (1e-2, 5e-2)  # 1 # Gradient step length in the bounding box refinement 5e-3 2e-2
    params.box_refinement_step_decay = 1        # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_gmm_sampl', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
def parameters():
    """Build TrackerParams for the ATOM tracker, 'atom_prob_ml' variant
    (probabilistic ML-trained model; smaller box-refinement step lengths)."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                        # Debug level
    params.visualization = False            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2   # Maximum image sample size
    params.min_image_sample_size = (18*16)**2   # Minimum image sample size
    params.search_area_scale = 5                # Scale relative to target size
    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0        # CG iterations to run after GN
    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None    # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                # Learning rate
    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250     # Memory size
    params.train_skipping = 10          # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4,4)     # Kernel size of filter
    deep_params.compressed_dim = 64     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1       # Filter regularization factor
    deep_params.projection_reg = 1e-4   # Projection regularization factor

    # Windowing
    params.feature_window = False       # Perform windowing of features
    params.window_output = False        # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)    # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1        # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'           # Method for initializing the projection matrix
    params.filter_init_method = 'randn'         # Method for initializing the spatial filter
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use this or not
    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.8             # Dispacement to consider for distractors (NOTE: attribute name misspelled upstream; kept for compatibility with code that reads it)
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True                   # Use IoU net or not
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False          # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                         # Top-k average to estimate final box
    params.num_init_random_boxes = 9            # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                 # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6             # Limit on the aspect ratio
    params.box_refinement_iter = 10             # Number of iterations for refining the boxes
    params.box_refinement_step_length = (2e-4, 10e-4)  # 1 # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1        # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_prob_ml', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
def parameters():
    """Build the default TrackerParams for the ATOM tracker
    (atom_default.pth weights, IoUNet box refinement enabled)."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                        # Debug level
    params.visualization = False            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2   # Maximum image sample size
    params.min_image_sample_size = (18*16)**2   # Minimum image sample size
    params.search_area_scale = 5                # Scale relative to target size
    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0        # CG iterations to run after GN
    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None    # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                # Learning rate
    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250     # Memory size
    params.train_skipping = 10          # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4,4)     # Kernel size of filter
    deep_params.compressed_dim = 64     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1       # Filter regularization factor
    deep_params.projection_reg = 1e-4   # Projection regularization factor

    # Windowing
    params.feature_window = False       # Perform windowing of features
    params.window_output = False        # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)    # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1        # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'           # Method for initializing the projection matrix
    params.filter_init_method = 'randn'         # Method for initializing the spatial filter
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use this or not
    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.8             # Dispacement to consider for distractors (NOTE: attribute name misspelled upstream; kept for compatibility with code that reads it)
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True                   # Use IoU net or not
    params.iounet_augmentation = False          # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                         # Top-k average to estimate final box
    params.num_init_random_boxes = 9            # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                 # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6             # Limit on the aspect ratio
    params.box_refinement_iter = 5              # Number of iterations for refining the boxes
    params.box_refinement_step_length = 1       # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1        # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
def parameters():
    """Build TrackerParams for the ATOM tracker tuned for VOT
    (smaller sample size, output windowing, disabled missing-target logic,
    VOT rotated-box annotation conversion)."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                        # Debug level
    params.visualization = False            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (14 * 16) ** 2   # Maximum image sample size
    params.min_image_sample_size = (14 * 16) ** 2   # Minimum image sample size
    params.search_area_scale = 4                    # Scale relative to target size
    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0        # CG iterations to run after GN
    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None    # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.0075      # Learning rate
    deep_params.output_sigma_factor = 1/4   # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250     # Memory size
    params.train_skipping = 10          # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4, 4)    # Kernel size of filter
    deep_params.compressed_dim = 64     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1       # Filter regularization factor
    deep_params.projection_reg = 1e-4   # Projection regularization factor

    # Windowing
    params.feature_window = False       # Perform windowing of features
    params.window_output = True         # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)    # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1        # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 1 / 3          # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'           # Method for initializing the projection matrix
    params.filter_init_method = 'randn'         # Method for initializing the spatial filter
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use this or not
    params.target_not_found_threshold = -1      # Absolute score threshold to detect target missing
    params.distractor_threshold = 100           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.3        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.7             # Dispacement to consider for distractors (NOTE: attribute name misspelled upstream; kept for compatibility with code that reads it)
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # IoUNet parameters
    params.iounet_augmentation = False          # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                         # Top-k average to estimate final box
    params.num_init_random_boxes = 9            # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                 # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6             # Limit on the aspect ratio
    params.box_refinement_iter = 5              # Number of iterations for refining the boxes
    params.box_refinement_step_length = 1       # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1        # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    params.vot_anno_conversion_type = 'preserve_area'

    return params
def parameters():
    """Build TrackerParams for the ATOM tracker without IoUNet refinement,
    using multi-scale search (five 1.02-spaced scale factors) instead."""
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                        # Debug level
    params.visualization = False            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18*16)**2   # Maximum image sample size
    params.min_image_sample_size = (18*16)**2   # Minimum image sample size
    params.search_area_scale = 5                # Scale relative to target size
    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0        # CG iterations to run after GN
    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None    # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                # Learning rate
    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250     # Memory size
    params.train_skipping = 10          # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4,4)     # Kernel size of filter
    deep_params.compressed_dim = 64     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1       # Filter regularization factor
    deep_params.projection_reg = 1e-4   # Projection regularization factor

    # Windowing
    params.feature_window = False       # Perform windowing of features
    params.window_output = False        # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = 1.02**torch.arange(-2, 3).float()    # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1                            # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],
                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],
                           'dropout': (7, 0.2)}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'           # Method for initializing the projection matrix
    params.filter_init_method = 'randn'         # Method for initializing the spatial filter
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use this or not
    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.8             # Dispacement to consider for distractors (NOTE: attribute name misspelled upstream; kept for compatibility with code that reads it)
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = False                  # Use IoU net or not
    params.iounet_augmentation = False          # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                         # Top-k average to estimate final box
    params.num_init_random_boxes = 9            # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1                 # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5                  # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6             # Limit on the aspect ratio
    params.box_refinement_iter = 5              # Number of iterations for refining the boxes
    params.box_refinement_step_length = 1       # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1        # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp18.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/dimp18_vot.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 14 * 16 params.search_area_scale = 4 params.feature_size_odd = False # Learning parameters params.sample_memory_size = 250 params.learning_rate = 0.0075 params.init_samples_minimum_weight = 0.0 params.train_skipping = 10 # Net optimization params params.update_classifier = True params.net_opt_iter = 25 params.net_opt_update_iter = 3 params.net_opt_hn_iter = 3 # Detection parameters params.window_output = True # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.0 params.distractor_threshold = 100 params.hard_negative_threshold = 0.45 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.7 
params.perform_hn_without_windowing = True params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp18.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/dimp50.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 18*16 params.search_area_scale = 5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 
params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/dimp50_vot.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 14 * 16 params.search_area_scale = 4 # Learning parameters params.sample_memory_size = 250 params.learning_rate = 0.0075 params.init_samples_minimum_weight = 0.0 params.train_skipping = 10 # Net optimization params params.update_classifier = True params.net_opt_iter = 25 params.net_opt_update_iter = 3 params.net_opt_hn_iter = 3 # Detection parameters params.window_output = True # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)], 'dropout': (7, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.0 params.distractor_threshold = 100 params.hard_negative_threshold = 0.45 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.7 params.perform_hn_without_windowing = True 
params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/dimp50_vot19.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 16 * 16 params.search_area_scale = 4.5 # Learning parameters params.sample_memory_size = 100 params.learning_rate = 0.0075 params.init_samples_minimum_weight = 0.0 params.train_skipping = 10 # Net optimization params params.update_classifier = True params.net_opt_iter = 15 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 2 # Detection parameters params.window_output = True # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [-5, 10, -30, 60], 'blur': [(2, 0.2), (1, 3)], 'relativeshift': [(0.6, 0.6), (-0.6, -0.6)], 'dropout': (3, 0.2)} params.augmentation_expansion_factor = 1.4 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.0 params.distractor_threshold = 100 params.hard_negative_threshold = 0.45 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.7 params.perform_hn_without_windowing = True params.hard_negative_learning_rate = 0.02 
params.update_scale_when_uncertain = True # IoUnet parameters params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 3 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.net = NetWithBackbone(net_path='dimp50.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/prdimp18.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 18*16 params.search_area_scale = 5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.score_preprocess = 'softmax' params.target_not_found_threshold = 0.04 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # 
IoUnet parameters params.box_refinement_space = 'relative' params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 10 # Number of iterations for refining the boxes params.box_refinement_step_length = 2.5e-3 # 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) params.net = NetWithBackbone(net_path='prdimp18.pth.tar', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/prdimp50.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 22*16 params.search_area_scale = 6 params.border_mode = 'inside_major' params.patch_max_scale_change = 1.5 # Learning parameters params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), 
(0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.score_preprocess = 'softmax' params.target_not_found_threshold = 0.04 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.box_refinement_space = 'relative' params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 10 # Number of iterations for refining the boxes params.box_refinement_step_length = 2.5e-3 # 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) params.net = NetWithBackbone(net_path='prdimp50.pth.tar', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: external/AR/pytracking/parameter/dimp/super_dimp.py ================================================ from pytracking.utils import TrackerParams from pytracking.features.net_wrappers import NetWithBackbone def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = 22*16 params.search_area_scale = 6 params.border_mode = 'inside_major' params.patch_max_scale_change = 1.5 # Learning parameters params.sample_memory_size = 
50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 # Net optimization params params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 # Detection parameters params.window_output = False # Init augmentation parameters params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, -10, 45, -45], 'blur': [(3,1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = 1/3 # Advanced localization parameters params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True # IoUnet parameters params.box_refinement_space = 'relative' params.iounet_augmentation = False # Use the augmented samples to compute the modulation vector params.iounet_k = 3 # Top-k average to estimate final box params.num_init_random_boxes = 9 # Num extra random boxes in addition to the classifier prediction params.box_jitter_pos = 0.1 # How much to jitter the translation for random boxes params.box_jitter_sz = 0.5 # How much to jitter the scale for random boxes params.maximal_aspect_ratio = 6 # Limit on the aspect ratio params.box_refinement_iter = 10 # Number of iterations for refining the boxes params.box_refinement_step_length = 2.5e-3 # 1 # Gradient step length in the bounding box refinement params.box_refinement_step_decay = 1 # Multiplicative step length decay (1 means no decay) params.net = NetWithBackbone(net_path='super_dimp.pth.tar', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params ================================================ FILE: 
external/AR/pytracking/parameter/eco/__init__.py ================================================ ================================================ FILE: external/AR/pytracking/parameter/eco/default.py ================================================ from pytracking.utils import TrackerParams, FeatureParams from pytracking.features.extractor import MultiResolutionExtractor from pytracking.features import deep import torch def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True # Feature specific parameters shallow_params = TrackerParams() deep_params = TrackerParams() # Patch sampling parameters params.max_image_sample_size = 250**2 # Maximum image sample size params.min_image_sample_size = 200**2 # Minimum image sample size params.search_area_scale = 4.5 # Scale relative to target size # Conjugate Gradient parameters params.CG_iter = 5 # The number of Conjugate Gradient iterations in each update after the first frame params.init_CG_iter = 100 # The total number of Conjugate Gradient iterations used in the first frame params.init_GN_iter = 10 # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated) params.post_init_CG_iter = 0 # CG iterations to run after GN params.fletcher_reeves = False # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient params.standard_alpha = True # Use the standard formula for computing the step length in Conjugate Gradient params.CG_forgetting_rate = 75 # Forgetting rate of the last conjugate direction params.precond_data_param = 0.3 # Weight of the data term in the preconditioner params.precond_reg_param = 0.15 # Weight of the regularization term in the preconditioner params.precond_proj_param = 35 # Weight of the projection matrix part in the preconditioner # Learning parameters shallow_params.learning_rate = 0.025 deep_params.learning_rate = 0.0075 shallow_params.output_sigma_factor = 1/16 
deep_params.output_sigma_factor = 1/4 # Training parameters params.sample_memory_size = 200 # Memory size params.train_skipping = 10 # How often to run training (every n-th frame) # Detection parameters params.scale_factors = 1.02**torch.arange(-2, 3).float() # What scales to use for localization params.score_upsample_factor = 1 # How much Fourier upsampling to use params.score_fusion_strategy = 'weightedsum' # Fusion strategy shallow_params.translation_weight = 0.4 # Weight of this feature deep_params.translation_weight = 1 - shallow_params.translation_weight # Init augmentation parameters params.augmentation = {'fliplr': True, 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60], 'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)], 'shift': [(6, 6), (-6, 6), (6, -6), (-6,-6)], 'dropout': (7, 0.2)} # Whether to use augmentation for this feature deep_params.use_augmentation = True shallow_params.use_augmentation = True # Factorized convolution parameters # params.use_projection_matrix = True # Use projection matrix, i.e. 
use the factorized convolution formulation params.update_projection_matrix = True # Whether the projection matrix should be optimized or not # params.proj_init_method = 'pca' # Method for initializing the projection matrix params.projection_reg = 5e-8 # Regularization paremeter of the projection matrix shallow_params.compressed_dim = 16 # Dimension output of projection matrix for shallow features deep_params.compressed_dim = 64 # Dimension output of projection matrix for deep features # Interpolation parameters params.interpolation_method = 'bicubic' # The kind of interpolation kernel params.interpolation_bicubic_a = -0.75 # The parameter for the bicubic interpolation kernel params.interpolation_centering = True # Center the kernel at the feature sample params.interpolation_windowing = False # Do additional windowing on the Fourier coefficients of the kernel # Regularization parameters shallow_params.use_reg_window = True # Use spatial regularization or not shallow_params.reg_window_min = 1e-4 # The minimum value of the regularization window shallow_params.reg_window_edge = 10e-3 # The impact of the spatial regularization shallow_params.reg_window_power = 2 # The degree of the polynomial to use (e.g. 2 is a quadratic window) shallow_params.reg_sparsity_threshold = 0.05 # A relative threshold of which DFT coefficients that should be set to zero deep_params.use_reg_window = True # Use spatial regularization or not deep_params.reg_window_min = 10e-4 # The minimum value of the regularization window deep_params.reg_window_edge = 50e-3 # The impact of the spatial regularization deep_params.reg_window_power = 2 # The degree of the polynomial to use (e.g. 
2 is a quadratic window) deep_params.reg_sparsity_threshold = 0.1 # A relative threshold of which DFT coefficients that should be set to zero fparams = FeatureParams(feature_params=[shallow_params, deep_params]) features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'], use_gpu=params.use_gpu, fparams=fparams, pool_stride=[2, 1], normalize_power=2) params.features = MultiResolutionExtractor([features]) return params ================================================ FILE: external/AR/pytracking/tracker/__init__.py ================================================ ================================================ FILE: external/AR/pytracking/tracker/atom/__init__.py ================================================ from .atom import ATOM def get_tracker_class(): return ATOM ================================================ FILE: external/AR/pytracking/tracker/atom/atom.py ================================================ from pytracking.tracker.base import BaseTracker import torch import torch.nn.functional as F import torch.nn import math import time from pytracking import dcf, fourier, TensorList, operation from pytracking.features.preprocessing import numpy_to_torch from pytracking.utils.plotting import show_tensor from pytracking.libs.optimization import GaussNewtonCG, ConjugateGradient, GradientDescentL2 from .optim import ConvProblem, FactorizedConvProblem from pytracking.features import augmentation import ltr.data.bounding_box_utils as bbutils class ATOM(BaseTracker): multiobj_mode = 'parallel' def initialize_features(self): if not getattr(self, 'features_initialized', False): self.params.features.initialize() self.features_initialized = True def initialize(self, image, info: dict) -> dict: state = info['init_bbox'] # Initialize some stuff self.frame_num = 1 if not self.params.has('device'): self.params.device = 'cuda' if self.params.use_gpu else 'cpu' # Initialize features self.initialize_features() # Check if image is color 
self.params.features.set_is_color(image.shape[2] == 3) # Get feature specific params self.fparams = self.params.features.get_fparams('feature_params') tic = time.time() # Get position and size self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2]) self.target_sz = torch.Tensor([state[3], state[2]]) # Set search area self.target_scale = 1.0 search_area = torch.prod(self.target_sz * self.params.search_area_scale).item() if search_area > self.params.max_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.max_image_sample_size) elif search_area < self.params.min_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.min_image_sample_size) # Check if IoUNet is used self.use_iou_net = self.params.get('use_iou_net', True) # Target size in base scale self.base_target_sz = self.target_sz / self.target_scale # Use odd square search area and set sizes feat_max_stride = max(self.params.features.stride()) if self.params.get('search_area_shape', 'square') == 'square': self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2) elif self.params.search_area_shape == 'initrect': self.img_sample_sz = torch.round(self.base_target_sz * self.params.search_area_scale) else: raise ValueError('Unknown search area shape') if self.params.feature_size_odd: self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride) else: self.img_sample_sz += feat_max_stride - (self.img_sample_sz + feat_max_stride) % (2 * feat_max_stride) # Set sizes self.img_support_sz = self.img_sample_sz self.feature_sz = self.params.features.size(self.img_sample_sz) self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output self.kernel_size = self.fparams.attribute('kernel_size') self.iou_img_sample_sz = self.img_sample_sz # Optimization options self.params.precond_learning_rate = 
        self.fparams.attribute('learning_rate')  # (continuation of an assignment started before this chunk)

        # Direction-forget factor for the conjugate-gradient optimizer; 0 disables forgetting.
        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:
            self.params.direction_forget_factor = 0
        else:
            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate

        # Optional spatial output window (Hann) applied to the score map.
        self.output_window = None
        if self.params.get('window_output', False):
            if self.params.get('use_clipped_window', False):
                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), self.output_sz.long()*self.params.effective_search_area / self.params.search_area_scale, centered=False).to(self.params.device)
            else:
                self.output_window = dcf.hann2d(self.output_sz.long(), centered=False).to(self.params.device)

        # Initialize some learning things
        self.init_learning()

        # Convert image
        im = numpy_to_torch(image)
        self.im = im  # For debugging only

        # Setup scale bounds
        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])
        self.min_scale_factor = torch.max(10 / self.base_target_sz)
        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)

        # Extract and transform sample
        x = self.generate_init_samples(im)

        # Initialize iounet
        if self.use_iou_net:
            self.init_iou_net()

        # Initialize projection matrix
        self.init_projection_matrix(x)

        # Transform to get the training sample
        train_x = self.preprocess_sample(x)

        # Generate label function
        init_y = self.init_label_function(train_x)

        # Init memory
        self.init_memory(train_x)

        # Init optimizer and do initial optimization
        self.init_optimization(train_x, init_y)

        self.pos_iounet = self.pos.clone()

        out = {'time': time.time() - tic}
        return out

    def init_optimization(self, train_x, init_y):
        """Set up and run the initial (first-frame) filter optimization.

        Optionally jointly optimizes the correlation filter and the feature
        projection matrix (factorized formulation), then continues with
        filter-only optimization on the stored training samples.

        args:
            train_x - TensorList of projected first-frame training samples.
            init_y - TensorList of corresponding label functions.
        """
        # Initialize filter
        filter_init_method = self.params.get('filter_init_method', 'zeros')
        self.filter = TensorList(
            [x.new_zeros(1, cdim, sz[0], sz[1]) for x, cdim, sz in zip(train_x, self.compressed_dim, self.kernel_size)])
        if filter_init_method == 'zeros':
            pass
        elif filter_init_method == 'randn':
            for f in self.filter:
                f.normal_(0, 1/f.numel())
        else:
            raise ValueError('Unknown "filter_init_method"')

        # Get parameters
        self.params.update_projection_matrix = self.params.get('update_projection_matrix', True) and self.params.use_projection_matrix
        optimizer = self.params.get('optimizer', 'GaussNewtonCG')

        # Setup factorized joint optimization
        if self.params.update_projection_matrix:
            self.joint_problem = FactorizedConvProblem(self.init_training_samples, init_y, self.filter_reg,
                                                       self.fparams.attribute('projection_reg'), self.params,
                                                       self.init_sample_weights, self.projection_activation,
                                                       self.response_activation)

            # Variable containing both filter and projection matrix
            joint_var = self.filter.concat(self.projection_matrix)

            # Initialize optimizer
            analyze_convergence = self.params.get('analyze_convergence', False)
            if optimizer == 'GaussNewtonCG':
                self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug >= 1),
                                                     plotting=(self.params.debug >= 3), analyze=analyze_convergence,
                                                     visdom=self.visdom)
            elif optimizer == 'GradientDescentL2':
                self.joint_optimizer = GradientDescentL2(self.joint_problem, joint_var,
                                                         self.params.optimizer_step_length,
                                                         self.params.optimizer_momentum,
                                                         plotting=(self.params.debug >= 3),
                                                         debug=(self.params.debug >= 1), visdom=self.visdom)

            # Do joint optimization. A list/tuple gives explicit per-round CG iteration counts;
            # otherwise the total iterations are split evenly over init_GN_iter Gauss-Newton rounds.
            if isinstance(self.params.init_CG_iter, (list, tuple)):
                self.joint_optimizer.run(self.params.init_CG_iter)
            else:
                self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)

            # Debug-only convergence dump: writes loss/gradient traces to text files, then aborts.
            if analyze_convergence:
                opt_name = 'CG' if self.params.get('CG_optimizer', True) else 'GD'
                for val_name, values in zip(['loss', 'gradient'], [self.joint_optimizer.losses, self.joint_optimizer.gradient_mags]):
                    val_str = ' '.join(['{:.8e}'.format(v.item()) for v in values])
                    file_name = '{}_{}.txt'.format(opt_name, val_name)
                    with open(file_name, 'a') as f:
                        f.write(val_str + '\n')
                raise RuntimeError('Exiting')

            # Re-project samples with the new projection matrix
            compressed_samples = self.project_sample(self.init_training_samples, self.projection_matrix)
            for train_samp, init_samp in zip(self.training_samples, compressed_samples):
                train_samp[:init_samp.shape[0],...] = init_samp

        self.hinge_mask = None

        # Initialize optimizer
        self.conv_problem = ConvProblem(self.training_samples, self.y, self.filter_reg, self.sample_weights, self.response_activation)

        if optimizer == 'GaussNewtonCG':
            self.filter_optimizer = ConjugateGradient(self.conv_problem, self.filter,
                                                      fletcher_reeves=self.params.fletcher_reeves,
                                                      direction_forget_factor=self.params.direction_forget_factor,
                                                      debug=(self.params.debug>=1), plotting=(self.params.debug>=3),
                                                      visdom=self.visdom)
        elif optimizer == 'GradientDescentL2':
            self.filter_optimizer = GradientDescentL2(self.conv_problem, self.filter,
                                                      self.params.optimizer_step_length,
                                                      self.params.optimizer_momentum,
                                                      debug=(self.params.debug >= 1), plotting=(self.params.debug>=3),
                                                      visdom=self.visdom)

        # Transfer losses from previous optimization
        if self.params.update_projection_matrix:
            self.filter_optimizer.residuals = self.joint_optimizer.residuals
            self.filter_optimizer.losses = self.joint_optimizer.losses

        # No joint stage was run, so do the initial filter-only optimization here.
        if not self.params.update_projection_matrix:
            self.filter_optimizer.run(self.params.init_CG_iter)

        # Post optimization
        self.filter_optimizer.run(self.params.post_init_CG_iter)

        # Free memory
        del self.init_training_samples
        if self.params.use_projection_matrix:
            del self.joint_problem, self.joint_optimizer

    def track(self, image, info: dict = None) -> dict:
        """Track the target in a new frame and update the model.

        Returns a dict with 'target_bbox' as [x, y, w, h] in image coordinates.
        """
        self.debug_info = {}

        self.frame_num += 1
        self.debug_info['frame_num'] = self.frame_num

        # Convert image
        im = numpy_to_torch(image)
        self.im = im  # For debugging only

        # ------- LOCALIZATION ------- #

        # Get sample
        sample_pos = self.pos.round()
        sample_scales = self.target_scale * self.params.scale_factors
        test_x = self.extract_processed_sample(im, self.pos, sample_scales, self.img_sample_sz)

        # Compute scores
        scores_raw = self.apply_filter(test_x)
        translation_vec, scale_ind, s, flag =
self.localize_target(scores_raw)  # (continuation of the assignment split at the chunk boundary above)

        # Update position and scale. flag is one of: None, 'not_found', 'uncertain', 'hard_negative'.
        if flag != 'not_found':
            if self.use_iou_net:
                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'
                if self.params.get('use_classifier', True):
                    self.update_state(sample_pos + translation_vec)
                self.refine_target_box(sample_pos, sample_scales[scale_ind], scale_ind, update_scale_flag)
            elif self.params.get('use_classifier', True):
                self.update_state(sample_pos + translation_vec, sample_scales[scale_ind])

        score_map = s[scale_ind, ...]
        max_score = torch.max(score_map).item()
        self.debug_info['max_score'] = max_score
        self.debug_info['flag'] = flag

        # Visualization: prefer visdom when available, otherwise a debug window.
        if self.visdom is not None:
            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')
            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')
        elif self.params.debug >= 2:
            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))

        # ------- UPDATE ------- #

        # Check flags and set learning rate if hard negative
        update_flag = flag not in ['not_found', 'uncertain']
        hard_negative = (flag == 'hard_negative')
        learning_rate = self.params.hard_negative_learning_rate if hard_negative else None

        if update_flag:
            # Get train sample
            train_x = TensorList([x[scale_ind:scale_ind+1, ...] for x in test_x])

            # Create label for sample
            train_y = self.get_label_function(sample_pos, sample_scales[scale_ind])

            # Update memory
            self.update_memory(train_x, train_y, learning_rate)

        # Train filter: immediately on hard negatives, otherwise only every train_skipping frames.
        if hard_negative:
            self.filter_optimizer.run(self.params.hard_negative_CG_iter)
        elif (self.frame_num-1) % self.params.train_skipping == 0:
            self.filter_optimizer.run(self.params.CG_iter)

        # Set the pos of the tracker to iounet pos
        if self.use_iou_net and flag != 'not_found':
            self.pos = self.pos_iounet.clone()

        # Return new state as [x, y, w, h]; self.pos/self.target_sz are stored as (row, col),
        # hence the [[1,0]] flip back to (x, y) order.
        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))

        out = {'target_bbox': new_state.tolist()}
        return out

    def apply_filter(self, sample_x: TensorList):
        """Correlate the learned filter with the extracted sample features."""
        return operation.conv2d(sample_x, self.filter, mode='same')

    def localize_target(self, scores_raw):
        """Fuse per-feature score maps in the Fourier domain and localize the target.

        Returns (translation_vec, scale_ind, scores, flag).
        """
        # Weighted sum (if multiple features) with interpolation in fourier domain
        weight = self.fparams.attribute('translation_weight', 1.0)
        scores_raw = weight * scores_raw
        sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) * scores_raw.size(3))
        for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):
            # Compensate for the half-pixel offset of even-sized kernels.
            sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (1 - torch.Tensor([ksz[0]%2, ksz[1]%2]) / sz))

        scores_fs = fourier.sum_fs(sf_weighted)
        scores = fourier.sample_fs(scores_fs, self.output_sz)

        if self.output_window is not None and not self.params.get('perform_hn_without_windowing', False):
            scores *= self.output_window

        if self.params.get('advanced_localization', False):
            return self.localize_advanced(scores)

        # Get maximum
        max_score, max_disp = dcf.max2d(scores)
        _, scale_ind = torch.max(max_score, dim=0)
        max_disp = max_disp.float().cpu()

        # Convert to displacements in the base scale
        disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2

        # Compute translation vector and scale change factor
        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale
        translation_vec *=
self.params.scale_factors[scale_ind]  # (continuation of the in-place multiply split at the chunk boundary above)

        # Shift the score output for visualization purposes
        if self.params.debug >= 2:
            sz = scores.shape[-2:]
            scores = torch.cat([scores[...,sz[0]//2:,:], scores[...,:sz[0]//2,:]], -2)
            scores = torch.cat([scores[...,:,sz[1]//2:], scores[...,:,:sz[1]//2]], -1)

        return translation_vec, scale_ind, scores, None

    def localize_advanced(self, scores):
        """Does the advanced localization with hard negative detection and target not found.

        Returns (translation_vec, scale_ind, scores, flag) where flag is one of
        None, 'not_found', 'uncertain' or 'hard_negative'.
        """
        sz = scores.shape[-2:]

        # Optionally keep an unwindowed copy so the distractor search below is not
        # biased by the output window.
        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
            scores_orig = scores.clone()

            scores_orig = torch.cat([scores_orig[..., (sz[0] + 1) // 2:, :], scores_orig[..., :(sz[0] + 1) // 2, :]], -2)
            scores_orig = torch.cat([scores_orig[..., :, (sz[1] + 1) // 2:], scores_orig[..., :, :(sz[1] + 1) // 2]], -1)

            scores *= self.output_window

        # Shift scores back so the map is centered (origin moved to the middle).
        scores = torch.cat([scores[...,(sz[0]+1)//2:,:], scores[...,:(sz[0]+1)//2,:]], -2)
        scores = torch.cat([scores[...,:,(sz[1]+1)//2:], scores[...,:,:(sz[1]+1)//2]], -1)

        # Find maximum
        max_score1, max_disp1 = dcf.max2d(scores)
        _, scale_ind = torch.max(max_score1, dim=0)
        max_score1 = max_score1[scale_ind]
        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)
        target_disp1 = max_disp1 - self.output_sz // 2
        translation_vec1 = target_disp1 * (self.img_support_sz / self.output_sz) * self.target_scale

        if max_score1.item() < self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores, 'not_found'

        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
            scores = scores_orig

        # Mask out target neighborhood, then search for a second (distractor) peak.
        target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale
        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)
        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])
        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)
        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])
        scores_masked = scores[scale_ind:scale_ind+1,...].clone()
        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0

        # Find new maximum
        max_score2, max_disp2 = dcf.max2d(scores_masked)
        max_disp2 = max_disp2.float().cpu().view(-1)
        target_disp2 = max_disp2 - self.output_sz // 2
        translation_vec2 = target_disp2 * (self.img_support_sz / self.output_sz) * self.target_scale

        # Handle the different cases
        if max_score2 > self.params.distractor_threshold * max_score1:
            disp_norm1 = torch.sqrt(torch.sum(target_disp1**2))
            disp_norm2 = torch.sqrt(torch.sum(target_disp2**2))
            # NOTE(review): 'dispalcement_scale' is a misspelled parameter name, but it is
            # the attribute name used in the parameter files — renaming requires a
            # coordinated change across all configs.
            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2

            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:
                return translation_vec1, scale_ind, scores, 'hard_negative'
            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:
                return translation_vec2, scale_ind, scores, 'hard_negative'
            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:
                return translation_vec1, scale_ind, scores, 'uncertain'

            # If also the distractor is close, return with highest score
            return translation_vec1, scale_ind, scores, 'uncertain'

        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores, 'hard_negative'

        return translation_vec1, scale_ind, scores, None

    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):
        """Extract backbone features for the given position, scales and sample size."""
        return self.params.features.extract(im, pos, scales, sz)[0]

    def get_iou_features(self):
        """Get the cached features used as input to the IoU predictor."""
        return self.params.features.get_unique_attribute('iounet_features')

    def get_iou_backbone_features(self):
        """Get the cached backbone features used to build the IoU modulation vectors."""
        return self.params.features.get_unique_attribute('iounet_backbone_features')

    def extract_processed_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> (TensorList, TensorList):
        """Extract, project and window a sample in one call."""
        x = self.extract_sample(im, pos, scales, sz)
        return self.preprocess_sample(self.project_sample(x))

    def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):
        """Optionally apply the feature window to the sample.

        NOTE(review): the key queried is '_feature_window' (leading underscore), so with
        default parameter files this windowing appears permanently disabled — confirm
        whether this is intentional before relying on it.
        """
        if self.params.get('_feature_window', False):
            x = x * self.feature_window
        return x

    def project_sample(self, x: TensorList, proj_matrix = None):
        """Project features through the (1x1 conv) projection matrix and activation."""
        # Apply projection matrix
        if proj_matrix is None:
            proj_matrix = self.projection_matrix
        return operation.conv2d(x, proj_matrix).apply(self.projection_activation)

    def init_learning(self):
        """Set up windows, regularization and the two activation functions."""
        # Get window function
        self.feature_window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz])

        # Filter regularization
        self.filter_reg = self.fparams.attribute('filter_reg')

        # Activation function after the projection matrix (phi_1 in the paper)
        projection_activation = self.params.get('projection_activation', 'none')
        if isinstance(projection_activation, tuple):
            projection_activation, act_param = projection_activation

        if projection_activation == 'none':
            self.projection_activation = lambda x: x
        elif projection_activation == 'relu':
            self.projection_activation = torch.nn.ReLU(inplace=True)
        elif projection_activation == 'elu':
            self.projection_activation = torch.nn.ELU(inplace=True)
        elif projection_activation == 'mlu':
            # 'mlu' requires the (name, act_param) tuple form above.
            self.projection_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)
        else:
            raise ValueError('Unknown activation')

        # Activation function after the output scores (phi_2 in the paper)
        response_activation = self.params.get('response_activation', 'none')
        if isinstance(response_activation, tuple):
            response_activation, act_param = response_activation

        if response_activation == 'none':
            self.response_activation = lambda x: x
        elif response_activation == 'relu':
            self.response_activation = torch.nn.ReLU(inplace=True)
        elif response_activation == 'elu':
            self.response_activation = torch.nn.ELU(inplace=True)
        elif response_activation == 'mlu':
            self.response_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)
        else:
            raise ValueError('Unknown activation')

    def generate_init_samples(self, im: torch.Tensor) -> TensorList:
        """Generate augmented initial samples."""

        # Compute augmentation size
        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)
        aug_expansion_sz = self.img_sample_sz.clone()
        aug_output_sz = None
        if aug_expansion_factor is not None and aug_expansion_factor != 1:
            # Expand the extracted patch, keeping the parity of the size difference even
            # so the crop stays centered.
            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()
            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2
            aug_expansion_sz = aug_expansion_sz.float()
            aug_output_sz = self.img_sample_sz.long().tolist()

        # Random shift operator
        get_rand_shift = lambda: None
        random_shift_factor = self.params.get('random_shift_factor', 0)
        if random_shift_factor > 0:
            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor).long().tolist()

        # Create transformations
        self.transforms = [augmentation.Identity(aug_output_sz)]
        if 'shift' in self.params.augmentation:
            self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation['shift']])
        if 'relativeshift' in self.params.augmentation:
            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()
            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation['relativeshift']])
        if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:
            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
        if 'blur' in self.params.augmentation:
            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation['blur']])
        if 'scale' in self.params.augmentation:
            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation['scale']])
        if 'rotate' in self.params.augmentation:
            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift())
                                    for angle in self.params.augmentation['rotate']])

        # Generate initial samples
        init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)

        # Remove augmented samples for those that shall not have
        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
            if not use_aug:
                init_samples[i] = init_samples[i][0:1, ...]

        # Add dropout samples
        if 'dropout' in self.params.augmentation:
            num, prob = self.params.augmentation['dropout']
            # Dropout samples share the identity transform, so duplicate it in the list.
            self.transforms.extend(self.transforms[:1]*num)
            for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):
                if use_aug:
                    init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])

        return init_samples

    def init_projection_matrix(self, x):
        """Initialize the feature projection matrix (PCA of the first-frame features or random)."""
        # Set if using projection matrix
        self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)

        if self.params.use_projection_matrix:
            self.compressed_dim = self.fparams.attribute('compressed_dim', None)

            proj_init_method = self.params.get('proj_init_method', 'pca')
            if proj_init_method == 'pca':
                # Leading singular vectors of the channel covariance give the PCA basis.
                x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])
                x_mat -= x_mat.mean(dim=1, keepdim=True)
                cov_x = x_mat @ x_mat.t()
                self.projection_matrix = TensorList(
                    [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone() for C, cdim in zip(cov_x, self.compressed_dim)])
            elif proj_init_method == 'randn':
                self.projection_matrix = TensorList(
                    [None if cdim is None else ex.new_zeros(cdim,ex.shape[1],1,1).normal_(0,1/math.sqrt(ex.shape[1])) for ex, cdim in zip(x, self.compressed_dim)])
        else:
            self.compressed_dim = x.size(1)
            self.projection_matrix = TensorList([None]*len(x))

    def init_label_function(self, train_x):
        """Allocate the label memory and create Gaussian labels for the initial samples."""
        # Allocate label function
        self.y = TensorList([x.new_zeros(self.params.sample_memory_size, 1, x.shape[2], x.shape[3]) for x in train_x])

        # Output sigma factor
        output_sigma_factor =
self.fparams.attribute('output_sigma_factor')  # (continuation of the assignment split at the chunk boundary above)
        self.sigma = (self.feature_sz / self.img_support_sz * self.base_target_sz).prod().sqrt() * output_sigma_factor * torch.ones(2)

        # Center pos in normalized coords
        target_center_norm = (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)

        # Generate label functions
        for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz, self.kernel_size, train_x):
            # Half-pixel offset for even kernel sizes.
            center_pos = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])
            for i, T in enumerate(self.transforms[:x.shape[0]]):
                sample_center = center_pos + torch.Tensor(T.shift) / self.img_support_sz * sz
                y[i, 0, ...] = dcf.label_function_spatial(sz, sig, sample_center)

        # Return only the ones to use for initial training
        return TensorList([y[:x.shape[0], ...] for y, x in zip(self.y, train_x)])

    def init_memory(self, train_x):
        """Allocate the sample memory and seed it with the first-frame samples."""
        # Initialize first-frame training samples
        self.num_init_samples = train_x.size(0)
        self.init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])
        self.init_training_samples = train_x

        # Sample counters and weights
        self.num_stored_samples = self.num_init_samples.copy()
        self.previous_replace_ind = [None] * len(self.num_stored_samples)
        self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])
        for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, self.num_init_samples):
            sw[:num] = init_sw

        # Initialize memory
        self.training_samples = TensorList(
            [x.new_zeros(self.params.sample_memory_size, cdim, x.shape[2], x.shape[3]) for x, cdim in zip(train_x, self.compressed_dim)])

    def update_memory(self, sample_x: TensorList, sample_y: TensorList, learning_rate = None):
        """Insert a new training sample (and label) into memory, replacing the lowest-weight slot."""
        replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, self.fparams, learning_rate)
        self.previous_replace_ind = replace_ind
        for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):
            train_samp[ind:ind+1,...] = x
        for y_memory, y, ind in zip(self.y, sample_y, replace_ind):
            y_memory[ind:ind+1,...] = y
        if self.hinge_mask is not None:
            for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):
                m[ind:ind+1,...] = (y >= self.params.hinge_threshold).float()
        self.num_stored_samples += 1

    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams, learning_rate = None):
        """Decay existing sample weights and pick the memory slot to replace for each feature.

        returns:
            replace_ind - list with one replacement index per feature.
        """
        # Update weights and get index to replace in memory
        replace_ind = []
        for sw, prev_ind, num_samp, num_init, fpar in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams):
            lr = learning_rate
            if lr is None:
                lr = fpar.learning_rate

            init_samp_weight = getattr(fpar, 'init_samples_minimum_weight', None)
            if init_samp_weight == 0:
                init_samp_weight = None
            # When init samples have a protected minimum weight, never replace them.
            s_ind = 0 if init_samp_weight is None else num_init

            if num_samp == 0 or lr == 1:
                sw[:] = 0
                sw[0] = 1
                r_ind = 0
            else:
                # Get index to replace
                _, r_ind = torch.min(sw[s_ind:], 0)
                r_ind = r_ind.item() + s_ind

                # Update weights
                if prev_ind is None:
                    sw /= 1 - lr
                    sw[r_ind] = lr
                else:
                    sw[r_ind] = sw[prev_ind] / (1 - lr)

            sw /= sw.sum()
            # Re-normalize so the initial samples keep at least init_samp_weight total mass.
            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:
                sw /= init_samp_weight + sw[num_init:].sum()
                sw[:num_init] = init_samp_weight / num_init

            replace_ind.append(r_ind)

        return replace_ind

    def get_label_function(self, sample_pos, sample_scale):
        """Create a Gaussian label for a sample extracted at sample_pos / sample_scale."""
        # Generate label function
        train_y = TensorList()
        target_center_norm = (self.pos - sample_pos) / (sample_scale * self.img_support_sz)
        for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):
            center = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])
            train_y.append(dcf.label_function_spatial(sz, sig, center))
        return train_y

    def update_state(self, new_pos, new_scale = None):
        """Set the new target position (clamped near the image) and optionally the scale."""
        # Update scale
        if new_scale is not None:
            self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)
            self.target_sz = self.base_target_sz * self.target_scale

        # Update pos, keeping at least inside_ratio of the target inside the image.
        inside_ratio = 0.2
        inside_offset = (inside_ratio - 0.5) * self.target_sz
        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)

    def get_iounet_box(self, pos, sz, sample_pos, sample_scale):
        """All inputs in original image coordinates.
        Generates a box in the IoUNet crop coordinate frame, as [x, y, w, h]."""
        box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2
        box_sz = sz / sample_scale
        target_ul = box_center - (box_sz - 1) / 2
        # flip converts (row, col) ordering to (x, y).
        return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])

    def init_iou_net(self):
        """Freeze the IoU predictor and compute the target modulation vectors."""
        # Setup IoU net
        self.iou_predictor = self.params.features.get_unique_attribute('iou_predictor')
        for p in self.iou_predictor.parameters():
            p.requires_grad = False

        # Get target boxes for the different augmentations; stop at the first transform
        # type the IoU net cannot handle (e.g. rotation).
        self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz, self.pos.round(), self.target_scale)
        target_boxes = TensorList()
        if self.params.iounet_augmentation:
            for T in self.transforms:
                if not isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):
                    break
                target_boxes.append(self.iou_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))
        else:
            target_boxes.append(self.iou_target_box.clone())
        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)

        # Get iou features
        iou_backbone_features = self.get_iou_backbone_features()

        # Remove other augmentations such as rotation
        iou_backbone_features = TensorList([x[:target_boxes.shape[0],...]
for x in iou_backbone_features])  # (continuation of the TensorList comprehension split at the chunk boundary above)

        # Extract target feat
        with torch.no_grad():
            target_feat = self.iou_predictor.get_modulation(iou_backbone_features, target_boxes)
        self.target_feat = TensorList([x.detach().mean(0) for x in target_feat])

        if self.params.get('iounet_not_use_reference', False):
            # Replace the reference modulation by a constant with the same norm (ablation option).
            self.target_feat = TensorList([torch.full_like(tf, tf.norm() / tf.numel()) for tf in self.target_feat])

    def refine_target_box(self, sample_pos, sample_scale, scale_ind, update_scale = True):
        """Refine the target bounding box with the IoU predictor and update pos/size/scale."""
        # Initial box for refinement
        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)

        # Extract features from the relevant scale
        iou_features = self.get_iou_features()
        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])

        init_boxes = init_box.view(1,4).clone()
        if self.params.num_init_random_boxes > 0:
            # Get random initial boxes (jittered around init_box, with a minimum edge size)
            square_box_sz = init_box[2:].prod().sqrt()
            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])
            minimal_edge_size = init_box[2:].min()/3
            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor
            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)
            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]
            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)
            init_boxes = torch.cat([init_box.view(1,4), init_boxes])

        # Refine boxes by maximizing iou
        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)

        # Remove weird boxes with extreme aspect ratios
        output_boxes[:, 2:].clamp_(1)
        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]
        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)
        output_boxes = output_boxes[keep_ind,:]
        output_iou = output_iou[keep_ind]

        # If no box found
        if output_boxes.shape[0] == 0:
            return

        # Take average of top k boxes
        k = self.params.get('iounet_k', 5)
        topk = min(k, output_boxes.shape[0])
        _, inds = torch.topk(output_iou, topk)
        predicted_box = output_boxes[inds, :].mean(0)
        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)

        # Update position (crop coords -> image coords, (x, y) -> (row, col) via flip)
        new_pos = predicted_box[:2] + predicted_box[2:]/2 - (self.iou_img_sample_sz - 1) / 2
        new_pos = new_pos.flip((0,)) * sample_scale + sample_pos
        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale
        new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())

        self.pos_iounet = new_pos.clone()

        if self.params.get('use_iounet_pos_for_learning', True):
            self.pos = new_pos.clone()

        self.target_sz = new_target_sz

        if update_scale:
            self.target_scale = new_scale

    def optimize_boxes(self, iou_features, init_boxes):
        """Gradient-ascent refinement of candidate boxes to maximize predicted IoU.

        returns:
            (refined boxes (N, 4) on CPU, predicted IoU per box (N,) on CPU)
        """
        # Optimize iounet boxes
        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)
        step_length = self.params.box_refinement_step_length
        init_step_length = self.params.box_refinement_step_length
        if isinstance(step_length, (tuple, list)):
            # Separate step lengths for position (x, y) and size (w, h).
            init_step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(
                self.params.device).view(1, 1, 4)
        box_refinement_space = self.params.get('box_refinement_space', 'default')

        step_length = init_step_length * output_boxes.new_ones(1, output_boxes.shape[1], 1)
        outputs_prev = -99999999 * output_boxes.new_ones(1, output_boxes.shape[1])
        step = torch.zeros_like(output_boxes)

        if box_refinement_space == 'default':
            # Optimization using bounding box space used in original IoUNet
            for i_ in range(self.params.box_refinement_iter):
                # forward pass
                bb_init = output_boxes.clone().detach()
                bb_init.requires_grad = True

                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)
                if isinstance(outputs, (list, tuple)):
                    outputs = outputs[0]

                outputs.backward(gradient=torch.ones_like(outputs))

                # Update mask and step length: only boxes whose IoU improved take a step;
                # the others shrink their step length.
                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)
                update_mask_float = update_mask.view(1, -1, 1).float()
                step_length[~update_mask, :] *= self.params.box_refinement_step_decay
                outputs_prev = outputs.detach().clone()

                # Update proposal (gradient scaled by box size, as in the IoUNet paper)
                step = update_mask_float * step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) - (
                    1.0 - update_mask_float) * step
                output_boxes = bb_init + step
                output_boxes.detach_()
        elif box_refinement_space == 'relative':
            # Optimization using relative bounding box space
            sz_norm = output_boxes[:, :1, 2:].clone()
            output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)
            for i_ in range(self.params.box_refinement_iter):
                # forward pass
                bb_init_rel = output_boxes_rel.clone().detach()
                bb_init_rel.requires_grad = True

                bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)
                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)
                if isinstance(outputs, (list, tuple)):
                    outputs = outputs[0]

                outputs.backward(gradient=torch.ones_like(outputs))

                # Update mask and step length
                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)
                update_mask_float = update_mask.view(1, -1, 1).float()
                step_length[~update_mask, :] *= self.params.box_refinement_step_decay
                outputs_prev = outputs.detach().clone()

                # Update proposal
                step = update_mask_float * step_length * bb_init_rel.grad - (1.0 - update_mask_float) * step
                output_boxes_rel = bb_init_rel + step
                output_boxes_rel.detach_()

                # for s in outputs.view(-1): print('{:.2f} '.format(s.item()), end='')
                # print('')
                # print('')

            output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)
        else:
            raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))

        return output_boxes.view(-1, 4).cpu(), outputs.detach().view(-1).cpu()

================================================
FILE: external/AR/pytracking/tracker/atom/optim.py
================================================
import torch
from pytracking import optimization, TensorList, operation
import math


class FactorizedConvProblem(optimization.L2Problem):
    """L2 problem for jointly optimizing the filter and the projection matrix (ATOM)."""

    def __init__(self, training_samples: TensorList, y:TensorList,
                 filter_reg: torch.Tensor, projection_reg, params, sample_weights: TensorList,
                 projection_activation, response_activation):
        self.training_samples = training_samples
        self.y = y
        self.filter_reg = filter_reg
        self.sample_weights = sample_weights
        self.params = params
        self.projection_reg = projection_reg
        self.projection_activation = projection_activation
        self.response_activation = response_activation

        # Diagonal preconditioner: regularization weights for [filter, projection] blocks.
        self.diag_M = self.filter_reg.concat(projection_reg)

    def __call__(self, x: TensorList):
        """
        Compute residuals
        :param x: [filters, projection_matrices]
        :return: [data_terms, filter_regularizations, proj_mat_regularizations]
        """
        # NOTE(review): 'filter' shadows the builtin; kept for fidelity with the original.
        filter = x[:len(x)//2]  # w2 in paper
        P = x[len(x)//2:]       # w1 in paper

        # Do first convolution
        compressed_samples = operation.conv1x1(self.training_samples, P).apply(self.projection_activation)

        # Do second convolution
        residuals = operation.conv2d(compressed_samples, filter, mode='same').apply(self.response_activation)

        # Compute data residuals
        residuals = residuals - self.y

        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals

        # Add regularization for the filter
        residuals.extend(self.filter_reg.apply(math.sqrt) * filter)

        # Add regularization for projection matrix
        residuals.extend(self.projection_reg.apply(math.sqrt) * P)

        return residuals

    def ip_input(self, a: TensorList, b: TensorList):
        """Inner product in the joint [filter, projection] input space."""
        num = len(a) // 2  # Number of filters
        a_filter = a[:num]
        b_filter = b[:num]
        a_P = a[num:]
        b_P = b[num:]

        # Filter inner product
        # ip_out = a_filter.reshape(-1) @ b_filter.reshape(-1)
        ip_out = operation.conv2d(a_filter, b_filter).view(-1)

        # Add projection matrix part
        # ip_out += a_P.reshape(-1) @ b_P.reshape(-1)
        ip_out += operation.conv2d(a_P.view(1,-1,1,1), b_P.view(1,-1,1,1)).view(-1)

        # Have independent inner products for each filter
        return ip_out.concat(ip_out.clone())

    def M1(self, x: TensorList):
        """Preconditioner: elementwise division by the regularization diagonal."""
        return x / self.diag_M


class ConvProblem(optimization.L2Problem):
    """L2 problem for optimizing the filter only (projection matrix fixed)."""

    def __init__(self, training_samples: TensorList, y:TensorList, filter_reg: torch.Tensor,
                 sample_weights: TensorList, response_activation):
        self.training_samples = training_samples
        self.y = y
        self.filter_reg = filter_reg
        self.sample_weights = sample_weights
        self.response_activation = response_activation

    def __call__(self, x: TensorList):
        """
        Compute residuals
        :param x: [filters]
        :return: [data_terms, filter_regularizations]
        """
        # Do convolution and compute residuals
        residuals = operation.conv2d(self.training_samples, x, mode='same').apply(self.response_activation)
        residuals = residuals - self.y

        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals

        # Add regularization for the filter
        residuals.extend(self.filter_reg.apply(math.sqrt) * x)

        return residuals

    def ip_input(self, a: TensorList, b: TensorList):
        # return a.reshape(-1) @ b.reshape(-1)
        # return (a * b).sum()
        return operation.conv2d(a, b).view(-1)

================================================
FILE: external/AR/pytracking/tracker/base/__init__.py
================================================
from .basetracker import BaseTracker

================================================
FILE: external/AR/pytracking/tracker/base/basetracker.py
================================================
# NOTE(review): '_collections' is the CPython-private accelerator module; the public
# 'collections' module is the intended import — confirm before changing, since the
# name is re-exported there on CPython.
from _collections import OrderedDict


class BaseTracker:
    """Base class for all trackers."""

    def __init__(self, params):
        self.params = params
        self.visdom = None

    def initialize(self, image, info: dict) -> dict:
        """Overload this function in your tracker. This should initialize the model."""
        raise NotImplementedError

    def track(self, image, info: dict = None) -> dict:
        """Overload this function in your tracker.
        This should track in the frame and update the model."""
        raise NotImplementedError

    def visdom_draw_tracking(self, image, box, segmentation=None):
        """Send the current frame, box(es) and optional segmentation to visdom."""
        if isinstance(box, OrderedDict):
            box = [v for k, v in box.items()]
        else:
            box = (box,)
        if segmentation is None:
            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')
        else:
            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')

================================================
FILE: external/AR/pytracking/tracker/dimp/__init__.py
================================================
from .dimp import DiMP

def get_tracker_class():
    return DiMP

================================================
FILE: external/AR/pytracking/tracker/dimp/dimp.py
================================================
from pytracking.tracker.base import BaseTracker
import torch
import torch.nn.functional as F
import math
import time
from pytracking import dcf, TensorList
from pytracking.features.preprocessing import numpy_to_torch
from pytracking.utils.plotting import show_tensor, plot_graph
from pytracking.features.preprocessing import sample_patch_multiscale, sample_patch_transformed
from pytracking.features import augmentation
import ltr.data.bounding_box_utils as bbutils
from ltr.models.target_classifier.initializer import FilterInitializerZero
from ltr.models.layers import activation


class DiMP(BaseTracker):
    multiobj_mode = 'parallel'

    def initialize_features(self):
        """Initialize the network (once per process)."""
        if not getattr(self, 'features_initialized', False):
            self.params.net.initialize()
            self.features_initialized = True

    def initialize(self, image, info: dict) -> dict:
        """Initialize the tracker on the first frame from info['init_bbox'] ([x, y, w, h])."""
        # Initialize some stuff
        self.frame_num = 1
        if not self.params.has('device'):
            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'

        # Initialize network
        self.initialize_features()

        # The DiMP network
        self.net = self.params.net

        # Time initialization
        tic = time.time()

        # Convert image
        im = numpy_to_torch(image)

        # Get target position and size (stored as (row, col) center and (h, w) size)
        state = info['init_bbox']
        self.pos = torch.Tensor([state[1] +
(state[3] - 1)/2, state[0] + (state[2] - 1)/2]) self.target_sz = torch.Tensor([state[3], state[2]]) # Get object id self.object_id = info.get('object_ids', [None])[0] self.id_str = '' if self.object_id is None else ' {}'.format(self.object_id) # Set sizes self.image_sz = torch.Tensor([im.shape[2], im.shape[3]]) sz = self.params.image_sample_size sz = torch.Tensor([sz, sz] if isinstance(sz, int) else sz) if self.params.get('use_image_aspect_ratio', False): sz = self.image_sz * sz.prod().sqrt() / self.image_sz.prod().sqrt() stride = self.params.get('feature_stride', 32) sz = torch.round(sz / stride) * stride self.img_sample_sz = sz self.img_support_sz = self.img_sample_sz # Set search area search_area = torch.prod(self.target_sz * self.params.search_area_scale).item() self.target_scale = math.sqrt(search_area) / self.img_sample_sz.prod().sqrt() # Target size in base scale self.base_target_sz = self.target_sz / self.target_scale # Setup scale factors if not self.params.has('scale_factors'): self.params.scale_factors = torch.ones(1) elif isinstance(self.params.scale_factors, (list, tuple)): self.params.scale_factors = torch.Tensor(self.params.scale_factors) # Setup scale bounds self.min_scale_factor = torch.max(10 / self.base_target_sz) self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz) # Extract and transform sample init_backbone_feat = self.generate_init_samples(im) # Initialize classifier self.init_classifier(init_backbone_feat) # Initialize IoUNet if self.params.get('use_iou_net', True): self.init_iou_net(init_backbone_feat) out = {'time': time.time() - tic} return out def track(self, image, info: dict = None) -> dict: self.debug_info = {} self.frame_num += 1 self.debug_info['frame_num'] = self.frame_num # Convert image im = numpy_to_torch(image) # ------- LOCALIZATION ------- # # Extract backbone features backbone_feat, sample_coords, im_patches = self.extract_backbone_features(im, self.get_centered_sample_pos(), self.target_scale * 
    def track(self, image, info: dict = None) -> dict:
        """Track the target in a new frame.

        Runs localization (backbone + classifier + peak finding), optional
        IoUNet box refinement, and an optional online classifier update.

        Args:
            image: New frame as a numpy image.
            info: Unused here; kept for the tracker interface.

        Returns:
            Dict with 'target_bbox' ([x, y, w, h], or [-1]*4 when lost and
            'output_not_found_box' is set) and 'dcf_center' (the raw
            classifier peak position in (x, y) image coordinates — an
            AlphaRefine-specific addition, see the dated marker below).
        """
        self.debug_info = {}

        self.frame_num += 1
        self.debug_info['frame_num'] = self.frame_num

        # Convert image
        im = numpy_to_torch(image)

        # ------- LOCALIZATION ------- #

        # Extract backbone features from the search region(s)
        backbone_feat, sample_coords, im_patches = self.extract_backbone_features(im, self.get_centered_sample_pos(),
                                                                                  self.target_scale * self.params.scale_factors,
                                                                                  self.img_sample_sz)
        # Extract classification features
        test_x = self.get_classification_features(backbone_feat)

        # Location of sample (center and actual scale of each extracted patch)
        sample_pos, sample_scales = self.get_sample_location(sample_coords)

        # Compute classification scores
        scores_raw = self.classify_target(test_x)

        # Localize the target; flag is one of
        # 'normal' / 'not_found' / 'uncertain' / 'hard_negative' / None
        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw, sample_pos, sample_scales)
        new_pos = sample_pos[scale_ind,:] + translation_vec

        # Update position and scale
        if flag != 'not_found':
            if self.params.get('use_iou_net', True):
                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'
                if self.params.get('use_classifier', True):
                    self.update_state(new_pos)
                self.refine_target_box(backbone_feat, sample_pos[scale_ind,:], sample_scales[scale_ind], scale_ind, update_scale_flag)
            elif self.params.get('use_classifier', True):
                self.update_state(new_pos, sample_scales[scale_ind])

        # ------- UPDATE ------- #

        update_flag = flag not in ['not_found', 'uncertain']
        hard_negative = (flag == 'hard_negative')
        # Use a boosted learning rate when a hard negative (distractor) is detected
        learning_rate = self.params.get('hard_negative_learning_rate', None) if hard_negative else None

        if update_flag and self.params.get('update_classifier', False):
            # Get train sample
            train_x = test_x[scale_ind:scale_ind+1, ...]

            # Create target_box and label for spatial sample
            target_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos[scale_ind,:], sample_scales[scale_ind])

            # Update the classifier model
            self.update_classifier(train_x, target_box, learning_rate, s[scale_ind,...])

        # Set the pos of the tracker to iounet pos (refined estimate wins)
        if self.params.get('use_iou_net', True) and flag != 'not_found' and hasattr(self, 'pos_iounet'):
            self.pos = self.pos_iounet.clone()

        score_map = s[scale_ind, ...]
        max_score = torch.max(score_map).item()

        # Visualize and set debug info
        self.search_area_box = torch.cat((sample_coords[scale_ind,[1,0]], sample_coords[scale_ind,[3,2]] - sample_coords[scale_ind,[1,0]] - 1))
        self.debug_info['flag' + self.id_str] = flag
        self.debug_info['max_score' + self.id_str] = max_score
        if self.visdom is not None:
            self.visdom.register(score_map, 'heatmap', 2, 'Score Map' + self.id_str)
            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')
        elif self.params.debug >= 2:
            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))

        # Compute output bounding box: convert (row, col) center / (h, w) size
        # back to [x, y, w, h]
        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))

        if self.params.get('output_not_found_box', False) and flag == 'not_found':
            output_state = [-1, -1, -1, -1]
        else:
            output_state = new_state.tolist()

        '''2020.4.26 '''
        out = {'target_bbox': output_state, 'dcf_center': new_pos[[1,0]]}
        return out
    def localize_advanced(self, scores, sample_pos, sample_scales):
        """Run the target advanced localization (as in ATOM).

        Finds the primary score peak, classifies the frame via score
        thresholds, then masks the target neighborhood and inspects the
        second peak to detect distractors / hard negatives.

        Args:
            scores: Classifier score maps, one per scale.
            sample_pos: Center position of each extracted sample.
            sample_scales: Scale of each extracted sample.

        Returns:
            (translation_vec, scale_ind, scores, flag) where flag is one of
            'normal', 'not_found', 'uncertain', 'hard_negative'.
        """
        sz = scores.shape[-2:]
        score_sz = torch.Tensor(list(sz))
        output_sz = score_sz - (self.kernel_size + 1) % 2
        score_center = (score_sz - 1)/2

        scores_hn = scores
        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):
            # Keep an unwindowed copy for hard-negative analysis, window the rest
            scores_hn = scores.clone()
            scores *= self.output_window

        max_score1, max_disp1 = dcf.max2d(scores)
        _, scale_ind = torch.max(max_score1, dim=0)
        sample_scale = sample_scales[scale_ind]
        max_score1 = max_score1[scale_ind]
        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)
        target_disp1 = max_disp1 - score_center
        # Convert the score-map displacement to image-coordinate translation
        translation_vec1 = target_disp1 * (self.img_support_sz / output_sz) * sample_scale

        # Threshold cascade on the primary peak score
        if max_score1.item() < self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores_hn, 'not_found'
        if max_score1.item() < self.params.get('uncertain_threshold', -float('inf')):
            return translation_vec1, scale_ind, scores_hn, 'uncertain'
        if max_score1.item() < self.params.get('hard_sample_threshold', -float('inf')):
            return translation_vec1, scale_ind, scores_hn, 'hard_negative'

        # Mask out target neighborhood so the second maximum is a distractor
        target_neigh_sz = self.params.target_neighborhood_scale * (self.target_sz / sample_scale) * (output_sz / self.img_support_sz)

        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)
        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])
        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)
        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])
        scores_masked = scores_hn[scale_ind:scale_ind + 1, ...].clone()
        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0

        # Find new maximum (candidate distractor peak)
        max_score2, max_disp2 = dcf.max2d(scores_masked)
        max_disp2 = max_disp2.float().cpu().view(-1)
        target_disp2 = max_disp2 - score_center
        translation_vec2 = target_disp2 * (self.img_support_sz / output_sz) * sample_scale

        # Previous target location expressed in score-map cells
        prev_target_vec = (self.pos - sample_pos[scale_ind,:]) / ((self.img_support_sz / output_sz) * sample_scale)

        # Handle the different cases
        if max_score2 > self.params.distractor_threshold * max_score1:
            disp_norm1 = torch.sqrt(torch.sum((target_disp1-prev_target_vec)**2))
            disp_norm2 = torch.sqrt(torch.sum((target_disp2-prev_target_vec)**2))
            # NOTE(review): 'dispalcement_scale' is a typo for 'displacement_scale',
            # but it is the actual attribute name set in the parameter files —
            # do not rename without updating all configs.
            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2

            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:
                return translation_vec1, scale_ind, scores_hn, 'hard_negative'
            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:
                # Second peak is nearer the previous position — prefer it
                return translation_vec2, scale_ind, scores_hn, 'hard_negative'
            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:
                return translation_vec1, scale_ind, scores_hn, 'uncertain'

            # If also the distractor is close, return with highest score
            return translation_vec1, scale_ind, scores_hn, 'uncertain'

        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:
            return translation_vec1, scale_ind, scores_hn, 'hard_negative'

        return translation_vec1, scale_ind, scores_hn, 'normal'
- sample_pos[scale_ind,:]) / ((self.img_support_sz / output_sz) * sample_scale) # Handle the different cases if max_score2 > self.params.distractor_threshold * max_score1: disp_norm1 = torch.sqrt(torch.sum((target_disp1-prev_target_vec)**2)) disp_norm2 = torch.sqrt(torch.sum((target_disp2-prev_target_vec)**2)) disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2 if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold: return translation_vec1, scale_ind, scores_hn, 'hard_negative' if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold: return translation_vec2, scale_ind, scores_hn, 'hard_negative' if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold: return translation_vec1, scale_ind, scores_hn, 'uncertain' # If also the distractor is close, return with highest score return translation_vec1, scale_ind, scores_hn, 'uncertain' if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold: return translation_vec1, scale_ind, scores_hn, 'hard_negative' return translation_vec1, scale_ind, scores_hn, 'normal' def extract_backbone_features(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor): im_patches, patch_coords = sample_patch_multiscale(im, pos, scales, sz, mode=self.params.get('border_mode', 'replicate'), max_scale_change=self.params.get('patch_max_scale_change', None)) with torch.no_grad(): backbone_feat = self.net.extract_backbone(im_patches) return backbone_feat, patch_coords, im_patches def get_classification_features(self, backbone_feat): with torch.no_grad(): return self.net.extract_classification_feat(backbone_feat) def get_iou_backbone_features(self, backbone_feat): return self.net.get_backbone_bbreg_feat(backbone_feat) def get_iou_features(self, backbone_feat): with torch.no_grad(): return self.net.bb_regressor.get_iou_feat(self.get_iou_backbone_features(backbone_feat)) def get_iou_modulation(self, iou_backbone_feat, target_boxes): 
    def generate_init_samples(self, im: torch.Tensor) -> TensorList:
        """Perform data augmentation to generate initial training samples.

        Builds the list of augmentation transforms (stored in
        ``self.transforms``), extracts the augmented patches and returns
        their backbone features.
        """
        mode = self.params.get('border_mode', 'replicate')
        if mode == 'inside':
            # Get new sample size if forced inside the image
            im_sz = torch.Tensor([im.shape[2], im.shape[3]])
            sample_sz = self.target_scale * self.img_sample_sz
            shrink_factor = (sample_sz.float() / im_sz)
            # NOTE(review): the outer condition only tests 'inside', so the
            # 'inside_major' branch below is unreachable here — upstream
            # pytracking guards with (mode == 'inside' or mode == 'inside_major');
            # TODO confirm whether this copy intentionally dropped that.
            if mode == 'inside':
                shrink_factor = shrink_factor.max()
            elif mode == 'inside_major':
                shrink_factor = shrink_factor.min()
            shrink_factor.clamp_(min=1, max=self.params.get('patch_max_scale_change', None))
            sample_sz = (sample_sz.float() / shrink_factor)
            self.init_sample_scale = (sample_sz / self.img_sample_sz).prod().sqrt()
            tl = self.pos - (sample_sz - 1) / 2
            br = self.pos + sample_sz / 2 + 1
            # Shift the crop so it lies inside the image borders
            global_shift = - ((-tl).clamp(0) - (br - im_sz).clamp(0)) / self.init_sample_scale
        else:
            self.init_sample_scale = self.target_scale
            global_shift = torch.zeros(2)

        self.init_sample_pos = self.pos.round()

        # Compute augmentation size (patches may be extracted larger, then cropped)
        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)
        aug_expansion_sz = self.img_sample_sz.clone()
        aug_output_sz = None
        if aug_expansion_factor is not None and aug_expansion_factor != 1:
            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()
            # Keep the expansion even so the crop stays centered
            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2
            aug_expansion_sz = aug_expansion_sz.float()
            aug_output_sz = self.img_sample_sz.long().tolist()

        # Random shift for each sample
        get_rand_shift = lambda: None
        random_shift_factor = self.params.get('random_shift_factor', 0)
        if random_shift_factor > 0:
            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor + global_shift).long().tolist()

        # Always put identity transformation first, since it is the unaugmented sample that is always used
        self.transforms = [augmentation.Identity(aug_output_sz, global_shift.long().tolist())]

        augs = self.params.augmentation if self.params.get('use_augmentation', True) else {}

        # Add all augmentations
        if 'shift' in augs:
            self.transforms.extend([augmentation.Translation(shift, aug_output_sz, global_shift.long().tolist()) for shift in augs['shift']])
        if 'relativeshift' in augs:
            # Shifts given as a fraction of half the sample size
            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()
            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz, global_shift.long().tolist()) for shift in augs['relativeshift']])
        if 'fliplr' in augs and augs['fliplr']:
            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))
        if 'blur' in augs:
            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in augs['blur']])
        if 'scale' in augs:
            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in augs['scale']])
        if 'rotate' in augs:
            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in augs['rotate']])

        # Extract augmented image patches
        im_patches = sample_patch_transformed(im, self.init_sample_pos, self.init_sample_scale, aug_expansion_sz, self.transforms)

        # Extract initial backbone features
        with torch.no_grad():
            init_backbone_feat = self.net.extract_backbone(im_patches)

        return init_backbone_feat
self.target_boxes[:init_target_boxes.shape[0],:] = init_target_boxes return init_target_boxes def init_memory(self, train_x: TensorList): # Initialize first-frame spatial training samples self.num_init_samples = train_x.size(0) init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x]) # Sample counters and weights for spatial self.num_stored_samples = self.num_init_samples.copy() self.previous_replace_ind = [None] * len(self.num_stored_samples) self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x]) for sw, init_sw, num in zip(self.sample_weights, init_sample_weights, self.num_init_samples): sw[:num] = init_sw # Initialize memory self.training_samples = TensorList( [x.new_zeros(self.params.sample_memory_size, x.shape[1], x.shape[2], x.shape[3]) for x in train_x]) for ts, x in zip(self.training_samples, train_x): ts[:x.shape[0],...] = x def update_memory(self, sample_x: TensorList, target_box, learning_rate = None): # Update weights and get replace ind replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, learning_rate) self.previous_replace_ind = replace_ind # Update sample and label memory for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind): train_samp[ind:ind+1,...] 
    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, learning_rate = None):
        """Update per-slot sample weights and pick the memory slot to replace.

        Args:
            sample_weights: TensorList of weight vectors, one per feature type.
            previous_replace_ind: Slot replaced in the previous update (or None).
            num_stored_samples: Current number of stored samples per feature.
            num_init_samples: Number of first-frame samples per feature.
            learning_rate: Override learning rate (e.g. hard-negative rate);
                falls back to ``self.params.learning_rate`` when None.

        Returns:
            List with the replaced slot index for each feature type.
        """
        # Update weights and get index to replace
        replace_ind = []
        for sw, prev_ind, num_samp, num_init in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples):
            lr = learning_rate
            if lr is None:
                lr = self.params.learning_rate

            # Minimum total weight reserved for the first-frame samples
            init_samp_weight = self.params.get('init_samples_minimum_weight', None)
            if init_samp_weight == 0:
                init_samp_weight = None
            # Protected slots (initial samples) are excluded from replacement
            s_ind = 0 if init_samp_weight is None else num_init

            if num_samp == 0 or lr == 1:
                # Empty memory, or full replacement: single sample takes all weight
                sw[:] = 0
                sw[0] = 1
                r_ind = 0
            else:
                # Get index to replace: first free slot, else the lowest-weight slot
                if num_samp < sw.shape[0]:
                    r_ind = num_samp
                else:
                    _, r_ind = torch.min(sw[s_ind:], 0)
                    r_ind = r_ind.item() + s_ind

                # Update weights (exponentially decaying over time)
                if prev_ind is None:
                    sw /= 1 - lr
                    sw[r_ind] = lr
                else:
                    sw[r_ind] = sw[prev_ind] / (1 - lr)

            # Renormalize, then enforce the minimum first-frame weight if set
            sw /= sw.sum()
            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:
                sw /= init_samp_weight + sw[num_init:].sum()
                sw[:num_init] = init_samp_weight / num_init

            replace_ind.append(r_ind)

        return replace_ind
    def init_iou_net(self, backbone_feat):
        """Initialize the IoUNet: freeze its weights and compute the target
        modulation vectors from the first-frame augmented samples."""
        # Setup IoU net and objective — the regressor is not trained online
        for p in self.net.bb_regressor.parameters():
            p.requires_grad = False

        # Get target boxes for the different augmentations
        self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale)
        target_boxes = TensorList()
        if self.params.iounet_augmentation:
            for T in self.transforms:
                # Stop at the first transform the IoUNet cannot handle
                # (e.g. scale/rotate); transforms are ordered so the
                # supported ones come first.
                if not isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):
                    break
                target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))
        else:
            # Only the (shifted) identity transform
            target_boxes.append(self.classifier_target_box + torch.Tensor([self.transforms[0].shift[1], self.transforms[0].shift[0], 0, 0]))
        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)

        # Get iou features
        iou_backbone_feat = self.get_iou_backbone_features(backbone_feat)

        # Remove other augmentations such as rotation
        iou_backbone_feat = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_feat])

        # Get modulation vector; average over the augmented samples
        self.iou_modulation = self.get_iou_modulation(iou_backbone_feat, target_boxes)
        if torch.is_tensor(self.iou_modulation[0]):
            self.iou_modulation = TensorList([x.detach().mean(0) for x in self.iou_modulation])
    def init_classifier(self, init_backbone_feat):
        """Initialize the target classifier: build augmentation-extended
        features, size bookkeeping, the output window, the initial target
        filter, and (optionally) the online sample memory."""
        # Get classification features
        x = self.get_classification_features(init_backbone_feat)

        # Overwrite some parameters in the classifier. (These are not generally changed)
        self._overwrite_classifier_params(feature_dim=x.shape[-3])

        # Add the dropout augmentation here, since it requires extraction of the classification features
        if 'dropout' in self.params.augmentation and self.params.get('use_augmentation', True):
            num, prob = self.params.augmentation['dropout']
            # Duplicate the identity transform for each dropout sample
            self.transforms.extend(self.transforms[:1]*num)
            x = torch.cat([x, F.dropout2d(x[0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])

        # Set feature size and other related sizes
        self.feature_sz = torch.Tensor(list(x.shape[-2:]))
        ksz = self.net.classifier.filter_size
        self.kernel_size = torch.Tensor([ksz, ksz] if isinstance(ksz, (int, float)) else ksz)
        self.output_sz = self.feature_sz + (self.kernel_size + 1)%2

        # Construct output window (Hann) used to suppress scores far from center
        self.output_window = None
        if self.params.get('window_output', False):
            if self.params.get('use_clipped_window', False):
                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), (self.output_sz*self.params.effective_search_area / self.params.search_area_scale).long(), centered=True).to(self.params.device)
            else:
                self.output_window = dcf.hann2d(self.output_sz.long(), centered=True).to(self.params.device)
            self.output_window = self.output_window.squeeze(0)

        # Get target boxes for the different augmentations
        target_boxes = self.init_target_boxes()

        # Set number of iterations
        plot_loss = self.params.debug > 0
        num_iter = self.params.get('net_opt_iter', None)

        # Get target filter by running the discriminative model prediction module
        with torch.no_grad():
            self.target_filter, _, losses = self.net.classifier.get_filter(x, target_boxes, num_iter=num_iter,
                                                                           compute_losses=plot_loss)

        # Init memory
        if self.params.get('update_classifier', True):
            self.init_memory(TensorList([x]))

        if plot_loss:
            if isinstance(losses, dict):
                losses = losses['train']
            self.losses = torch.cat(losses)
            if self.visdom is not None:
                self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str)
            elif self.params.debug >= 3:
                plot_graph(self.losses, 10, title='Training Loss' + self.id_str)
    def refine_target_box(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True):
        """Run the ATOM IoUNet to refine the target bounding box.

        Jitters random candidate boxes around the current estimate,
        gradient-ascends them on the predicted IoU, averages the top-k, and
        updates ``self.pos`` / ``self.target_sz`` / ``self.target_scale``.
        Dispatches to ``direct_box_regression`` when the regressor supports it.
        """
        if hasattr(self.net.bb_regressor, 'predict_bb'):
            return self.direct_box_regression(backbone_feat, sample_pos, sample_scale, scale_ind, update_scale)

        # Initial box for refinement (in crop coordinates)
        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)

        # Extract features from the relevant scale
        iou_features = self.get_iou_features(backbone_feat)
        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])

        # Generate random initial boxes around init_box
        init_boxes = init_box.view(1,4).clone()
        if self.params.num_init_random_boxes > 0:
            square_box_sz = init_box[2:].prod().sqrt()
            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])

            minimal_edge_size = init_box[2:].min()/3
            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor
            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)
            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]
            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)
            init_boxes = torch.cat([init_box.view(1,4), init_boxes])

        # Optimize the boxes by maximizing predicted IoU
        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)

        # Remove weird boxes (degenerate sizes / extreme aspect ratios)
        output_boxes[:, 2:].clamp_(1)
        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]
        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)
        output_boxes = output_boxes[keep_ind,:]
        output_iou = output_iou[keep_ind]

        # If no box found, keep the previous state unchanged
        if output_boxes.shape[0] == 0:
            return

        # Predict box: average the top-k boxes by predicted IoU
        k = self.params.get('iounet_k', 5)
        topk = min(k, output_boxes.shape[0])
        _, inds = torch.topk(output_iou, topk)
        predicted_box = output_boxes[inds, :].mean(0)
        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)

        # Get new position and size: crop coords (x, y, w, h) -> image
        # coords with (row, col) center and (h, w) size
        new_pos = predicted_box[:2] + predicted_box[2:] / 2
        new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos
        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale
        new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())

        self.pos_iounet = new_pos.clone()

        if self.params.get('use_iounet_pos_for_learning', True):
            self.pos = new_pos.clone()

        self.target_sz = new_target_sz

        if update_scale:
            self.target_scale = new_scale

        # self.visualize_iou_pred(iou_features, predicted_box)
self.params.get('box_refinement_space', 'default') if box_refinement_space == 'default': return self.optimize_boxes_default(iou_features, init_boxes) if box_refinement_space == 'relative': return self.optimize_boxes_relative(iou_features, init_boxes) raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space)) def optimize_boxes_default(self, iou_features, init_boxes): """Optimize iounet boxes with the default parametrization""" output_boxes = init_boxes.view(1, -1, 4).to(self.params.device) step_length = self.params.box_refinement_step_length if isinstance(step_length, (tuple, list)): step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]], device=self.params.device).view(1,1,4) for i_ in range(self.params.box_refinement_iter): # forward pass bb_init = output_boxes.clone().detach() bb_init.requires_grad = True outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init) if isinstance(outputs, (list, tuple)): outputs = outputs[0] outputs.backward(gradient = torch.ones_like(outputs)) # Update proposal output_boxes = bb_init + step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) output_boxes.detach_() step_length *= self.params.box_refinement_step_decay return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu() def optimize_boxes_relative(self, iou_features, init_boxes): """Optimize iounet boxes with the relative parametrization ised in PrDiMP""" output_boxes = init_boxes.view(1, -1, 4).to(self.params.device) step_length = self.params.box_refinement_step_length if isinstance(step_length, (tuple, list)): step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4) sz_norm = output_boxes[:,:1,2:].clone() output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm) for i_ in range(self.params.box_refinement_iter): # forward pass bb_init_rel = output_boxes_rel.clone().detach() bb_init_rel.requires_grad 
= True bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm) outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init) if isinstance(outputs, (list, tuple)): outputs = outputs[0] outputs.backward(gradient = torch.ones_like(outputs)) # Update proposal output_boxes_rel = bb_init_rel + step_length * bb_init_rel.grad output_boxes_rel.detach_() step_length *= self.params.box_refinement_step_decay # for s in outputs.view(-1): # print('{:.2f} '.format(s.item()), end='') # print('') # print('') output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm) return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu() def direct_box_regression(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True): """Implementation of direct bounding box regression.""" # Initial box for refinement init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale) # Extract features from the relevant scale iou_features = self.get_iou_features(backbone_feat) iou_features = TensorList([x[scale_ind:scale_ind+1,...] 
for x in iou_features]) # Generate random initial boxes init_boxes = init_box.view(1, 1, 4).clone().to(self.params.device) # Optimize the boxes output_boxes = self.net.bb_regressor.predict_bb(self.iou_modulation, iou_features, init_boxes).view(-1,4).cpu() # Remove weird boxes output_boxes[:, 2:].clamp_(1) predicted_box = output_boxes[0, :] # Get new position and size new_pos = predicted_box[:2] + predicted_box[2:] / 2 new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos new_target_sz = predicted_box[2:].flip((0,)) * sample_scale new_scale_bbr = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod()) new_scale = new_scale_bbr self.pos_iounet = new_pos.clone() if self.params.get('use_iounet_pos_for_learning', True): self.pos = new_pos.clone() self.target_sz = new_target_sz if update_scale: self.target_scale = new_scale def visualize_iou_pred(self, iou_features, center_box): center_box = center_box.view(1,1,4) sz_norm = center_box[...,2:].clone() center_box_rel = bbutils.rect_to_rel(center_box, sz_norm) pos_dist = 1.0 sz_dist = math.log(3.0) pos_step = 0.01 sz_step = 0.01 pos_scale = torch.arange(-pos_dist, pos_dist+pos_step, step=pos_step) sz_scale = torch.arange(-sz_dist, sz_dist+sz_step, step=sz_step) bbx = torch.zeros(1, pos_scale.numel(), 4) bbx[0,:,0] = pos_scale.clone() bby = torch.zeros(pos_scale.numel(), 1, 4) bby[:,0,1] = pos_scale.clone() bbw = torch.zeros(1, sz_scale.numel(), 4) bbw[0,:,2] = sz_scale.clone() bbh = torch.zeros(sz_scale.numel(), 1, 4) bbh[:,0,3] = sz_scale.clone() pos_boxes = bbutils.rel_to_rect((center_box_rel + bbx) + bby, sz_norm).view(1,-1,4).to(self.params.device) sz_boxes = bbutils.rel_to_rect((center_box_rel + bbw) + bbh, sz_norm).view(1,-1,4).to(self.params.device) pos_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, pos_boxes).exp() sz_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, sz_boxes).exp() 
show_tensor(pos_scores.view(pos_scale.numel(),-1), title='Position scores', fig_num=21) show_tensor(sz_scores.view(sz_scale.numel(),-1), title='Size scores', fig_num=22) def visdom_draw_tracking(self, image, box, segmentation=None): if hasattr(self, 'search_area_box'): self.visdom.register((image, box, self.search_area_box), 'Tracking', 1, 'Tracking') else: self.visdom.register((image, box), 'Tracking', 1, 'Tracking') ================================================ FILE: external/AR/pytracking/tracker/eco/__init__.py ================================================ from .eco import ECO def get_tracker_class(): return ECO ================================================ FILE: external/AR/pytracking/tracker/eco/eco.py ================================================ from pytracking.tracker.base import BaseTracker import torch import torch.nn.functional as F import math from pytracking import complex, dcf, fourier, TensorList from pytracking.libs.tensorlist import tensor_operation from pytracking.features.preprocessing import numpy_to_torch from pytracking.utils.plotting import show_tensor from pytracking.libs.optimization import GaussNewtonCG from .optim import FilterOptim, FactorizedConvProblem from pytracking.features import augmentation class ECO(BaseTracker): multiobj_mode = 'parallel' def initialize_features(self): if not getattr(self, 'features_initialized', False): self.params.features.initialize() self.features_initialized = True def initialize(self, image, info: dict) -> dict: state = info['init_bbox'] # Initialize some stuff self.frame_num = 1 if not self.params.has('device'): self.params.device = 'cuda' if self.params.use_gpu else 'cpu' # Initialize features self.initialize_features() # Chack if image is color self.params.features.set_is_color(image.shape[2] == 3) # Get feature specific params self.fparams = self.params.features.get_fparams('feature_params') # Get position and size self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] 
- 1)/2]) self.target_sz = torch.Tensor([state[3], state[2]]) # Set search area self.target_scale = 1.0 search_area = torch.prod(self.target_sz * self.params.search_area_scale).item() if search_area > self.params.max_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.max_image_sample_size) elif search_area < self.params.min_image_sample_size: self.target_scale = math.sqrt(search_area / self.params.min_image_sample_size) # Target size in base scale self.base_target_sz = self.target_sz / self.target_scale # Use odd square search area and set sizes feat_max_stride = max(self.params.features.stride()) self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2) self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride) # Set other sizes (corresponds to ECO code) self.img_support_sz = self.img_sample_sz self.feature_sz = self.params.features.size(self.img_sample_sz) self.filter_sz = self.feature_sz + (self.feature_sz + 1) % 2 self.output_sz = self.params.score_upsample_factor * self.img_support_sz # Interpolated size of the output self.compressed_dim = self.fparams.attribute('compressed_dim') # Number of filters self.num_filters = len(self.filter_sz) # Get window function self.window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz]) # Get interpolation function self.interp_fs = TensorList([dcf.get_interp_fourier(sz, self.params.interpolation_method, self.params.interpolation_bicubic_a, self.params.interpolation_centering, self.params.interpolation_windowing, self.params.device) for sz in self.filter_sz]) # Get regularization filter self.reg_filter = TensorList([dcf.get_reg_filter(self.img_support_sz, self.base_target_sz, fparams).to(self.params.device) for fparams in self.fparams]) self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1) # Get label function output_sigma_factor = 
self.fparams.attribute('output_sigma_factor') sigma = (self.filter_sz / self.img_support_sz) * torch.sqrt(self.base_target_sz.prod()) * output_sigma_factor self.yf = TensorList([dcf.label_function(sz, sig).to(self.params.device) for sz, sig in zip(self.filter_sz, sigma)]) # Optimization options self.params.precond_learning_rate = self.fparams.attribute('learning_rate') if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1: self.params.direction_forget_factor = 0 else: self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate # Convert image im = numpy_to_torch(image) # Setup bounds self.image_sz = torch.Tensor([im.shape[2], im.shape[3]]) self.min_scale_factor = torch.max(10 / self.base_target_sz) self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz) # Extract and transform sample x = self.generate_init_samples(im) # Initialize projection matrix x_mat = TensorList([e.permute(1,0,2,3).reshape(e.shape[1], -1).clone() for e in x]) x_mat -= x_mat.mean(dim=1, keepdim=True) cov_x = x_mat @ x_mat.t() self.projection_matrix = TensorList([torch.svd(C)[0][:,:cdim].clone() for C, cdim in zip(cov_x, self.compressed_dim)]) # Transform to get the training sample train_xf = self.preprocess_sample(x) # Shift the samples back if 'shift' in self.params.augmentation: for xf in train_xf: if xf.shape[0] == 1: continue for i, shift in enumerate(self.params.augmentation['shift']): shift_samp = 2 * math.pi * torch.Tensor(shift) / self.img_support_sz xf[1+i:2+i,...] 
= fourier.shift_fs(xf[1+i:2+i,...], shift=shift_samp) # Shift sample shift_samp = 2*math.pi * (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz) train_xf = fourier.shift_fs(train_xf, shift=shift_samp) # Initialize first-frame training samples num_init_samples = train_xf.size(0) self.init_sample_weights = TensorList([xf.new_ones(1) / xf.shape[0] for xf in train_xf]) self.init_training_samples = train_xf.permute(2, 3, 0, 1, 4) # Sample counters and weights self.num_stored_samples = num_init_samples self.previous_replace_ind = [None]*len(self.num_stored_samples) self.sample_weights = TensorList([xf.new_zeros(self.params.sample_memory_size) for xf in train_xf]) for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, num_init_samples): sw[:num] = init_sw # Initialize memory self.training_samples = TensorList( [xf.new_zeros(xf.shape[2], xf.shape[3], self.params.sample_memory_size, cdim, 2) for xf, cdim in zip(train_xf, self.compressed_dim)]) # Initialize filter self.filter = TensorList( [xf.new_zeros(1, cdim, xf.shape[2], xf.shape[3], 2) for xf, cdim in zip(train_xf, self.compressed_dim)]) # Do joint optimization self.joint_problem = FactorizedConvProblem(self.init_training_samples, self.yf, self.reg_filter, self.projection_matrix, self.params, self.init_sample_weights) joint_var = self.filter.concat(self.projection_matrix) self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug>=1), visdom=self.visdom) if self.params.update_projection_matrix: self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter) # Re-project samples with the new projection matrix compressed_samples = complex.mtimes(self.init_training_samples, self.projection_matrix) for train_samp, init_samp in zip(self.training_samples, compressed_samples): train_samp[:,:,:init_samp.shape[2],:,:] = init_samp # Initialize optimizer self.filter_optimizer = FilterOptim(self.params, 
self.reg_energy) self.filter_optimizer.register(self.filter, self.training_samples, self.yf, self.sample_weights, self.reg_filter) self.filter_optimizer.sample_energy = self.joint_problem.sample_energy self.filter_optimizer.residuals = self.joint_optimizer.residuals.clone() if not self.params.update_projection_matrix: self.filter_optimizer.run(self.params.init_CG_iter) # Post optimization self.filter_optimizer.run(self.params.post_init_CG_iter) self.symmetrize_filter() def track(self, image, info: dict = None) -> dict: self.debug_info = {} self.frame_num += 1 self.debug_info['frame_num'] = self.frame_num # Convert image im = numpy_to_torch(image) # ------- LOCALIZATION ------- # # Get sample sample_pos = self.pos.round() sample_scales = self.target_scale * self.params.scale_factors test_xf = self.extract_fourier_sample(im, self.pos, sample_scales, self.img_sample_sz) # Compute scores sf = self.apply_filter(test_xf) translation_vec, scale_ind, s = self.localize_target(sf) scale_change_factor = self.params.scale_factors[scale_ind] # Update position and scale self.update_state(sample_pos + translation_vec, self.target_scale * scale_change_factor) score_map = s[scale_ind, ...] max_score = torch.max(score_map).item() self.debug_info['max_score'] = max_score if self.visdom is not None: self.visdom.register(score_map, 'heatmap', 2, 'Score Map') self.visdom.register(self.debug_info, 'info_dict', 1, 'Status') elif self.params.debug >= 2: show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score)) # if self.params.debug >= 3: # for i, hf in enumerate(self.filter): # show_tensor(fourier.sample_fs(hf).abs().mean(1), 6+i) # ------- UPDATE ------- # # Get train sample train_xf = TensorList([xf[scale_ind:scale_ind+1, ...] 
for xf in test_xf]) # Shift the sample shift_samp = 2*math.pi * (self.pos - sample_pos) / (sample_scales[scale_ind] * self.img_support_sz) train_xf = fourier.shift_fs(train_xf, shift=shift_samp) # Update memory self.update_memory(train_xf) # Train filter if self.frame_num % self.params.train_skipping == 1: self.filter_optimizer.run(self.params.CG_iter, train_xf) self.symmetrize_filter() # Return new state new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]])) out = {'target_bbox': new_state.tolist()} return out def apply_filter(self, sample_xf: TensorList) -> torch.Tensor: return complex.mult(self.filter, sample_xf).sum(1, keepdim=True) def localize_target(self, sf: TensorList): if self.params.score_fusion_strategy == 'sum': scores = fourier.sample_fs(fourier.sum_fs(sf), self.output_sz) elif self.params.score_fusion_strategy == 'weightedsum': weight = self.fparams.attribute('translation_weight') scores = fourier.sample_fs(fourier.sum_fs(weight * sf), self.output_sz) elif self.params.score_fusion_strategy == 'transcale': alpha = self.fparams.attribute('scale_weight') beta = self.fparams.attribute('translation_weight') sample_sz = torch.round(self.output_sz.view(1,-1) * self.params.scale_factors.view(-1,1)) scores = 0 for sfe, a, b in zip(sf, alpha, beta): sfe = fourier.shift_fs(sfe, math.pi*torch.ones(2)) scores_scales = [] for sind, sz in enumerate(sample_sz): pd = (self.output_sz-sz)/2 scores_scales.append(F.pad(fourier.sample_fs(sfe[sind:sind+1,...], sz), (math.floor(pd[1].item()), math.ceil(pd[1].item()), math.floor(pd[0].item()), math.ceil(pd[0].item())))) scores_cat = torch.cat(scores_scales) scores = scores + (b - a) * scores_cat.mean(dim=0, keepdim=True) + a * scores_cat else: raise ValueError('Unknown score fusion strategy.') # Get maximum max_score, max_disp = dcf.max2d(scores) _, scale_ind = torch.max(max_score, dim=0) max_disp = max_disp.float().cpu() # Convert to displacements in the base scale if 
self.params.score_fusion_strategy in ['sum', 'weightedsum']: disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2 elif self.params.score_fusion_strategy == 'transcale': disp = max_disp - self.output_sz / 2 # Compute translation vector and scale change factor translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale if self.params.score_fusion_strategy in ['sum', 'weightedsum']: translation_vec *= self.params.scale_factors[scale_ind] return translation_vec, scale_ind, scores def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor): return self.params.features.extract(im, pos, scales, sz)[0] def extract_fourier_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> TensorList: x = self.extract_sample(im, pos, scales, sz) return self.preprocess_sample(self.project_sample(x)) def preprocess_sample(self, x: TensorList) -> TensorList: x *= self.window sample_xf = fourier.cfft2(x) return TensorList([dcf.interpolate_dft(xf, bf) for xf, bf in zip(sample_xf, self.interp_fs)]) def project_sample(self, x: TensorList): @tensor_operation def _project_sample(x: torch.Tensor, P: torch.Tensor): if P is None: return x return torch.matmul(x.permute(2, 3, 0, 1), P).permute(2, 3, 0, 1) return _project_sample(x, self.projection_matrix) def generate_init_samples(self, im: torch.Tensor) -> TensorList: # Do data augmentation transforms = [augmentation.Identity()] if 'shift' in self.params.augmentation: transforms.extend([augmentation.Translation(shift) for shift in self.params.augmentation['shift']]) if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']: transforms.append(augmentation.FlipHorizontal()) if 'rotate' in self.params.augmentation: transforms.extend([augmentation.Rotate(angle) for angle in self.params.augmentation['rotate']]) if 'blur' in self.params.augmentation: transforms.extend([augmentation.Blur(sigma) for sigma in 
self.params.augmentation['blur']]) init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, self.img_sample_sz, transforms) # Remove augmented samples for those that shall not have for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')): if not use_aug: init_samples[i] = init_samples[i][0:1, ...] if 'dropout' in self.params.augmentation: num, prob = self.params.augmentation['dropout'] for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')): if use_aug: init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)]) return init_samples def update_memory(self, sample_xf: TensorList): # Update weights and get index to replace replace_ind = self.update_sample_weights() for train_samp, xf, ind in zip(self.training_samples, sample_xf, replace_ind): train_samp[:,:,ind:ind+1,:,:] = xf.permute(2, 3, 0, 1, 4) def update_sample_weights(self): replace_ind = [] for sw, prev_ind, num_samp, fparams in zip(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.fparams): if num_samp == 0 or fparams.learning_rate == 1: sw[:] = 0 sw[0] = 1 r_ind = 0 else: # Get index to replace _, r_ind = torch.min(sw, 0) r_ind = r_ind.item() # Update weights if prev_ind is None: sw /= 1 - fparams.learning_rate sw[r_ind] = fparams.learning_rate else: sw[r_ind] = sw[prev_ind] / (1 - fparams.learning_rate) sw /= sw.sum() replace_ind.append(r_ind) self.previous_replace_ind = replace_ind.copy() self.num_stored_samples += 1 return replace_ind def update_state(self, new_pos, new_scale): # Update scale self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor) self.target_sz = self.base_target_sz * self.target_scale # Update pos inside_ratio = 0.2 inside_offset = (inside_ratio - 0.5) * self.target_sz self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset) def symmetrize_filter(self): for hf in 
self.filter: hf[:,:,:,0,:] /= 2 hf[:,:,:,0,:] += complex.conj(hf[:,:,:,0,:].flip((2,))) ================================================ FILE: external/AR/pytracking/tracker/eco/optim.py ================================================ import torch import torch.nn.functional as F from pytracking import complex, optimization, fourier, TensorList from pytracking.utils.plotting import plot_graph import math class FactorizedConvProblem(optimization.L2Problem): def __init__(self, training_samples: TensorList, yf:TensorList, reg_filter: torch.Tensor, init_proj_mat: TensorList, params, sample_weights: torch.Tensor = None): self.training_samples = training_samples self.yf = complex.complex(yf).permute(2, 3, 0, 1, 4) self.reg_filter = reg_filter self.sample_weights_sqrt = None if sample_weights is None else sample_weights.sqrt() self.params = params # Sample energy for preconditioner compressed_samples = complex.mtimes(self.training_samples, init_proj_mat) self.sample_energy = complex.abs_sqr(compressed_samples).mean(dim=2, keepdim=True).permute(2, 3, 0, 1) self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1) # Projection energy for preconditioner self.proj_energy = 2 * fourier.inner_prod_fs(yf, yf) / self.training_samples.size(3) # Filter part of preconditioner self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy + (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + \ self.params.precond_reg_param * self.reg_energy self.diag_M.unsqueeze_(-1) # Projection matrix part of preconditioner self.diag_M.extend(self.params.precond_proj_param * (self.proj_energy + self.params.projection_reg)) def __call__(self, x: TensorList): """ Compute residuals :param x: [filters, projection_matrices] :return: [data_terms, filter_regularizations, proj_mat_regularizations] """ hf = x[:len(x)//2] P = x[len(x)//2:] compressed_samples = complex.mtimes(self.training_samples, P) residuals = 
complex.mtimes(compressed_samples, hf.permute(2, 3, 1, 0, 4)) # (h, w, num_samp, num_filt, 2) residuals = residuals - self.yf if self.sample_weights_sqrt is not None: residuals = complex.mult(self.sample_weights_sqrt.view(1, 1, -1, 1), residuals) # Add spatial regularization for hfe, reg_filter in zip(hf, self.reg_filter): reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1) reg_pad2 = min(reg_filter.shape[-1] - 1, hfe.shape[-2] - 1) # Add part needed for convolution if reg_pad2 > 0: hfe_left_padd = complex.conj(hfe[...,1:reg_pad2+1,:].clone().detach().flip((2,3))) hfe_conv = torch.cat([hfe_left_padd, hfe], -2) else: hfe_conv = hfe.clone() # Shift data to batch dimension hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2]) # Do first convolution hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2)) residuals.append(hfe_conv) # Add regularization for projection matrix residuals.extend(math.sqrt(self.params.projection_reg) * P) return residuals def ip_input(self, a: TensorList, b: TensorList): num = len(a) // 2 # Number of filters a_filter = a[:num] b_filter = b[:num] a_P = a[num:] b_P = b[num:] # Filter inner product ip_out = fourier.inner_prod_fs(a_filter, b_filter) # Add projection matrix part ip_out += a_P.reshape(-1) @ b_P.reshape(-1) # Have independent inner products for each filter return ip_out.concat(ip_out.clone()) def ip_output(self, a: TensorList, b: TensorList): num = len(a) // 3 # Number of filters a_data = a[:num].permute(2,3,0,1,4) b_data = b[:num].permute(2,3,0,1,4) a_filt_reg = a[num:2*num] b_filt_reg = b[num:2*num] a_P_reg = a[2*num:] b_P_reg = b[2*num:] ip_data = sum(fourier.inner_prod_fs(a_data, b_data)) ip_filt_reg = ip_data.new_zeros(1) for ar, br, res_data, reg_filter in zip(a_filt_reg, b_filt_reg, a_data, self.reg_filter): reg_pad2 = min(reg_filter.shape[-1] - 1, res_data.shape[-2] - 1) arp = ar.reshape(1, -1, 2, ar.shape[2], ar.shape[3]).permute(0, 1, 3, 4, 2) brp = 
class FilterOptim(optimization.ConjugateGradientBase):
    """Conjugate-gradient optimizer for the ECO filter with a fixed projection matrix."""

    def __init__(self, params, reg_energy):
        super(FilterOptim, self).__init__(params.fletcher_reeves, params.standard_alpha,
                                          params.direction_forget_factor, (params.debug >= 3))
        # Parameters
        self.params = params
        self.reg_energy = reg_energy
        self.sample_energy = None
        self.residuals = torch.zeros(0)

    def register(self, filter, training_samples, yf, sample_weights, reg_filter):
        """Attach the tensors the optimizer operates on."""
        self.filter = filter
        self.training_samples = training_samples  # (h, w, num_samples, num_channels, 2)
        self.yf = yf
        self.sample_weights = sample_weights
        self.reg_filter = reg_filter

    def run(self, num_iter, new_xf: TensorList = None):
        """Run num_iter CG iterations, optionally folding a new sample's energy in first."""
        if num_iter == 0:
            return

        if new_xf is not None:
            # Running average of the sample energy, used by the preconditioner.
            new_sample_energy = complex.abs_sqr(new_xf)
            if self.sample_energy is None:
                self.sample_energy = new_sample_energy
            else:
                self.sample_energy = (1 - self.params.precond_learning_rate) * self.sample_energy + \
                                     self.params.precond_learning_rate * new_sample_energy

        # Compute right hand side
        self.b = complex.mtimes(self.sample_weights.view(1, 1, 1, -1), self.training_samples).permute(2, 3, 0, 1, 4)
        self.b = complex.mult_conj(self.yf, self.b)

        # Diagonal preconditioner mixing data energy and regularization energy.
        self.diag_M = (1 - self.params.precond_reg_param) * \
                      (self.params.precond_data_param * self.sample_energy +
                       (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + \
                      self.params.precond_reg_param * self.reg_energy

        _, res = self.run_CG(num_iter, self.filter)

        if self.debug:
            self.residuals = torch.cat((self.residuals, res))
            plot_graph(self.residuals, 9)
complex.mult(self.sample_weights.view(1,1,-1,1), sh) # Multiply with transpose hf_out = complex.mtimes(sh.permute(0,1,3,2,4), self.training_samples, conj_b=True).permute(2,3,0,1,4) # Add regularization for hfe, hfe_out, reg_filter in zip(hf, hf_out, self.reg_filter): reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1) reg_pad2 = min(reg_filter.shape[-1] - 1, 2*hfe.shape[-2]- 2) # Add part needed for convolution if reg_pad2 > 0: hfe_conv = torch.cat([complex.conj(hfe[...,1:reg_pad2+1,:].flip((2,3))), hfe], -2) else: hfe_conv = hfe.clone() # Shift data to batch dimension hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2]) # Do first convolution hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2)) # Do second convolution remove_size = min(reg_pad2, hfe.shape[-2]-1) hfe_conv = F.conv2d(hfe_conv[...,remove_size:], reg_filter) # Reshape back and add hfe_out += hfe_conv.reshape(hfe.shape[0], hfe.shape[1], 2, hfe.shape[2], hfe.shape[3]).permute(0,1,3,4,2) return hf_out def ip(self, a: torch.Tensor, b: torch.Tensor): return fourier.inner_prod_fs(a, b) def M1(self, hf): return complex.div(hf, self.diag_M) ================================================ FILE: external/AR/pytracking/util_scripts/__init__.py ================================================ ================================================ FILE: external/AR/pytracking/util_scripts/download_results.py ================================================ import os import sys import gdown import re import shutil import argparse import tempfile env_path = os.path.join(os.path.dirname(__file__), '../..') if env_path not in sys.path: sys.path.append(env_path) from pytracking.evaluation.environment import env_settings results_link_dict = { "dimp": { "prdimp50_003.zip": "1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc", "prdimp50_002.zip": "1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz", "prdimp50_001.zip": "17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS", "prdimp50_000.zip": 
"1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6", "prdimp18_004.zip": "1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM", "prdimp18_003.zip": "1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO", "prdimp18_002.zip": "17lMllYhygCqgE81DoHX4BZar3xc3auzM", "prdimp18_001.zip": "1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G", "prdimp18_000.zip": "1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z", "prdimp50_004.zip": "1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO", "dimp50_004.zip": "1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa", "dimp50_000.zip": "1LCgf5sg453Z4bY37A_W5mbXeG68U1fET", "dimp18_000.zip": "17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g", "dimp18_001.zip": "1AsiliVgISyDTouYOQYVOXA0srj3YskhJ", "dimp50_got_001.zip": "1EE5FcPXqMBkv_0ghfzytCMmbKxWxy04p", "dimp18_002.zip": "1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y", "dimp50_got_002.zip": "1ALXzVkn58GZ1E0I22vrbXkEXwy5u0xOc", "dimp18_got_000.zip": "1BxowlgGEonnuaVXwiDwiYr7VV7BRWLvr", "dimp50_001.zip": "1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K", "dimp18_got_002.zip": "1awqXQnFRr5NwjLfI-Ngtt3zT7XmQIwzs", "dimp18_got_001.zip": "1rr2J6NuuYJ5E4wDUw-PrxaNKjIsfgAyk", "dimp50_got_000.zip": "1ruP8XJOu0woq-bvKdHJ9_Y9RceHDrDjm", "dimp18_004.zip": "1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg", "dimp18_003.zip": "1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8", "dimp50_003.zip": "1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy", "dimp50_002.zip": "1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4", }, "atom": { "default_004.zip": "1BapnQh_8iRM44DXj862eOZV4q8zQLdmT", "default_003.zip": "1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5", "default_got_000.zip": "1uJnC0PPQhavwRbAL7VQ2Zow8YdLVzeCb", "default_got_001.zip": "1YzJm0H31veDW-lMxwy8MYNpMULgsYHKf", "default_000.zip": "1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5", "default_002.zip": "1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC", "default_001.zip": "1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt", "default_got_002.zip": "1qGtArxdAy0uWSd-HqFT5zmXpR6TCm4Vc", }, } def _download_file(file_id, path): link = 'https://drive.google.com/uc?id=' + file_id gdown.download(link, path, quiet=True) def download_results(download_path, trackers='all'): """ Script to automatically download tracker results for 
def unpack_tracking_results(download_path, output_path=None):
    """ Unpacks zipped benchmark results. The directory 'download_path' should have the following structure
    - root
        - tracker1
            - param1.zip
            - param2.zip
            .
            .
        - tracker2
            - param1.zip
            - param2.zip
            .
            .

    args:
        download_path - Path to the directory where the zipped results are stored
        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path
                      by default
    """
    if output_path is None:
        output_path = env_settings().results_path

    os.makedirs(output_path, exist_ok=True)

    for t in os.listdir(download_path):
        tracker_dir = os.path.join(download_path, t)
        # Skip stray top-level files (notes, OS metadata, ...) which would
        # previously crash os.listdir / unpack_archive.
        if not os.path.isdir(tracker_dir):
            continue
        for r in os.listdir(tracker_dir):
            # Only unpack zip archives; the old code blindly stripped the last
            # four characters and passed every entry to unpack_archive.
            base, ext = os.path.splitext(r)
            if ext.lower() != '.zip':
                continue
            save_path = os.path.join(output_path, t)
            os.makedirs(save_path, exist_ok=True)
            shutil.unpack_archive(os.path.join(tracker_dir, r), os.path.join(save_path, base), 'zip')
def pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):
    """ Packs trackingnet results into a zip folder which can be directly uploaded to the evaluation server. The packed
    file is saved in the folder env_settings().tn_packed_results_path

    args:
        tracker_name - name of the tracker
        param_name - name of the parameter file
        run_id - run id for the tracker
        output_name - name of the packed zip file
    """
    if output_name is None:
        if run_id is None:
            output_name = '{}_{}'.format(tracker_name, param_name)
        else:
            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)

    output_path = os.path.join(env_settings().tn_packed_results_path, output_name)
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    results_path = env_settings().results_path

    # Re-save every sequence result in the comma-separated format the server expects.
    for seq in get_dataset('trackingnet'):
        seq_name = seq.name
        if run_id is None:
            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)
        else:
            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name,
                                                               run_id, seq_name)

        results = np.loadtxt(seq_results_path, dtype=np.float64)
        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')

    # Zip everything up, then drop the raw text files.
    shutil.make_archive(output_path, 'zip', output_path)
    shutil.rmtree(output_path)
max(vot_anno[0::2]) y1 = min(vot_anno[1::2]) y2 = max(vot_anno[1::2]) A1 = np.linalg.norm(vot_anno[0:2] - vot_anno[2: 4]) * np.linalg.norm(vot_anno[2: 4] - vot_anno[4:6]) A2 = (x2 - x1) * (y2 - y1) s = np.sqrt(A1 / A2) w = s * (x2 - x1) + 1 h = s * (y2 - y1) + 1 x = cx - 0.5*w y = cy - 0.5*h return [x, y, w, h] else: raise ValueError ================================================ FILE: external/AR/pytracking/utils/load_text.py ================================================ import numpy as np import pandas as pd def load_text_numpy(path, delimiter, dtype): if isinstance(delimiter, (tuple, list)): for d in delimiter: try: ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype) return ground_truth_rect except: pass raise Exception('Could not read file {}'.format(path)) else: ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype) return ground_truth_rect def load_text_pandas(path, delimiter, dtype): if isinstance(delimiter, (tuple, list)): for d in delimiter: try: ground_truth_rect = pd.read_csv(path, delimiter=d, header=None, dtype=dtype, na_filter=False, low_memory=False).values return ground_truth_rect except Exception as e: pass raise Exception('Could not read file {}'.format(path)) else: ground_truth_rect = pd.read_csv(path, delimiter=delimiter, header=None, dtype=dtype, na_filter=False, low_memory=False).values return ground_truth_rect def load_text(path, delimiter=' ', dtype=np.float32, backend='numpy'): if backend == 'numpy': return load_text_numpy(path, delimiter, dtype) elif backend == 'pandas': return load_text_pandas(path, delimiter, dtype) ================================================ FILE: external/AR/pytracking/utils/loading.py ================================================ import os import ltr.admin.loading as ltr_loading from pytracking.evaluation.environment import env_settings def load_network(net_path, **kwargs): """Load network for tracking. args: net_path - Path to network. 
If it is not an absolute path, it is relative to the network_path in the local.py. See ltr.admin.loading.load_network for further details. **kwargs - Additional key-word arguments that are sent to ltr.admin.loading.load_network. """ kwargs['backbone_pretrained'] = False if os.path.isabs(net_path): path_full = net_path net, _ = ltr_loading.load_network(path_full, **kwargs) elif isinstance(env_settings().network_path, (list, tuple)): net = None for p in env_settings().network_path: path_full = os.path.join(p, net_path) try: net, _ = ltr_loading.load_network(path_full, **kwargs) break except Exception as e: # print(e) pass assert net is not None, 'Failed to load network' else: path_full = os.path.join(env_settings().network_path, net_path) net, _ = ltr_loading.load_network(path_full, **kwargs) return net ================================================ FILE: external/AR/pytracking/utils/params.py ================================================ from pytracking import TensorList import random class TrackerParams: """Class for tracker parameters.""" def set_default_values(self, default_vals: dict): for name, val in default_vals.items(): if not hasattr(self, name): setattr(self, name, val) def get(self, name: str, *default): """Get a parameter value with the given name. 
If it does not exists, it return the default value given as a second argument or returns an error if no default value is given.""" if len(default) > 1: raise ValueError('Can only give one default value.') if not default: return getattr(self, name) return getattr(self, name, default[0]) def has(self, name: str): """Check if there exist a parameter with the given name.""" return hasattr(self, name) class FeatureParams: """Class for feature specific parameters""" def __init__(self, *args, **kwargs): if len(args) > 0: raise ValueError for name, val in kwargs.items(): if isinstance(val, list): setattr(self, name, TensorList(val)) else: setattr(self, name, val) def Choice(*args): """Can be used to sample random parameter values.""" return random.choice(args) ================================================ FILE: external/AR/pytracking/utils/plotting.py ================================================ import matplotlib.pyplot as plt import numpy as np import torch import cv2 def draw_figure(fig): fig.canvas.draw() fig.canvas.flush_events() plt.pause(0.001) def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None): """Display a 2D tensor. args: fig_num: Figure number. title: Title of figure. """ a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim == 3: a_np = np.transpose(a_np, (1, 2, 0)) if ax is None: fig = plt.figure(fig_num) plt.tight_layout() plt.cla() plt.imshow(a_np, vmin=range[0], vmax=range[1]) plt.axis('off') plt.axis('equal') if title is not None: plt.title(title) draw_figure(fig) else: ax.cla() ax.imshow(a_np, vmin=range[0], vmax=range[1]) ax.set_axis_off() ax.axis('equal') if title is not None: ax.set_title(title) draw_figure(plt.gcf()) def plot_graph(a: torch.Tensor, fig_num = None, title = None): """Plot graph. Data is a 1D tensor. args: fig_num: Figure number. title: Title of figure. 
""" a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim > 1: raise ValueError fig = plt.figure(fig_num) # plt.tight_layout() plt.cla() plt.plot(a_np) if title is not None: plt.title(title) draw_figure(fig) def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None): im_np = im.clone().cpu().squeeze().numpy() im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8)) boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int) # Draw proposals for i_ in range(boxes.shape[0]): if disp_ids is None or disp_ids[i_]: bb = boxes[i_, :] disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256) cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), disp_color, 1) if iou_pred is not None: text_pos = (bb[0], bb[1] - 5) cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False) im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float() return im_tensor def _pascal_color_map(N=256, normalized=False): """ Python implementation of the color map function for the PASCAL VOC data set. Official Matlab version can be found in the PASCAL VOC devkit http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit """ def bitget(byteval, idx): return (byteval & (1 << idx)) != 0 dtype = 'float32' if normalized else 'uint8' cmap = np.zeros((N, 3), dtype=dtype) for i in range(N): r = g = b = 0 c = i for j in range(8): r = r | (bitget(c, 0) << 7 - j) g = g | (bitget(c, 1) << 7 - j) b = b | (bitget(c, 2) << 7 - j) c = c >> 3 cmap[i] = np.array([r, g, b]) cmap = cmap / 255 if normalized else cmap return cmap def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None): """ Overlay mask over image. Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py This function allows you to overlay a mask over an image with some transparency. # Arguments im: Numpy Array. 
Array with the image. The shape must be (H, W, 3) and the pixels must be represented as `np.uint8` data type. ann: Numpy Array. Array with the mask. The shape must be (H, W) and the values must be intergers alpha: Float. Proportion of alpha to apply at the overlaid mask. colors: Numpy Array. Optional custom colormap. It must have shape (N, 3) being N the maximum number of colors to represent. contour_thickness: Integer. Thickness of each object index contour draw over the overlay. This function requires to have installed the package `opencv-python`. # Returns Numpy Array: Image of the overlay with shape (H, W, 3) and data type `np.uint8`. """ im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int) if im.shape[:-1] != ann.shape: raise ValueError('First two dimensions of `im` and `ann` must match') if im.shape[-1] != 3: raise ValueError('im must have three channels at the 3 dimension') colors = colors or _pascal_color_map() colors = np.asarray(colors, dtype=np.uint8) mask = colors[ann] fg = im * alpha + (1 - alpha) * mask img = im.copy() img[ann > 0] = fg[ann > 0] if contour_thickness: # pragma: no cover import cv2 for obj_id in np.unique(ann[ann > 0]): contours = cv2.findContours((ann == obj_id).astype( np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(), contour_thickness) return img ================================================ FILE: external/AR/pytracking/utils/visdom.py ================================================ import visdom import visdom.server from pytracking.features.preprocessing import numpy_to_torch from pytracking.utils.plotting import show_image_with_boxes, overlay_mask import cv2 import torch import copy import numpy as np from collections import OrderedDict class VisBase: def __init__(self, visdom, show_data, title): self.visdom = visdom self.show_data = show_data self.title = title self.raw_data = None def update(self, data, **kwargs): self.save_data(data, 
**kwargs) if self.show_data: self.draw_data() def save_data(self, data, **kwargs): raise NotImplementedError def draw_data(self): raise NotImplementedError def toggle_display(self, new_mode=None): if new_mode is not None: self.show_data = new_mode else: self.show_data = not self.show_data if self.show_data: self.draw_data() else: self.visdom.close(self.title) class VisImage(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) def save_data(self, data): data = data.float() self.raw_data = data def draw_data(self): self.visdom.image(self.raw_data.clone(), opts={'title': self.title}, win=self.title) class VisHeatmap(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) def save_data(self, data): data = data.squeeze().flip(0) self.raw_data = data def draw_data(self): self.visdom.heatmap(self.raw_data.clone(), opts={'title': self.title}, win=self.title) class VisFeaturemap(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) self.block_list = None def block_list_callback_handler(self, data): self.block_list[data['propertyId']]['value'] = data['value'] self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui') self.draw_data() def save_data(self, data): data = data.view(-1, *data.shape[-2:]) data = data.flip(1) if self.block_list is None: self.block_list = [] self.draw_feat = [] for i in range(data.shape[0]): self.block_list.append({'type': 'checkbox', 'name': 'Channel {:04d}'.format(i), 'value': False}) self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui') self.visdom.register_event_handler(self.block_list_callback_handler, 'featuremap_ui') self.raw_data = data def draw_data(self): if self.block_list is not None and self.show_data: for i, d in enumerate(self.block_list): if d['value']: fig_title = '{} ch: {:04d}'.format(self.title, i) 
class VisCostVolume(VisBase):
    """Visdom block that renders a 4-D cost volume.

    ``raw_data`` is stored as a (H, W, H, W) tensor: for each reference cell
    (r, c) it holds a full response map over all target cells.  Either the
    whole volume (flattened into one large 2-D heatmap) or the slice for a
    single reference cell is drawn, depending on ``show_slice``.
    """

    def __init__(self, visdom, show_data, title, flip=False):
        super().__init__(visdom, show_data, title)
        self.show_slice = False   # when True, draw only the slice at slice_pos
        self.slice_pos = None     # [row, col] of the reference cell to slice at
        self.flip = flip          # swap the two (row, col) axis pairs before display

    def show_cost_volume(self):
        """Flatten the full 4-D volume into a single 2-D heatmap and draw it."""
        data = self.raw_data.clone()

        # data_perm = data.permute(2, 0, 3, 1).contiguous()
        # Bring the two row axes (dims 0 and 2) next to each other before
        # flattening, so each tile of the 2-D image is one response map.
        data_perm = data.permute(0, 2, 1, 3).contiguous()
        if self.flip:
            # Exchange the roles of the first and second (row, col) pairs.
            data_perm = data_perm.permute(2, 3, 0, 1).contiguous()

        data_perm = data_perm.view(data_perm.shape[0] * data_perm.shape[1], -1)
        # flip(0): heatmap rows are drawn bottom-up by visdom.
        self.visdom.heatmap(data_perm.flip(0), opts={'title': self.title}, win=self.title)

    def set_zoom_pos(self, slice_pos):
        # slice_pos: [row, col] selected by the companion VisCostVolumeUI block.
        self.slice_pos = slice_pos

    def toggle_show_slice(self, new_mode=None):
        # Explicit mode if given, otherwise toggle.
        if new_mode is not None:
            self.show_slice = new_mode
        else:
            self.show_slice = not self.show_slice

    def show_cost_volume_slice(self):
        """Draw only the response map of the currently selected reference cell."""
        slice_pos = self.slice_pos
        # slice_pos: [row, col]
        cost_volume_data = self.raw_data.clone()
        if self.flip:
            cost_volume_slice = cost_volume_data[:, :, slice_pos[0], slice_pos[1]]
        else:
            cost_volume_slice = cost_volume_data[slice_pos[0], slice_pos[1], :, :]

        self.visdom.heatmap(cost_volume_slice.flip(0), opts={'title': self.title}, win=self.title)

    def save_data(self, data):
        # Reshape to (H, W, H, W); assumes data holds exactly H*W*H*W
        # elements for the trailing (H, W) of its own shape — TODO confirm.
        data = data.view(data.shape[-2], data.shape[-1], data.shape[-2], data.shape[-1])
        self.raw_data = data

    def draw_data(self):
        if self.show_slice:
            self.show_cost_volume_slice()
        else:
            self.show_cost_volume()


class VisCostVolumeUI(VisBase):
    """Interactive image overlay for browsing VisCostVolume blocks.

    Draws the input image with a feature-grid overlay; arrow keys move the
    selected cell and Enter toggles 'zoom' (slice) mode on every registered
    VisCostVolume block.
    """

    def cv_ui_handler(self, data):
        """Visdom event callback: arrow-key navigation and Enter toggling."""
        zoom_toggled = False
        if data['event_type'] == 'KeyPress':
            if data['key'] == 'ArrowRight':
                self.zoom_pos[1] = min(self.zoom_pos[1] + 1, self.feat_shape[1]-1)
            elif data['key'] == 'ArrowLeft':
                self.zoom_pos[1] = max(self.zoom_pos[1] - 1, 0)
            elif data['key'] == 'ArrowUp':
                self.zoom_pos[0] = max(self.zoom_pos[0] - 1, 0)
            elif data['key'] == 'ArrowDown':
                self.zoom_pos[0] = min(self.zoom_pos[0] + 1, self.feat_shape[0]-1)
            elif data['key'] == 'Enter':
                self.zoom_mode = not self.zoom_mode
                zoom_toggled = True

        # Update image
        self.show_image()

        # Update cost volumes
        for block_title, block in self.registered_blocks.items():
            if isinstance(block, VisCostVolume):
                block.set_zoom_pos(self.zoom_pos)
                block.toggle_show_slice(self.zoom_mode)
                # Redraw when in zoom mode, or right after leaving it.
                if (self.zoom_mode or zoom_toggled) and block.show_data:
                    block.draw_data()

    def __init__(self, visdom, show_data, title, feat_shape, registered_blocks):
        super().__init__(visdom, show_data, title)
        self.feat_shape = feat_shape   # (rows, cols) of the feature grid
        self.zoom_mode = False
        # Start with the selection at the centre cell.
        self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)]
        self.registered_blocks = registered_blocks
        self.visdom.register_event_handler(self.cv_ui_handler, title)

    def draw_grid(self, data):
        """Mark feature-cell boundaries on a CHW image (red lines: ch0=255, others 0)."""
        stride_r = int(data.shape[1] / self.feat_shape[0])
        stride_c = int(data.shape[2] / self.feat_shape[1])

        # Draw grid
        data[:, list(range(0, data.shape[1], stride_r)), :] = 0
        data[:, :, list(range(0, data.shape[2], stride_c))] = 0
        data[0, list(range(0, data.shape[1], stride_r)), :] = 255
        data[0, :, list(range(0, data.shape[2], stride_c))] = 255
        return data

    def shade_cell(self, data):
        """Blend a red highlight over the currently selected cell."""
        stride_r = int(data.shape[1] / self.feat_shape[0])
        stride_c = int(data.shape[2] / self.feat_shape[1])

        r1 = self.zoom_pos[0]*stride_r
        r2 = min((self.zoom_pos[0] + 1)*stride_r, data.shape[1])

        c1 = self.zoom_pos[1] * stride_c
        c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2])

        # Stronger shading while zoom (slice) mode is active.
        factor = 0.8 if self.zoom_mode else 0.5
        data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + \
            torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(data.device) * factor
        return data

    def show_image(self, data=None):
        # Redraw the cached image with grid and selection overlay.
        if data is None:
            data = self.raw_data.clone()
        data = self.draw_grid(data)
        data = self.shade_cell(data)
        self.visdom.image(data, opts={'title': self.title}, win=self.title)

    def save_data(self, data):
        # Ignore feat shape
        data = data[0]
        data = data.float()
        self.raw_data = data

    def draw_data(self):
        self.show_image(self.raw_data.clone())
class VisInfoDict(VisBase):
    """Visdom block that renders an ordered dict of status values as text.

    New entries are merged into ``raw_data``, so keys persist across updates
    until overwritten.
    """

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.raw_data = OrderedDict()

    def generate_display_text(self, data):
        """Format key/value pairs into an HTML string for the visdom text pane."""
        # NOTE(review): the '<br>' literals were garbled in extraction; this
        # matches the upstream pytracking source — confirm against it.
        display_text = ''
        for key, value in data.items():
            key = key.replace('_', ' ')
            if value is None:
                display_text += '{}: {}<br>'.format(key, 'None')
            elif isinstance(value, (str, int)):
                display_text += '{}: {}<br>'.format(key, value)
            else:
                # Anything else (e.g. floats) is shown with two decimals.
                display_text += '{}: {:.2f}<br>'.format(key, value)
        return display_text

    def save_data(self, data):
        # Merge new entries; existing keys are overwritten in place.
        for key, val in data.items():
            self.raw_data[key] = val

    def draw_data(self):
        data = copy.deepcopy(self.raw_data)
        display_text = self.generate_display_text(data)
        self.visdom.text(display_text, opts={'title': self.title}, win=self.title)


class VisText(VisBase):
    """Visdom block that shows a plain text string."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        self.raw_data = data

    def draw_data(self):
        data = copy.deepcopy(self.raw_data)
        self.visdom.text(data, opts={'title': self.title}, win=self.title)


class VisLinePlot(VisBase):
    """Visdom block that draws a 1-D line plot.

    ``data`` may be a tensor of y-values, or a (y, x) pair of tensors.
    """

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        self.raw_data = data

    def draw_data(self):
        if isinstance(self.raw_data, (list, tuple)):
            data_y = self.raw_data[0].clone()
            data_x = self.raw_data[1].clone()
        else:
            data_y = self.raw_data.clone()
            # Default x axis: 0..N-1 sample indices.
            data_x = torch.arange(data_y.shape[0])
        self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title)
self.raw_data[1]] for i, disp_rect in enumerate(boxes): color = ((255*((i%3)>0)), 255*((i+1)%2), (255*(i%5))//4) cv2.rectangle(disp_image, (int(disp_rect[0]), int(disp_rect[1])), (int(disp_rect[0] + disp_rect[2]), int(disp_rect[1] + disp_rect[3])), color, 2) for i, mask in enumerate(self.raw_data[2], 1): disp_image = overlay_mask(disp_image, mask * i) disp_image = numpy_to_torch(disp_image).squeeze(0) disp_image = disp_image.float() self.visdom.image(disp_image, opts={'title': self.title}, win=self.title) class VisBBReg(VisBase): def __init__(self, visdom, show_data, title): super().__init__(visdom, show_data, title) self.block_list = [] def block_list_callback_handler(self, data): self.block_list[data['propertyId']]['value'] = data['value'] self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis') self.draw_data() def save_data(self, data): self.image = data[0].float() self.init_boxes = data[1] self.final_boxes = data[2] self.final_ious = data[3] def draw_data(self): if len(self.block_list) == 0: self.block_list.append({'type': 'checkbox', 'name': 'ID 0', 'value': True}) self.block_list.append({'type': 'checkbox', 'name': 'ID 1', 'value': True}) self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis') self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis') disp_image = self.image ids = [x['value'] for x in self.block_list] init_box_image = show_image_with_boxes(disp_image.clone(), self.init_boxes.clone(), disp_ids=ids) final_box_image = show_image_with_boxes(disp_image.clone(), self.final_boxes.clone(), self.final_ious.clone(), disp_ids=ids) self.visdom.image(init_box_image, opts={'title': 'Init Boxes'}, win='Init Boxes') self.visdom.image(final_box_image, opts={'title': 'Final Boxes'}, win='Final Boxes') class Visdom: def __init__(self, debug=0, ui_info=None, visdom_info=None): self.debug = debug self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'), 
port=visdom_info.get('port', 8097)) self.registered_blocks = {} self.blocks_list = [] self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list') if ui_info is not None: self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id']) def block_list_callback_handler(self, data): field_name = self.blocks_list[data['propertyId']]['name'] self.registered_blocks[field_name].toggle_display(data['value']) self.blocks_list[data['propertyId']]['value'] = data['value'] self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') def register(self, data, mode, debug_level=0, title='Data', **kwargs): if title not in self.registered_blocks.keys(): show_data = self.debug >= debug_level if title != 'Tracking': self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data}) self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list') if mode == 'image': self.registered_blocks[title] = VisImage(self.visdom, show_data, title) elif mode == 'heatmap': self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title) elif mode == 'cost_volume': self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title) elif mode == 'cost_volume_flip': self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True) elif mode == 'cost_volume_ui': self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1], self.registered_blocks) elif mode == 'info_dict': self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title) elif mode == 'text': self.registered_blocks[title] = VisText(self.visdom, show_data, title) elif mode == 'lineplot': self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title) elif mode == 'Tracking': self.registered_blocks[title] = VisTracking(self.visdom, show_data, title) elif mode == 'bbreg': 
self.registered_blocks[title] = VisBBReg(self.visdom, show_data, title) elif mode == 'featmap': self.registered_blocks[title] = VisFeaturemap(self.visdom, show_data, title) else: raise ValueError('Visdom Error: Unknown data mode {}'.format(mode)) # Update self.registered_blocks[title].update(data, **kwargs) ================================================ FILE: external/AR/pytracking/vot20_utils.py ================================================ import numpy as np def make_full_size(x, output_sz): ''' zero-pad input x (right and down) to match output_sz x: numpy array e.g., binary mask output_sz: size of the output [width, height] ''' if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]: return x pad_x = output_sz[0] - x.shape[1] if pad_x < 0: x = x[:, :x.shape[1] + pad_x] # padding has to be set to zero, otherwise pad function fails pad_x = 0 pad_y = output_sz[1] - x.shape[0] if pad_y < 0: x = x[:x.shape[0] + pad_y, :] # padding has to be set to zero, otherwise pad function fails pad_y = 0 return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0) def rect_from_mask(mask): ''' create an axis-aligned rectangle from a given binary mask mask in created as a minimal rectangle containing all non-zero pixels ''' x_ = np.sum(mask, axis=0) y_ = np.sum(mask, axis=1) x0 = np.min(np.nonzero(x_)) x1 = np.max(np.nonzero(x_)) y0 = np.min(np.nonzero(y_)) y1 = np.max(np.nonzero(y_)) return [x0, y0, x1 - x0 + 1, y1 - y0 + 1] def mask_from_rect(rect, output_sz): ''' create a binary mask from a given rectangle rect: axis-aligned rectangle [x0, y0, width, height] output_sz: size of the output [width, height] ''' mask = np.zeros((output_sz[1], output_sz[0]), dtype=np.uint8) x0 = max(int(round(rect[0])), 0) y0 = max(int(round(rect[1])), 0) x1 = min(int(round(rect[0] + rect[2])), output_sz[0]) y1 = min(int(round(rect[1] + rect[3])), output_sz[1]) mask[y0:y1, x0:x1] = 1 return mask def bbox_clip(x1, y1, x2, y2, boundary, min_sz=10): '''boundary (H,W)''' x1_new = 
max(0, min(x1, boundary[1] - min_sz)) y1_new = max(0, min(y1, boundary[0] - min_sz)) x2_new = max(min_sz, min(x2, boundary[1])) y2_new = max(min_sz, min(y2, boundary[0])) return x1_new, y1_new, x2_new, y2_new ================================================ FILE: external/PreciseRoIPooling/.gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .vim-template* # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ ================================================ FILE: external/PreciseRoIPooling/LICENSE ================================================ MIT License Copyright (c) 2018 Jiayuan Mao Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: external/PreciseRoIPooling/README.md ================================================ # PreciseRoIPooling This repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation). **Acquisition of Localization Confidence for Accurate Object Detection** _Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.) https://arxiv.org/abs/1807.11590 ## Brief In short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is: - different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates. - different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). 
For a better comparison, we illustrate RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details including the gradient computation can be found in our paper.
## Implementation PrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome. ## Usage (PyTorch 1.0) In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented). Since we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do: ``` from prroi_pool import PrRoIPool2D avg_pool = PrRoIPool2D(window_height, window_width, spatial_scale) roi_features = avg_pool(features, rois) # for those who want to use the "functional" from prroi_pool.functional import prroi_pool2d roi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale) ``` ## Usage (PyTorch 0.4) **!!! Please first checkout to the branch pytorch0.4.** In the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented). To use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
- The coordinates of the RoIs follow the [L, R) convention. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.
# Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. import torch import torch.autograd as ag __all__ = ['prroi_pool2d'] _prroi_pooling = None def _import_prroi_pooling(): global _prroi_pooling if _prroi_pooling is None: try: from os.path import join as pjoin, dirname from torch.utils.cpp_extension import load as load_extension root_dir = pjoin(dirname(__file__), 'src') _prroi_pooling = load_extension( '_prroi_pooling', [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')], verbose=True ) except ImportError: raise ImportError('Can not compile Precise RoI Pooling library.') return _prroi_pooling class PrRoIPool2DFunction(ag.Function): @staticmethod def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale): _prroi_pooling = _import_prroi_pooling() assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \ 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type()) pooled_height = int(pooled_height) pooled_width = int(pooled_width) spatial_scale = float(spatial_scale) features = features.contiguous() rois = rois.contiguous() params = (pooled_height, pooled_width, spatial_scale) if features.is_cuda: output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params) ctx.params = params # everything here is contiguous. 
ctx.save_for_backward(features, rois, output) else: raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') return output @staticmethod def backward(ctx, grad_output): _prroi_pooling = _import_prroi_pooling() features, rois, output = ctx.saved_tensors grad_input = grad_coor = None if features.requires_grad: grad_output = grad_output.contiguous() grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) if rois.requires_grad: grad_output = grad_output.contiguous() grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) return grad_input, grad_coor, None, None, None prroi_pool2d = PrRoIPool2DFunction.apply ================================================ FILE: external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py ================================================ #! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : prroi_pool.py # Author : Jiayuan Mao, Tete Xiao # Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com # Date : 07/13/2018 # # This file is part of PreciseRoIPooling. # Distributed under terms of the MIT license. # Copyright (c) 2017 Megvii Technology Limited. 
import torch.nn as nn from .functional import prroi_pool2d __all__ = ['PrRoIPool2D'] class PrRoIPool2D(nn.Module): def __init__(self, pooled_height, pooled_width, spatial_scale): super().__init__() self.pooled_height = int(pooled_height) self.pooled_width = int(pooled_width) self.spatial_scale = float(spatial_scale) def forward(self, features, rois): return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale) def extra_repr(self): return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__) ================================================ FILE: external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c ================================================ /* * File : prroi_pooling_gpu.c * Author : Jiayuan Mao, Tete Xiao * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com * Date : 07/13/2018 * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. */ #include #include #include #include #include "prroi_pooling_gpu_impl.cuh" at::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) { int nr_rois = rois.size(0); int nr_channels = features.size(1); int height = features.size(2); int width = features.size(3); int top_count = nr_rois * nr_channels * pooled_height * pooled_width; auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options()); if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return output; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); PrRoIPoolingForwardGpu( stream, features.data(), rois.data(), output.data(), nr_channels, height, width, pooled_height, pooled_width, spatial_scale, top_count ); AT_CUDA_CHECK(cudaGetLastError()); return output; } at::Tensor prroi_pooling_backward_cuda( const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor 
&output_diff, int pooled_height, int pooled_width, float spatial_scale) {
    auto features_diff = at::zeros_like(features);

    int nr_rois = rois.size(0);
    int batch_size = features.size(0);
    int nr_channels = features.size(1);
    int height = features.size(2);
    int width = features.size(3);
    /* top_count: work items over the pooled output; bottom_count: size of the
     * feature-gradient buffer the kernel zero-fills then scatters into. */
    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
    int bottom_count = batch_size * nr_channels * height * width;

    /* Empty output => zero gradient; still surface pending CUDA errors. */
    if (output.numel() == 0) {
        AT_CUDA_CHECK(cudaGetLastError());
        return features_diff;
    }

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();

    PrRoIPoolingBackwardGpu(
        stream, features.data(), rois.data(), output.data(), output_diff.data(), features_diff.data(),
        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
        top_count, bottom_count
    );

    AT_CUDA_CHECK(cudaGetLastError());
    return features_diff;
}

/* Backward pass w.r.t. the roi coordinates: returns a gradient tensor with
 * the same (N, 5) shape as `rois` (the batch-index column gets no gradient). */
at::Tensor prroi_pooling_coor_backward_cuda(
    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,
    int pooled_height, int pooled_width, float spatial_scale) {
    auto coor_diff = at::zeros_like(rois);

    int nr_rois = rois.size(0);
    int nr_channels = features.size(1);
    int height = features.size(2);
    int width = features.size(3);
    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;
    /* One gradient slot per roi field (batch_index, x1, y1, x2, y2). */
    int bottom_count = nr_rois * 5;

    if (output.numel() == 0) {
        AT_CUDA_CHECK(cudaGetLastError());
        return coor_diff;
    }

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();

    PrRoIPoolingCoorBackwardGpu(
        stream, features.data(), rois.data(), output.data(), output_diff.data(), coor_diff.data(),
        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,
        top_count, bottom_count
    );

    AT_CUDA_CHECK(cudaGetLastError());
    return coor_diff;
}

/* Python bindings: expose the three entry points under the extension module
 * name chosen by torch's JIT loader (TORCH_EXTENSION_NAME). */
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("prroi_pooling_forward_cuda", &prroi_pooling_forward_cuda, "PRRoIPooling_forward");
    m.def("prroi_pooling_backward_cuda", &prroi_pooling_backward_cuda, "PRRoIPooling_backward");
    m.def("prroi_pooling_coor_backward_cuda", &prroi_pooling_coor_backward_cuda, "PRRoIPooling_backward_coor");
}



================================================
FILE: external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h
================================================
/*
 * File : prroi_pooling_gpu.h
 * Author : Jiayuan Mao, Tete Xiao
 * Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com
 * Date : 07/13/2018
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

/* NOTE(review): these declarations use the legacy THCudaTensor API and do
 * NOT match the at::Tensor definitions in prroi_pooling_gpu.c above —
 * presumably a leftover from an older TH-based build; the pybind build does
 * not appear to use this header. Confirm before relying on it. */
int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);

int prroi_pooling_backward_cuda(
    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
    int pooled_height, int pooled_width, float spatial_scale
);

/* NOTE(review): `spatial_scal` below is a typo for `spatial_scale` in the
 * original source (harmless in a declaration); kept byte-identical here. */
int prroi_pooling_coor_backward_cuda(
    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
    int pooled_height, int pooled_width, float spatial_scal
);



================================================
FILE: external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu
================================================
/*
 * File : prroi_pooling_gpu_impl.cu
 * Author : Tete Xiao, Jiayuan Mao
 * Email : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#include "prroi_pooling_gpu_impl.cuh"
/* NOTE(review): the two system include targets below were lost in extraction
 * (header names stripped); presumably <cstdio> and <cfloat> or similar —
 * restore from the upstream PreciseRoIPooling repository. */
#include
#include

/* Grid-stride loop: each thread handles indices i, i+stride, i+2*stride, ...
 * so any grid size covers all n work items. */
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

/* Abort the process on any pending CUDA error after a kernel launch. */
#define CUDA_POST_KERNEL_CHECK \
    do { \
        cudaError_t err = cudaGetLastError(); \
        if (cudaSuccess != err) { \
            fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \
            exit(-1); \
        } \
    } while(0)

#define CUDA_NUM_THREADS 512

namespace {

/* Number of blocks needed so blocks*CUDA_NUM_THREADS >= N (ceiling divide). */
static int CUDA_NUM_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

/* Read data[h][w] from a (height, width) plane, returning 0 outside bounds
 * (implicit zero padding). */
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    float retVal = overflow ? 0.0f : data[h * width + w];
    return retVal;
}

/* Bilinear weight (1-|dh|)(1-|dw|) for a neighbor at offset (dh, dw). */
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){
    dw = dw > 0 ? dw : -dw;
    dh = dh > 0 ? dh : -dh;
    return (1.0f - dh) * (1.0f - dw);
}

/* Closed-form integral over [s, t] (s, t in [0, 1]) of the linear blend
 * (1-u)*c1 + u*c2 — used to integrate interpolated values along one axis. */
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}

/* Bilinear interpolation of the feature plane at fractional (h, w):
 * sum of the four integer neighbors weighted by PrRoIPoolingGetCoeff. */
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){
    float retVal = 0.0f;
    int h1 = floorf(h);
    int w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h);
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    return retVal;
}

/* Exact integral of the bilinearly-interpolated feature over the clipped
 * window [x0, x1] x [y0, y1] inside the unit cell with corners
 * (s_h, s_w)..(e_h, e_w); one closed-form term per corner sample. */
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w,
        const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;
    float sum_out = 0;

    /* contribution of corner (s_h, s_w) */
    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;

    /* contribution of corner (s_h, e_w) */
    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;

    /* contribution of corner (e_h, s_w) */
    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    /* contribution of corner (e_h, e_w) */
    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Atomically add top_diff*coeff into diff[h][w]; no-op outside bounds
 * (mirrors the zero padding of PrRoIPoolingGetData). */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Adjoint of PrRoIPoolingMatCalculation: scatters top_diff back to the four
 * corner samples with the same closed-form weights. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w,
        const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward kernel: one work item per pooled output cell; averages the exact
 * integral of the interpolated features over the cell's window. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        /* roi row layout: (batch_index, x1, y1, x2, y2). */
        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];

        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
        float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
        /* NOTE(review): `static_cast(...)` lost its template argument in
         * extraction; presumably static_cast<float>(...) upstream. */
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        float *this_out = top_data + index;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        if (win_size == 0) {
            *this_out = 0;
            /* NOTE(review): `return` exits the whole grid-stride loop for this
             * thread, skipping any later indices it would handle; presumably
             * only safe when the grid covers nthreads — confirm upstream. */
            return;
        }

        float sum_out = 0;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Accumulate the exact integral cell by cell over the window. */
        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
        *this_out = sum_out / win_size;
    }
}

/* Backward kernel w.r.t. the input feature map: scatter the (normalized)
 * output gradient back through the same integral weights. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        /* Forward divided by win_size, so the backward gradient does too. */
        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
    }
}

/* Backward kernel w.r.t. the roi coordinates: differentiates the window
 * integral against x1/y1/x2/y2 via boundary line integrals (g_* terms). */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        const float *this_top_data = top_data + index;
        float *this_data_grad = bottom_diff + n * 5;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        // WARNING: to be discussed
        /* NOTE(review): as in the forward kernel, this `return` aborts the
         * whole grid-stride loop for the thread, not just this work item. */
        if (sum_out == 0)
            return;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Line integrals of the interpolated feature along the four window
         * edges: x = win_start_w (g_x1_y), x = win_end_w (g_x2_y),
         * y = win_start_h (g_x_y1), y = win_end_h (g_x_y2). */
        float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
        for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
            g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

            g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
        }

        for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
            g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

            g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
        }

        /* d(average)/d(boundary): boundary integral minus the moving-average
         * correction term (Leibniz rule for the normalized window integral). */
        float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
        float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
        float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
        float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

        partial_x1 = partial_x1 / win_size * spatial_scale;
        partial_x2 = partial_x2 / win_size * spatial_scale;
        partial_y1 = partial_y1 / win_size * spatial_scale;
        partial_y2 = partial_y2 / win_size * spatial_scale;

        // (b, x1, y1, x2, y2)
        this_data_grad[0] = 0;
        atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
        atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
        atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
        atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
    }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

/* Host wrapper for the forward kernel.
 * NOTE(review): the launch configuration between `<<` and `>>` was lost in
 * extraction; presumably
 * <<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>> —
 * restore from upstream. Same applies to the other two wrappers. */
void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count) {

    PrRoIPoolingForward<<>>(
        top_count, bottom_data, bottom_rois, top_data,
        channels_, height_, width_, pooled_height_, pooled_width_,
spatial_scale_);

    CUDA_POST_KERNEL_CHECK;
}

/* Host wrapper for the feature-gradient kernel: zero the gradient buffer on
 * the same stream, then scatter.
 * NOTE(review): bottom_data and top_data are accepted but not forwarded to
 * the kernel (it only needs rois and top_diff); kept for a uniform API. */
void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    /* NOTE(review): launch configuration lost in extraction (see forward
     * wrapper); restore <<<...>>> arguments from upstream. */
    PrRoIPoolingBackward<<>>(
        top_count, bottom_rois, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_,
        spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

/* Host wrapper for the roi-coordinate-gradient kernel; bottom_diff here is
 * the (nr_rois * 5)-element coordinate gradient buffer. */
void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count) {

    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
    PrRoIPoolingCoorBackward<<>>(
        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
        channels_, height_, width_, pooled_height_, pooled_width_,
        spatial_scale_);
    CUDA_POST_KERNEL_CHECK;
}

} /* !extern "C" */



================================================
FILE: external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh
================================================
/*
 * File : prroi_pooling_gpu_impl.cuh
 * Author : Tete Xiao, Jiayuan Mao
 * Email : jasonhsiao97@gmail.com
 *
 * Distributed under terms of the MIT license.
 * Copyright (c) 2017 Megvii Technology Limited.
 */

#ifndef PRROI_POOLING_GPU_IMPL_CUH
#define PRROI_POOLING_GPU_IMPL_CUH

#ifdef __cplusplus
extern "C" {
#endif

/* Read-only / writable device float pointer aliases used throughout. */
#define F_DEVPTR_IN const float *
#define F_DEVPTR_OUT float *

/* Launch the forward pooling kernel over top_count output elements. */
void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count);

/* Zero bottom_diff (bottom_count floats) and scatter feature gradients. */
void PrRoIPoolingBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

/* Zero bottom_diff (nr_rois * 5 floats) and scatter coordinate gradients. */
void PrRoIPoolingCoorBackwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count, const int bottom_count);

#ifdef __cplusplus
} /* !extern "C" */
#endif

#endif /* !PRROI_POOLING_GPU_IMPL_CUH */



================================================
FILE: external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py
================================================
# -*- coding: utf-8 -*-
# File : test_prroi_pooling2d.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 18/02/2018
#
# This file is part of Jacinle.
import unittest import torch import torch.nn as nn import torch.nn.functional as F from jactorch.utils.unittest import TorchTestCase from prroi_pool import PrRoIPool2D class TestPrRoIPool2D(TorchTestCase): def test_forward(self): pool = PrRoIPool2D(7, 7, spatial_scale=0.5) features = torch.rand((4, 16, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 14, 14], [1, 14, 14, 28, 28], ]).float().cuda() out = pool(features, rois) out_gold = F.avg_pool2d(features, kernel_size=2, stride=1) self.assertTensorClose(out, torch.stack(( out_gold[0, :, :7, :7], out_gold[1, :, 7:14, 7:14], ), dim=0)) def test_backward_shapeonly(self): pool = PrRoIPool2D(2, 2, spatial_scale=0.5) features = torch.rand((4, 2, 24, 32)).cuda() rois = torch.tensor([ [0, 0, 0, 4, 4], [1, 14, 14, 18, 18], ]).float().cuda() features.requires_grad = rois.requires_grad = True out = pool(features, rois) loss = out.sum() loss.backward() self.assertTupleEqual(features.size(), features.grad.size()) self.assertTupleEqual(rois.size(), rois.grad.size()) if __name__ == '__main__': unittest.main() ================================================ FILE: external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu ================================================ /* * File : prroi_pooling_gpu_impl.cu * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
 */

/* This file is a byte-duplicate of
 * pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu kept at the repository
 * root; the same extraction damage applies (missing include names, lost
 * static_cast template arguments and <<<...>>> launch configurations). */

#include "prroi_pooling_gpu_impl.cuh"
/* NOTE(review): the two system include targets below were lost in
 * extraction; restore from the upstream PreciseRoIPooling repository. */
#include
#include

/* Grid-stride loop over n work items. */
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

/* Abort on any pending CUDA error after a kernel launch. */
#define CUDA_POST_KERNEL_CHECK \
    do { \
        cudaError_t err = cudaGetLastError(); \
        if (cudaSuccess != err) { \
            fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \
            exit(-1); \
        } \
    } while(0)

#define CUDA_NUM_THREADS 512

namespace {

/* Ceiling divide: blocks needed to cover N elements. */
static int CUDA_NUM_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

/* data[h][w] with implicit zero padding outside the plane. */
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    float retVal = overflow ? 0.0f : data[h * width + w];
    return retVal;
}

/* Bilinear weight (1-|dh|)(1-|dw|). */
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){
    dw = dw > 0 ? dw : -dw;
    dh = dh > 0 ? dh : -dh;
    return (1.0f - dh) * (1.0f - dw);
}

/* Integral over [s, t] of the linear blend (1-u)*c1 + u*c2. */
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}

/* Bilinear interpolation at fractional (h, w): weighted sum of the four
 * integer neighbors. */
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){
    float retVal = 0.0f;
    int h1 = floorf(h);
    int w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w);
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h);
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    h1 = floorf(h)+1;
    w1 = floorf(w)+1;
    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
    return retVal;
}

/* Exact integral of the bilinearly-interpolated feature over the clipped
 * window [x0, x1] x [y0, y1] in the unit cell (s_h, s_w)..(e_h, e_w). */
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w,
        const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;
    float sum_out = 0;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;

    return sum_out;
}

/* Atomic scatter of top_diff*coeff into diff[h][w]; no-op out of bounds. */
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff) {
    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
    if (!overflow)
        atomicAdd(diff + h * width + w, top_diff * coeff);
}

/* Adjoint of PrRoIPoolingMatCalculation: same four corner weights, scattered. */
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w,
        const int e_h, const int e_w, const float y0, const float x0, const float y1, const float x1,
        const int h0, const int w0) {
    float alpha, beta, lim_alpha, lim_beta, tmp;

    alpha = x0 - float(s_w);
    beta = y0 - float(s_h);
    lim_alpha = x1 - float(s_w);
    lim_beta = y1 - float(s_h);
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);

    alpha = x0 - float(s_w);
    beta = float(e_h) - y1;
    lim_alpha = x1 - float(s_w);
    lim_beta = float(e_h) - y0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);

    alpha = float(e_w) - x1;
    lim_alpha = float(e_w) - x0;
    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}

/* Forward kernel: one work item per pooled output cell. */
__global__ void PrRoIPoolingForward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        /* roi row layout: (batch_index, x1, y1, x2, y2). */
        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];

        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
        float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
        /* NOTE(review): static_cast template arguments lost in extraction;
         * presumably static_cast<float>(...) upstream. */
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        float *this_out = top_data + index;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);
        if (win_size == 0) {
            *this_out = 0;
            /* NOTE(review): `return` aborts the whole grid-stride loop for
             * this thread, not just this work item — confirm upstream. */
            return;
        }

        float sum_out = 0;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
        *this_out = sum_out / win_size;
    }
}

/* Backward kernel w.r.t. the input feature map. */
__global__ void PrRoIPoolingBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        /* Forward divides by win_size, so the gradient does too. */
        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        for (int w_iter = s_w; w_iter < e_w; ++w_iter)
            for (int h_iter = s_h; h_iter < e_h; ++h_iter)
                PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
                    max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                    min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
                    height, width);
    }
}

/* Backward kernel w.r.t. the roi coordinates via boundary line integrals. */
__global__ void PrRoIPoolingCoorBackward(
    const int nthreads,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_IN top_data,
    F_DEVPTR_IN top_diff,
    F_DEVPTR_OUT bottom_diff,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width,
    const float spatial_scale) {

    CUDA_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;

        bottom_rois += n * 5;
        int roi_batch_ind = bottom_rois[0];
        float roi_start_w = bottom_rois[1] * spatial_scale;
        float roi_start_h = bottom_rois[2] * spatial_scale;
        float roi_end_w = bottom_rois[3] * spatial_scale;
        float roi_end_h = bottom_rois[4] * spatial_scale;

        float roi_width = max(roi_end_w - roi_start_w, (float)0);
        float roi_height = max(roi_end_h - roi_start_h, (float)0);
        float bin_size_h = roi_height / static_cast(pooled_height);
        float bin_size_w = roi_width / static_cast(pooled_width);

        const float *this_out_grad = top_diff + index;
        const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
        const float *this_top_data = top_data + index;
        float *this_data_grad = bottom_diff + n * 5;

        float win_start_w = roi_start_w + bin_size_w * pw;
        float win_start_h = roi_start_h + bin_size_h * ph;
        float win_end_w = win_start_w + bin_size_w;
        float win_end_h = win_start_h + bin_size_h;

        float win_size = max(float(0.0), bin_size_w * bin_size_h);

        float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;

        // WARNING: to be discussed
        /* NOTE(review): `return` aborts the whole grid-stride loop for this
         * thread, not just this work item. */
        if (sum_out == 0)
            return;

        int s_w, s_h, e_w, e_h;

        s_w = floorf(win_start_w);
        e_w = ceilf(win_end_w);
        s_h = floorf(win_start_h);
        e_h = ceilf(win_end_h);

        /* Line integrals of the interpolated feature along the four window
         * edges (x = win_start_w / win_end_w, y = win_start_h / win_end_h). */
        float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
        for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
            g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));

            g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
                    min(win_end_h, float(h_iter + 1)) - h_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
        }

        for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
            g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));

            g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
                    min(win_end_w, float(w_iter + 1)) - w_iter,
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
                    PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
        }

        /* Leibniz rule for the normalized window integral: boundary integral
         * minus a correction proportional to the current pooled value. */
        float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
        float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
        float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
        float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);

        partial_x1 = partial_x1 / win_size * spatial_scale;
        partial_x2 = partial_x2 / win_size * spatial_scale;
        partial_y1 = partial_y1 / win_size * spatial_scale;
        partial_y2 = partial_y2 / win_size * spatial_scale;

        // (b, x1, y1, x2, y2)
        this_data_grad[0] = 0;
        atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width)) * (*this_out_grad));
        atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height)) * (*this_out_grad));
        atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width) * (*this_out_grad));
        atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height) * (*this_out_grad));
    }
}

} /* !anonymous namespace */

#ifdef __cplusplus
extern "C" {
#endif

/* Host wrapper for the forward kernel.
 * NOTE(review): launch configuration between `<<` and `>>` lost in
 * extraction; presumably
 * <<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>. */
void PrRoIPoolingForwardGpu(
    cudaStream_t stream,
    F_DEVPTR_IN bottom_data,
    F_DEVPTR_IN bottom_rois,
    F_DEVPTR_OUT top_data,
    const int channels_, const int height_, const int width_,
    const int pooled_height_, const int pooled_width_,
    const float spatial_scale_, const int top_count) {

    PrRoIPoolingForward<<>>(
        top_count, bottom_data, bottom_rois, top_data,
        channels_, height_, width_, pooled_height_, pooled_width_,
spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingBackward<<>>( top_count, bottom_rois, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count) { cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream); PrRoIPoolingCoorBackward<<>>( top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff, channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_); CUDA_POST_KERNEL_CHECK; } } /* !extern "C" */ ================================================ FILE: external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh ================================================ /* * File : prroi_pooling_gpu_impl.cuh * Author : Tete Xiao, Jiayuan Mao * Email : jasonhsiao97@gmail.com * * Distributed under terms of the MIT license. * Copyright (c) 2017 Megvii Technology Limited. 
*/ #ifndef PRROI_POOLING_GPU_IMPL_CUH #define PRROI_POOLING_GPU_IMPL_CUH #ifdef __cplusplus extern "C" { #endif #define F_DEVPTR_IN const float * #define F_DEVPTR_OUT float * void PrRoIPoolingForwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_OUT top_data, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count); void PrRoIPoolingBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); void PrRoIPoolingCoorBackwardGpu( cudaStream_t stream, F_DEVPTR_IN bottom_data, F_DEVPTR_IN bottom_rois, F_DEVPTR_IN top_data, F_DEVPTR_IN top_diff, F_DEVPTR_OUT bottom_diff, const int channels_, const int height_, const int width_, const int pooled_height_, const int pooled_width_, const float spatial_scale_, const int top_count, const int bottom_count); #ifdef __cplusplus } /* !extern "C" */ #endif #endif /* !PRROI_POOLING_GPU_IMPL_CUH */ ================================================ FILE: external/vot20/cttrack/config.yaml ================================================ registry: - ./trackers.ini stack: vot2020 ================================================ FILE: external/vot20/cttrack/trackers.ini ================================================ [cttrack_large] # label = cttrack_large protocol = traxpython command = from cttrack_start import main;main() # Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths) # paths = /home/lr/workspace/CTTrack: paths = # Additional environment paths env_PATH = : ================================================ FILE: lib/__init__.py ================================================ 
================================================ FILE: lib/config/__init__.py ================================================ ================================================ FILE: lib/config/artrack/config.py ================================================ from easydict import EasyDict as edict import yaml """ Add default config for OSTrack. """ cfg = edict() # MODEL cfg.MODEL = edict() cfg.MODEL.PRETRAIN_FILE = "mae_pretrain_vit_base.pth" cfg.MODEL.PRETRAIN_PTH = "" cfg.MODEL.EXTRA_MERGER = False cfg.MODEL.RETURN_INTER = False cfg.MODEL.RETURN_STAGES = [2, 5, 8, 11] # MODEL.BACKBONE cfg.MODEL.BACKBONE = edict() cfg.MODEL.BACKBONE.TYPE = "vit_base_patch16_224" cfg.MODEL.BACKBONE.STRIDE = 16 cfg.MODEL.BACKBONE.MID_PE = False cfg.MODEL.BACKBONE.SEP_SEG = False cfg.MODEL.BACKBONE.CAT_MODE = 'direct' cfg.MODEL.BACKBONE.MERGE_LAYER = 0 cfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False cfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore' cfg.MODEL.BACKBONE.CE_LOC = [] cfg.MODEL.BACKBONE.CE_KEEP_RATIO = [] cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX # MODEL.HEAD cfg.MODEL.BINS = 400 cfg.MODEL.RANGE = 2 cfg.MODEL.ENCODER_LAYER = 3 cfg.MODEL.NUM_HEADS = 12 cfg.MODEL.MLP_RATIO = 4 cfg.MODEL.QKV_BIAS = True cfg.MODEL.DROP_RATE = 0.1 cfg.MODEL.ATTN_DROP = 0.0 cfg.MODEL.DROP_PATH = 0.0 cfg.MODEL.DECODER_LAYER = 6 cfg.MODEL.HEAD = edict() cfg.MODEL.HEAD.TYPE = "PIX" cfg.MODEL.HEAD.NUM_CHANNELS = 1024 # TRAIN cfg.TRAIN = edict() cfg.TRAIN.LR = 0.0001 cfg.TRAIN.WEIGHT_DECAY = 0.0001 cfg.TRAIN.EPOCH = 500 cfg.TRAIN.LR_DROP_EPOCH = 400 cfg.TRAIN.BATCH_SIZE = 16 cfg.TRAIN.NUM_WORKER = 10 cfg.TRAIN.OPTIMIZER = "ADAMW" cfg.TRAIN.BACKBONE_MULTIPLIER = 0.1 cfg.TRAIN.GIOU_WEIGHT = 2.0 cfg.TRAIN.L1_WEIGHT = 5.0 cfg.TRAIN.FREEZE_LAYERS = [0, ] cfg.TRAIN.PRINT_INTERVAL = 50 cfg.TRAIN.VAL_EPOCH_INTERVAL = 20 cfg.TRAIN.GRAD_CLIP_NORM = 0.1 cfg.TRAIN.AMP = False cfg.TRAIN.CE_START_EPOCH = 20 # candidate elimination start epoch cfg.TRAIN.CE_WARM_EPOCH = 80 
# candidate elimination warm up epoch cfg.TRAIN.DROP_PATH_RATE = 0.1 # drop path rate for ViT backbone # TRAIN.SCHEDULER cfg.TRAIN.SCHEDULER = edict() cfg.TRAIN.SCHEDULER.TYPE = "step" cfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1 # DATA cfg.DATA = edict() cfg.DATA.SAMPLER_MODE = "causal" # sampling methods cfg.DATA.MEAN = [0.485, 0.456, 0.406] cfg.DATA.STD = [0.229, 0.224, 0.225] cfg.DATA.MAX_SAMPLE_INTERVAL = 200 # DATA.TRAIN cfg.DATA.TRAIN = edict() cfg.DATA.TRAIN.DATASETS_NAME = ["LASOT", "GOT10K_vottrain"] cfg.DATA.TRAIN.DATASETS_RATIO = [1, 1] cfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000 # DATA.VAL cfg.DATA.VAL = edict() cfg.DATA.VAL.DATASETS_NAME = ["GOT10K_votval"] cfg.DATA.VAL.DATASETS_RATIO = [1] cfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000 # DATA.SEARCH cfg.DATA.SEARCH = edict() cfg.DATA.SEARCH.SIZE = 256 cfg.DATA.SEARCH.FACTOR = 5.0 cfg.DATA.SEARCH.CENTER_JITTER = 4.5 cfg.DATA.SEARCH.SCALE_JITTER = 0.5 cfg.DATA.SEARCH.NUMBER = 1 # DATA.TEMPLATE cfg.DATA.TEMPLATE = edict() cfg.DATA.TEMPLATE.NUMBER = 1 cfg.DATA.TEMPLATE.SIZE = 128 cfg.DATA.TEMPLATE.FACTOR = 2.0 cfg.DATA.TEMPLATE.CENTER_JITTER = 0 cfg.DATA.TEMPLATE.SCALE_JITTER = 0 # TEST cfg.TEST = edict() cfg.TEST.TEMPLATE_FACTOR = 2.0 cfg.TEST.TEMPLATE_SIZE = 128 cfg.TEST.SEARCH_FACTOR = 5.0 cfg.TEST.SEARCH_SIZE = 256 cfg.TEST.EPOCH = 500 def _edict2dict(dest_dict, src_edict): if isinstance(dest_dict, dict) and isinstance(src_edict, dict): for k, v in src_edict.items(): if not isinstance(v, edict): dest_dict[k] = v else: dest_dict[k] = {} _edict2dict(dest_dict[k], v) else: return def gen_config(config_file): cfg_dict = {} _edict2dict(cfg_dict, cfg) with open(config_file, 'w') as f: yaml.dump(cfg_dict, f, default_flow_style=False) def _update_config(base_cfg, exp_cfg): if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict): for k, v in exp_cfg.items(): if k in base_cfg: if not isinstance(v, dict): base_cfg[k] = v else: _update_config(base_cfg[k], v) else: raise ValueError("{} not exist in config.py".format(k)) else: 
return def update_config_from_file(filename, base_cfg=None): exp_config = None with open(filename) as f: exp_config = edict(yaml.safe_load(f)) if base_cfg is not None: _update_config(base_cfg, exp_config) else: _update_config(cfg, exp_config) ================================================ FILE: lib/config/artrack_seq/config.py ================================================ from easydict import EasyDict as edict import yaml """ Add default config for OSTrack. """ cfg = edict() # MODEL cfg.MODEL = edict() cfg.MODEL.PRETRAIN_FILE = "mae_pretrain_vit_base.pth" cfg.MODEL.PRETRAIN_PTH = "" cfg.MODEL.PRENUM = 7 cfg.MODEL.EXTRA_MERGER = False cfg.MODEL.RETURN_INTER = False cfg.MODEL.RETURN_STAGES = [2, 5, 8, 11] # MODEL.BACKBONE cfg.MODEL.BACKBONE = edict() cfg.MODEL.BACKBONE.TYPE = "vit_base_patch16_224" cfg.MODEL.BACKBONE.STRIDE = 16 cfg.MODEL.BACKBONE.MID_PE = False cfg.MODEL.BACKBONE.SEP_SEG = False cfg.MODEL.BACKBONE.CAT_MODE = 'direct' cfg.MODEL.BACKBONE.MERGE_LAYER = 0 cfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False cfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore' cfg.MODEL.BACKBONE.CE_LOC = [] cfg.MODEL.BACKBONE.CE_KEEP_RATIO = [] cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX # MODEL.HEAD cfg.MODEL.BINS = 400 cfg.MODEL.RANGE = 2 cfg.MODEL.ENCODER_LAYER = 3 cfg.MODEL.NUM_HEADS = 12 cfg.MODEL.MLP_RATIO = 4 cfg.MODEL.QKV_BIAS = True cfg.MODEL.DROP_RATE = 0.1 cfg.MODEL.ATTN_DROP = 0.0 cfg.MODEL.DROP_PATH = 0.0 cfg.MODEL.DECODER_LAYER = 6 cfg.MODEL.HEAD = edict() cfg.MODEL.HEAD.TYPE = "PIX" cfg.MODEL.HEAD.NUM_CHANNELS = 1024 # TRAIN cfg.TRAIN = edict() cfg.TRAIN.LR = 0.0001 cfg.TRAIN.WEIGHT_DECAY = 0.0001 cfg.TRAIN.EPOCH = 500 cfg.TRAIN.LR_DROP_EPOCH = 400 cfg.TRAIN.BATCH_SIZE = 16 cfg.TRAIN.NUM_WORKER = 10 cfg.TRAIN.OPTIMIZER = "ADAMW" cfg.TRAIN.BACKBONE_MULTIPLIER = 0.1 cfg.TRAIN.GIOU_WEIGHT = 2.0 cfg.TRAIN.L1_WEIGHT = 5.0 cfg.TRAIN.FREEZE_LAYERS = [0, ] cfg.TRAIN.PRINT_INTERVAL = 50 cfg.TRAIN.VAL_EPOCH_INTERVAL = 20 
cfg.TRAIN.GRAD_CLIP_NORM = 0.1 cfg.TRAIN.AMP = False cfg.TRAIN.CE_START_EPOCH = 20 # candidate elimination start epoch cfg.TRAIN.CE_WARM_EPOCH = 80 # candidate elimination warm up epoch cfg.TRAIN.DROP_PATH_RATE = 0.1 # drop path rate for ViT backbone # TRAIN.SCHEDULER cfg.TRAIN.SCHEDULER = edict() cfg.TRAIN.SCHEDULER.TYPE = "step" cfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1 # DATA cfg.DATA = edict() cfg.DATA.SAMPLER_MODE = "causal" # sampling methods cfg.DATA.MEAN = [0.485, 0.456, 0.406] cfg.DATA.STD = [0.229, 0.224, 0.225] cfg.DATA.MAX_SAMPLE_INTERVAL = 200 cfg.DATA.MAX_GAP = 300 cfg.DATA.MAX_INTERVAL = 5 cfg.DATA.INTERVAL_PROB = 0.0 cfg.DATA.TEMP = 2 # DATA.TRAIN cfg.DATA.TRAIN = edict() cfg.DATA.TRAIN.DATASETS_NAME = ["LASOT", "GOT10K_vottrain"] cfg.DATA.TRAIN.DATASETS_RATIO = [1, 1] cfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000 # DATA.VAL cfg.DATA.VAL = edict() cfg.DATA.VAL.DATASETS_NAME = ["GOT10K_votval"] cfg.DATA.VAL.DATASETS_RATIO = [1] cfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000 # DATA.SEARCH cfg.DATA.SEARCH = edict() cfg.DATA.SEARCH.SIZE = 256 cfg.DATA.SEARCH.FACTOR = 5.0 cfg.DATA.SEARCH.CENTER_JITTER = 4.5 cfg.DATA.SEARCH.SCALE_JITTER = 0.5 cfg.DATA.SEARCH.NUMBER = 1 # DATA.TEMPLATE cfg.DATA.TEMPLATE = edict() cfg.DATA.TEMPLATE.NUMBER = 1 cfg.DATA.TEMPLATE.SIZE = 128 cfg.DATA.TEMPLATE.FACTOR = 2.0 cfg.DATA.TEMPLATE.CENTER_JITTER = 0 cfg.DATA.TEMPLATE.SCALE_JITTER = 0 # TEST cfg.TEST = edict() cfg.TEST.TEMPLATE_FACTOR = 2.0 cfg.TEST.TEMPLATE_SIZE = 128 cfg.TEST.SEARCH_FACTOR = 5.0 cfg.TEST.SEARCH_SIZE = 256 cfg.TEST.EPOCH = 500 def _edict2dict(dest_dict, src_edict): if isinstance(dest_dict, dict) and isinstance(src_edict, dict): for k, v in src_edict.items(): if not isinstance(v, edict): dest_dict[k] = v else: dest_dict[k] = {} _edict2dict(dest_dict[k], v) else: return def gen_config(config_file): cfg_dict = {} _edict2dict(cfg_dict, cfg) with open(config_file, 'w') as f: yaml.dump(cfg_dict, f, default_flow_style=False) def _update_config(base_cfg, exp_cfg): if 
isinstance(base_cfg, dict) and isinstance(exp_cfg, edict): for k, v in exp_cfg.items(): if k in base_cfg: if not isinstance(v, dict): base_cfg[k] = v else: _update_config(base_cfg[k], v) else: raise ValueError("{} not exist in config.py".format(k)) else: return def update_config_from_file(filename, base_cfg=None): exp_config = None with open(filename) as f: exp_config = edict(yaml.safe_load(f)) if base_cfg is not None: _update_config(base_cfg, exp_config) else: _update_config(cfg, exp_config) ================================================ FILE: lib/config/artrackv2/config.py ================================================ from easydict import EasyDict as edict import yaml """ Add default config for OSTrack. """ cfg = edict() # MODEL cfg.MODEL = edict() cfg.MODEL.PRETRAIN_FILE = "mae_pretrain_vit_base.pth" cfg.MODEL.PRETRAIN_PTH = "" cfg.MODEL.EXTRA_MERGER = False cfg.MODEL.RETURN_INTER = False cfg.MODEL.RETURN_STAGES = [2, 5, 8, 11] # MODEL.BACKBONE cfg.MODEL.BACKBONE = edict() cfg.MODEL.BACKBONE.TYPE = "vit_base_patch16_224" cfg.MODEL.BACKBONE.STRIDE = 16 cfg.MODEL.BACKBONE.MID_PE = False cfg.MODEL.BACKBONE.SEP_SEG = False cfg.MODEL.BACKBONE.CAT_MODE = 'direct' cfg.MODEL.BACKBONE.MERGE_LAYER = 0 cfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False cfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore' cfg.MODEL.BACKBONE.CE_LOC = [] cfg.MODEL.BACKBONE.CE_KEEP_RATIO = [] cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX # MODEL.HEAD cfg.MODEL.BINS = 400 cfg.MODEL.RANGE = 2 cfg.MODEL.EXTENSION = 3 cfg.MODEL.ENCODER_LAYER = 3 cfg.MODEL.NUM_HEADS = 12 cfg.MODEL.MLP_RATIO = 4 cfg.MODEL.QKV_BIAS = True cfg.MODEL.DROP_RATE = 0.1 cfg.MODEL.ATTN_DROP = 0.0 cfg.MODEL.DROP_PATH = 0.0 cfg.MODEL.DECODER_LAYER = 6 cfg.MODEL.HEAD = edict() cfg.MODEL.HEAD.TYPE = "PIX" cfg.MODEL.HEAD.NUM_CHANNELS = 1024 # TRAIN cfg.TRAIN = edict() cfg.TRAIN.LR = 0.0001 cfg.TRAIN.WEIGHT_DECAY = 0.0001 cfg.TRAIN.EPOCH = 500 cfg.TRAIN.LR_DROP_EPOCH = 400 
cfg.TRAIN.BATCH_SIZE = 16 cfg.TRAIN.NUM_WORKER = 10 cfg.TRAIN.OPTIMIZER = "ADAMW" cfg.TRAIN.BACKBONE_MULTIPLIER = 0.1 cfg.TRAIN.GIOU_WEIGHT = 2.0 cfg.TRAIN.L1_WEIGHT = 5.0 cfg.TRAIN.SCORE_WEIGHT = 0.0 cfg.TRAIN.FREEZE_LAYERS = [0, ] cfg.TRAIN.PRINT_INTERVAL = 50 cfg.TRAIN.VAL_EPOCH_INTERVAL = 20 cfg.TRAIN.GRAD_CLIP_NORM = 0.1 cfg.TRAIN.AMP = False cfg.TRAIN.CE_START_EPOCH = 20 # candidate elimination start epoch cfg.TRAIN.CE_WARM_EPOCH = 80 # candidate elimination warm up epoch cfg.TRAIN.DROP_PATH_RATE = 0.1 # drop path rate for ViT backbone # TRAIN.SCHEDULER cfg.TRAIN.SCHEDULER = edict() cfg.TRAIN.SCHEDULER.TYPE = "step" cfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1 # DATA cfg.DATA = edict() cfg.DATA.SAMPLER_MODE = "causal" # sampling methods cfg.DATA.MEAN = [0.485, 0.456, 0.406] cfg.DATA.STD = [0.229, 0.224, 0.225] cfg.DATA.MAX_SAMPLE_INTERVAL = 200 # DATA.TRAIN cfg.DATA.TRAIN = edict() cfg.DATA.TRAIN.DATASETS_NAME = ["LASOT", "GOT10K_vottrain"] cfg.DATA.TRAIN.DATASETS_RATIO = [1, 1] cfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000 # DATA.VAL cfg.DATA.VAL = edict() cfg.DATA.VAL.DATASETS_NAME = ["GOT10K_votval"] cfg.DATA.VAL.DATASETS_RATIO = [1] cfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000 # DATA.SEARCH cfg.DATA.SEARCH = edict() cfg.DATA.SEARCH.SIZE = 256 cfg.DATA.SEARCH.FACTOR = 5.0 cfg.DATA.SEARCH.CENTER_JITTER = 4.5 cfg.DATA.SEARCH.SCALE_JITTER = 0.5 cfg.DATA.SEARCH.NUMBER = 1 # DATA.TEMPLATE cfg.DATA.TEMPLATE = edict() cfg.DATA.TEMPLATE.NUMBER = 1 cfg.DATA.TEMPLATE.SIZE = 128 cfg.DATA.TEMPLATE.FACTOR = 2.0 cfg.DATA.TEMPLATE.CENTER_JITTER = 0 cfg.DATA.TEMPLATE.SCALE_JITTER = 0 # TEST cfg.TEST = edict() cfg.TEST.TEMPLATE_FACTOR = 2.0 cfg.TEST.TEMPLATE_SIZE = 128 cfg.TEST.SEARCH_FACTOR = 5.0 cfg.TEST.SEARCH_SIZE = 256 cfg.TEST.EPOCH = 500 def _edict2dict(dest_dict, src_edict): if isinstance(dest_dict, dict) and isinstance(src_edict, dict): for k, v in src_edict.items(): if not isinstance(v, edict): dest_dict[k] = v else: dest_dict[k] = {} _edict2dict(dest_dict[k], v) else: return def 
gen_config(config_file): cfg_dict = {} _edict2dict(cfg_dict, cfg) with open(config_file, 'w') as f: yaml.dump(cfg_dict, f, default_flow_style=False) def _update_config(base_cfg, exp_cfg): if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict): for k, v in exp_cfg.items(): if k in base_cfg: if not isinstance(v, dict): base_cfg[k] = v else: _update_config(base_cfg[k], v) else: raise ValueError("{} not exist in config.py".format(k)) else: return def update_config_from_file(filename, base_cfg=None): exp_config = None with open(filename) as f: exp_config = edict(yaml.safe_load(f)) if base_cfg is not None: _update_config(base_cfg, exp_config) else: _update_config(cfg, exp_config) ================================================ FILE: lib/config/artrackv2_seq/config.py ================================================ from easydict import EasyDict as edict import yaml """ Add default config for OSTrack. """ cfg = edict() # MODEL cfg.MODEL = edict() cfg.MODEL.PRETRAIN_FILE = "mae_pretrain_vit_base.pth" cfg.MODEL.PRETRAIN_PTH = "" cfg.MODEL.EXTRA_MERGER = False cfg.MODEL.RETURN_INTER = False cfg.MODEL.RETURN_STAGES = [2, 5, 8, 11] # MODEL.DECODER cfg.MODEL.DECODER = edict() cfg.MODEL.DECODER.TYPE = "mask" cfg.MODEL.DECODER.MASK_RATIO = 0.75 cfg.MODEL.DECODER.EMBEDDIM = 512 cfg.MODEL.DECODER.DEPTH = 8 cfg.MODEL.DECODER.NUMHEADS = 16 cfg.MODEL.DECODER.MLPRATIO = 4 # MODEL.BACKBONE cfg.MODEL.BACKBONE = edict() cfg.MODEL.BACKBONE.TYPE = "vit_base_patch16_224" cfg.MODEL.BACKBONE.STRIDE = 16 cfg.MODEL.BACKBONE.PATCHSIZE = 16 cfg.MODEL.BACKBONE.MID_PE = False cfg.MODEL.BACKBONE.SEP_SEG = False cfg.MODEL.BACKBONE.CAT_MODE = 'direct' cfg.MODEL.BACKBONE.MERGE_LAYER = 0 cfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False cfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore' cfg.MODEL.BACKBONE.CE_LOC = [] cfg.MODEL.BACKBONE.CE_KEEP_RATIO = [] cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL' # choose between ALL, CTR_POINT, CTR_REC, GT_BOX # MODEL.HEAD cfg.MODEL.BINS = 400 cfg.MODEL.RANGE = 2 
cfg.MODEL.EXTENSION = 3 cfg.MODEL.PRENUM = 7 cfg.MODEL.ENCODER_LAYER = 3 cfg.MODEL.NUM_HEADS = 12 cfg.MODEL.MLP_RATIO = 4 cfg.MODEL.QKV_BIAS = True cfg.MODEL.DROP_RATE = 0.1 cfg.MODEL.ATTN_DROP = 0.0 cfg.MODEL.DROP_PATH = 0.0 cfg.MODEL.DECODER_LAYER = 6 cfg.MODEL.HEAD = edict() cfg.MODEL.HEAD.TYPE = "PIX" cfg.MODEL.HEAD.NUM_CHANNELS = 1024 # TRAIN cfg.TRAIN = edict() cfg.TRAIN.LR = 0.0001 cfg.TRAIN.WEIGHT_DECAY = 0.0001 cfg.TRAIN.EPOCH = 500 cfg.TRAIN.LR_DROP_EPOCH = 400 cfg.TRAIN.BATCH_SIZE = 16 cfg.TRAIN.NUM_WORKER = 10 cfg.TRAIN.OPTIMIZER = "ADAMW" cfg.TRAIN.BACKBONE_MULTIPLIER = 0.1 cfg.TRAIN.GIOU_WEIGHT = 2.0 cfg.TRAIN.L1_WEIGHT = 5.0 cfg.TRAIN.SCORE_WEIGHT = 1.0 cfg.TRAIN.FREEZE_LAYERS = [0, ] cfg.TRAIN.PRINT_INTERVAL = 50 cfg.TRAIN.VAL_EPOCH_INTERVAL = 20 cfg.TRAIN.GRAD_CLIP_NORM = 0.1 cfg.TRAIN.AMP = False cfg.TRAIN.CE_START_EPOCH = 20 # candidate elimination start epoch cfg.TRAIN.CE_WARM_EPOCH = 80 # candidate elimination warm up epoch cfg.TRAIN.DROP_PATH_RATE = 0.1 # drop path rate for ViT backbone # TRAIN.SCHEDULER cfg.TRAIN.SCHEDULER = edict() cfg.TRAIN.SCHEDULER.TYPE = "step" cfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1 # DATA cfg.DATA = edict() cfg.DATA.MAX_GAP = 300 cfg.DATA.SAMPLER_MODE = "causal" # sampling methods cfg.DATA.MEAN = [0.485, 0.456, 0.406] cfg.DATA.STD = [0.229, 0.224, 0.225] cfg.DATA.MAX_SAMPLE_INTERVAL = 200 cfg.DATA.MAX_GAP = 300 cfg.DATA.MAX_INTERVAL = 5 cfg.DATA.INTERVAL_PROB = 0.0 cfg.DATA.TEMP = 2 # DATA.TRAIN cfg.DATA.TRAIN = edict() cfg.DATA.TRAIN.DATASETS_NAME = ["LASOT", "GOT10K_vottrain"] cfg.DATA.TRAIN.DATASETS_RATIO = [1, 1] cfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000 # DATA.VAL cfg.DATA.VAL = edict() cfg.DATA.VAL.DATASETS_NAME = ["GOT10K_votval"] cfg.DATA.VAL.DATASETS_RATIO = [1] cfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000 # DATA.SEARCH cfg.DATA.SEARCH = edict() cfg.DATA.SEARCH.SIZE = 256 cfg.DATA.SEARCH.FACTOR = 5.0 cfg.DATA.SEARCH.CENTER_JITTER = 4.5 cfg.DATA.SEARCH.SCALE_JITTER = 0.5 cfg.DATA.SEARCH.NUMBER = 1 # DATA.TEMPLATE 
cfg.DATA.TEMPLATE = edict() cfg.DATA.TEMPLATE.NUMBER = 1 cfg.DATA.TEMPLATE.SIZE = 128 cfg.DATA.TEMPLATE.FACTOR = 2.0 cfg.DATA.TEMPLATE.CENTER_JITTER = 0 cfg.DATA.TEMPLATE.SCALE_JITTER = 0 # TEST cfg.TEST = edict() cfg.TEST.TEMPLATE_FACTOR = 2.0 cfg.TEST.TEMPLATE_SIZE = 128 cfg.TEST.SEARCH_FACTOR = 5.0 cfg.TEST.SEARCH_SIZE = 256 cfg.TEST.EPOCH = 500 def _edict2dict(dest_dict, src_edict): if isinstance(dest_dict, dict) and isinstance(src_edict, dict): for k, v in src_edict.items(): if not isinstance(v, edict): dest_dict[k] = v else: dest_dict[k] = {} _edict2dict(dest_dict[k], v) else: return def gen_config(config_file): cfg_dict = {} _edict2dict(cfg_dict, cfg) with open(config_file, 'w') as f: yaml.dump(cfg_dict, f, default_flow_style=False) def _update_config(base_cfg, exp_cfg): if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict): for k, v in exp_cfg.items(): if k in base_cfg: if not isinstance(v, dict): base_cfg[k] = v else: _update_config(base_cfg[k], v) else: raise ValueError("{} not exist in config.py".format(k)) else: return def update_config_from_file(filename, base_cfg=None): exp_config = None with open(filename) as f: exp_config = edict(yaml.safe_load(f)) if base_cfg is not None: _update_config(base_cfg, exp_config) else: _update_config(cfg, exp_config) ================================================ FILE: lib/models/__init__.py ================================================ from .artrack.artrack import build_artrack ================================================ FILE: lib/models/artrack/__init__.py ================================================ from .artrack import build_artrack ================================================ FILE: lib/models/artrack/artrack.py ================================================ """ Basic OSTrack model. 
""" import math import os from typing import List import torch from torch import nn from torch.nn.modules.transformer import _get_clones from timm.models.layers import DropPath, to_2tuple, trunc_normal_ from lib.models.layers.head import build_pix_head from lib.models.artrack.vit import vit_base_patch16_224, vit_large_patch16_224 from lib.utils.box_ops import box_xyxy_to_cxcywh class ARTrack(nn.Module): """ This is the base class for ARTrack """ def __init__(self, transformer, pix_head, hidden_dim): """ Initializes the model. Parameters: transformer: torch module of the transformer architecture. """ super().__init__() self.identity = torch.nn.Parameter(torch.zeros(1, 2, hidden_dim)) self.identity = trunc_normal_(self.identity, std=.02) self.backbone = transformer self.pix_head = pix_head def forward(self, template: torch.Tensor, search: torch.Tensor, seq_input=None ): x = self.backbone(z=template, x=search, identity=self.identity,) # Forward head feat_last = x if isinstance(x, list): feat_last = x[-1] pos_z = self.backbone.pos_embed_z pos_x = self.backbone.pos_embed_x out = self.forward_head(feat_last, pos_z, pos_x, self.identity, seq_input) out['backbone_feat'] = x return out def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None,): output_dict = self.pix_head(cat_feature, pos_z, pos_x, identity, seq_input) return output_dict def build_artrack(cfg, training=True): current_dir = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root pretrained_path = os.path.join(current_dir, '../../../pretrained_models') if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training: pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE) else: pretrained = '' if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224': backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE) hidden_dim = backbone.embed_dim patch_start_index = 1 elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224': 
print("i use vit_large") backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE) hidden_dim = backbone.embed_dim patch_start_index = 1 else: raise NotImplementedError backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index) pix_head = build_pix_head(cfg, hidden_dim) model = ARTrack( backbone, pix_head, hidden_dim, ) if cfg.MODEL.PRETRAIN_PTH != "": load_from = cfg.MODEL.PRETRAIN_PTH checkpoint = torch.load(load_from, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False) print('Load pretrained model from: ' + load_from) if 'ARTrack' in cfg.MODEL.PRETRAIN_FILE and training: checkpoint = torch.load(cfg.MODEL.PRETRAIN_FILE, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False) print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE) return model ================================================ FILE: lib/models/artrack/base_backbone.py ================================================ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.vision_transformer import resize_pos_embed from timm.models.layers import DropPath, to_2tuple, trunc_normal_ from lib.models.layers.patch_embed import PatchEmbed from lib.models.artrack.utils import combine_tokens, recover_tokens class BaseBackbone(nn.Module): def __init__(self): super().__init__() # for original ViT self.pos_embed = None self.img_size = [224, 224] self.patch_size = 16 self.embed_dim = 384 self.cat_mode = 'direct' self.pos_embed_z = None self.pos_embed_x = None self.template_segment_pos_embed = None self.search_segment_pos_embed = None self.return_inter = False self.return_stage = [2, 5, 8, 11] self.add_cls_token = False self.add_sep_seg = False def finetune_track(self, cfg, patch_start_index=1): search_size = to_2tuple(cfg.DATA.SEARCH.SIZE) template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE) new_patch_size = 
cfg.MODEL.BACKBONE.STRIDE self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE self.return_inter = cfg.MODEL.RETURN_INTER self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG # resize patch embedding if new_patch_size != self.patch_size: print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!') old_patch_embed = {} for name, param in self.patch_embed.named_parameters(): if 'weight' in name: param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size), mode='bicubic', align_corners=False) param = nn.Parameter(param) old_patch_embed[name] = param self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3, embed_dim=self.embed_dim) self.patch_embed.proj.bias = old_patch_embed['proj.bias'] self.patch_embed.proj.weight = old_patch_embed['proj.weight'] # for patch embedding patch_pos_embed = self.pos_embed[:, patch_start_index:, :] patch_pos_embed = patch_pos_embed.transpose(1, 2) B, E, Q = patch_pos_embed.shape P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W) # for search region H, W = search_size new_P_H, new_P_W = H // new_patch_size, W // new_patch_size search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic', align_corners=False) search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2) # for template region H, W = template_size new_P_H, new_P_W = H // new_patch_size, W // new_patch_size template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic', align_corners=False) template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2) self.pos_embed_z = nn.Parameter(template_patch_pos_embed) self.pos_embed_x = nn.Parameter(search_patch_pos_embed) # for cls token (keep it but not used) if self.add_cls_token and patch_start_index > 0: cls_pos_embed = self.pos_embed[:, 0:1, :] 
self.cls_pos_embed = nn.Parameter(cls_pos_embed) # separate token and segment token if self.add_sep_seg: self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02) self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02) if self.return_inter: for i_layer in self.fpn_stage: if i_layer != 11: norm_layer = partial(nn.LayerNorm, eps=1e-6) layer = norm_layer(self.embed_dim) layer_name = f'norm{i_layer}' self.add_module(layer_name, layer) def forward_features(self, z, x, identity): B, H, W = x.shape[0], x.shape[2], x.shape[3] x = self.patch_embed(x) z = self.patch_embed(z) s_x = x.shape[1] s_z = z.shape[1] if self.add_cls_token: cls_tokens = self.cls_token.expand(B, -1, -1) cls_tokens = cls_tokens + self.cls_pos_embed z += self.pos_embed_z x += self.pos_embed_x z += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1) x += identity[:, 1, :].repeat(B, self.pos_embed_x.shape[1], 1) if self.add_sep_seg: x += self.search_segment_pos_embed z += self.template_segment_pos_embed x = combine_tokens(z, x, mode=self.cat_mode) if self.add_cls_token: x = torch.cat([cls_tokens, x], dim=1) x = self.pos_drop(x) for i, blk in enumerate(self.blocks): x = blk(x) lens_z = self.pos_embed_z.shape[1] lens_x = self.pos_embed_x.shape[1] #x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode) return self.norm(x) def forward(self, z, x, identity, **kwargs): """ Joint feature extraction and relation modeling for the basic ViT backbone. 
        Args:
            z (torch.Tensor): template feature, [B, C, H_z, W_z]
            x (torch.Tensor): search region feature, [B, C, H_x, W_x]

        Returns:
            x (torch.Tensor): merged template and search region feature, [B, L_z+L_x, C]
            attn : None
        """
        x = self.forward_features(z, x, identity)

        return x


================================================
FILE: lib/models/artrack/utils.py
================================================
import math

import torch
import torch.nn.functional as F


def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):
    # Merge template and search tokens (each [B, HW, C]) into one sequence.
    # [B, HW, C]
    len_t = template_tokens.shape[1]
    len_s = search_tokens.shape[1]

    if mode == 'direct':
        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)
    elif mode == 'template_central':
        # insert the template tokens into the middle of the search sequence
        central_pivot = len_s // 2
        first_half = search_tokens[:, :central_pivot, :]
        second_half = search_tokens[:, central_pivot:, :]
        merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)
    elif mode == 'partition':
        feat_size_s = int(math.sqrt(len_s))
        feat_size_t = int(math.sqrt(len_t))
        window_size = math.ceil(feat_size_t / 2.)
        # pad feature maps to multiples of window size
        B, _, C = template_tokens.shape
        H = W = feat_size_t
        template_tokens = template_tokens.view(B, H, W, C)
        pad_l = pad_b = pad_r = 0
        # pad_r = (window_size - W % window_size) % window_size
        pad_t = (window_size - H % window_size) % window_size
        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)
        # stack the two window rows side by side
        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)
        _, Hc, Wc, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, -1, C)
        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)

        # calculate new h and w, which may be useful for SwinT or others
        merged_h, merged_w = feat_size_s + Hc, feat_size_s
        if return_res:
            return merged_feature, merged_h, merged_w

    else:
        raise NotImplementedError

    return merged_feature


def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):
    # Inverse of combine_tokens for 'template_central'; identity for the others.
    if mode == 'direct':
        recovered_tokens = merged_tokens
    elif mode == 'template_central':
        central_pivot = len_search_token // 2
        len_remain = len_search_token - central_pivot
        len_half_and_t = central_pivot + len_template_token

        first_half = merged_tokens[:, :central_pivot, :]
        second_half = merged_tokens[:, -len_remain:, :]
        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]

        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)
    elif mode == 'partition':
        recovered_tokens = merged_tokens
    else:
        raise NotImplementedError

    return recovered_tokens


def window_partition(x, window_size: int):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size: int, H: int, W: int):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


================================================
FILE: lib/models/artrack/vit.py
================================================
""" Vision Transformer (ViT) in PyTorch

A PyTorch implement of Vision Transformers as described in:

'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
    - https://arxiv.org/abs/2010.11929

`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
    - https://arxiv.org/abs/2106.10270

The official jax code is released and available at https://github.com/google-research/vision_transformer

DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877

Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ...
check it out for some einops/einsum fun

* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert

Hacked together by / Copyright 2021 Ross Wightman

Modified by Botao Ye
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv
from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_
from timm.models.registry import register_model

from lib.models.layers.patch_embed import PatchEmbed
from lib.models.artrack.base_backbone import BaseBackbone


class Attention(nn.Module):
    # Standard multi-head self-attention (timm-style), with an optional
    # return of the attention map.
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, return_attention=False):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        if return_attention:
            return x, attn

        return x


class Block(nn.Module):
    # Pre-norm transformer encoder block: attention + MLP, each with residual
    # connection and (optional) stochastic-depth drop path.
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        if return_attention:
            feat, attn = self.attn(self.norm1(x), True)
            x = x + self.drop_path(feat)
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x, attn
        else:
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x


class VisionTransformer(BaseBackbone):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        # self.extension = nn.Sequential(*[
        #     Block(
        #         dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
        #         attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
        #     for i in range(6)])
        self.norm = norm_layer(embed_dim)

        # # Representation layer
        # if representation_size and not distilled:
        #     self.num_features = representation_size
        #     self.pre_logits = nn.Sequential(OrderedDict([
        #         ('fc', nn.Linear(embed_dim, representation_size)),
        #         ('act', nn.Tanh())
        #     ]))
        # else:
        #     self.pre_logits = nn.Identity()
        #
        # # Classifier head(s)
        # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        # self.head_dist = None
        # if distilled:
        #     self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

        self.init_weights(weight_init)

    def init_weights(self, mode=''):
        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        # NOTE(review): self.head / self.head_dist construction is commented out
        # in __init__ above — these accessors look vestigial; confirm callers.
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()


def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif name.startswith('pre_logits'):
            lecun_normal_(module.weight)
            nn.init.zeros_(module.bias)
        else:
            if jax_impl:
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    if 'mlp' in name:
                        nn.init.normal_(module.bias, std=1e-6)
                    else:
                        nn.init.zeros_(module.bias)
            else:
                trunc_normal_(module.weight, std=.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
    elif jax_impl and isinstance(module, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, transposing Flax layouts to PyTorch layouts when t=True
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))


def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    # Rescale the grid of position embeddings when loading from state_dict. Adapted from
    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    # NOTE(review): print() is used with logging-style %s placeholders — the
    # placeholders are not substituted; likely meant to be _logger.info().
    print('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    print('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb


def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    out_dict = {}
    if 'model' in state_dict:
        # For deit models
        state_dict = state_dict['model']
    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
            # For old models that I trained prior to conv based patchification
            O, I, H, W = model.patch_embed.proj.weight.shape
            v = v.reshape(O, -1, H, W)
        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
            # To resize pos embedding when using model at different size from pretrained weights
            v = resize_pos_embed(
                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        out_dict[k] = v
    return out_dict


def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    model = VisionTransformer(**kwargs)
    if pretrained:
        if 'npz' in pretrained:
            model.load_pretrained(pretrained,
# (continuation of the model.load_pretrained(...) call begun on the previous line)
                                  prefix='')
        else:
            checkpoint = torch.load(pretrained, map_location="cpu")
            missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
            print('Load pretrained model from: ' + pretrained)

    return model


def vit_base_patch16_224(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    """
    model_kwargs = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
    return model


def vit_large_patch16_224(pretrained=False, **kwargs):
    # ViT-Large (ViT-L/16) counterpart of vit_base_patch16_224 above.
    model_kwargs = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
    return model


================================================
FILE: lib/models/artrack/vit_ce.py
================================================
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.models.layers import to_2tuple

from lib.models.layers.patch_embed import PatchEmbed
from .utils import combine_tokens, recover_tokens
from .vit import VisionTransformer
from ..layers.attn_blocks import CEBlock

_logger = logging.getLogger(__name__)


class VisionTransformerCE(VisionTransformer):
    """ Vision Transformer with candidate elimination (CE) module

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929

    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init='', ce_loc=None, ce_keep_ratio=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        # super().__init__()
        super().__init__()
        if isinstance(img_size, tuple):
            self.img_size = img_size
        else:
            self.img_size = to_2tuple(img_size)
        self.patch_size = patch_size
        self.in_chans = in_chans

        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        blocks = []
        ce_index = 0
        self.ce_loc = ce_loc
        for i in range(depth):
            # blocks listed in ce_loc get their configured keep ratio; others keep all tokens
            ce_keep_ratio_i = 1.0
            if ce_loc is not None and i in ce_loc:
                ce_keep_ratio_i = ce_keep_ratio[ce_index]
                ce_index += 1

            blocks.append(
                CEBlock(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                    attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,
                    keep_ratio_search=ce_keep_ratio_i)
            )

        self.blocks = nn.Sequential(*blocks)
        self.norm = norm_layer(embed_dim)

        self.init_weights(weight_init)

    def forward_features(self, z, x, mask_z=None, mask_x=None,
                         ce_template_mask=None, ce_keep_rate=None,
                         return_last_attn=False
                         ):
        B, H, W = x.shape[0], x.shape[2], x.shape[3]

        x = self.patch_embed(x)
        z = self.patch_embed(z)

        # attention mask handling
        # B, H, W
        if mask_z is not None and mask_x is not None:
            mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]
            mask_z = mask_z.flatten(1).unsqueeze(-1)

            mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]
            mask_x = mask_x.flatten(1).unsqueeze(-1)

            mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode)
            mask_x = mask_x.squeeze(-1)

        if self.add_cls_token:
            cls_tokens = self.cls_token.expand(B, -1, -1)
            cls_tokens = cls_tokens + self.cls_pos_embed

        z += self.pos_embed_z
        x += self.pos_embed_x

        if self.add_sep_seg:
            x += self.search_segment_pos_embed
            z += self.template_segment_pos_embed

        x = combine_tokens(z, x, mode=self.cat_mode)
        if self.add_cls_token:
            x = torch.cat([cls_tokens, x], dim=1)

        x = self.pos_drop(x)

        lens_z = self.pos_embed_z.shape[1]
        lens_x = self.pos_embed_x.shape[1]

        # per-sample indices of the surviving template/search tokens
        global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device)
        global_index_t = global_index_t.repeat(B, 1)

        global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device)
        global_index_s = global_index_s.repeat(B, 1)
        removed_indexes_s = []
        for i, blk in enumerate(self.blocks):
            x, global_index_t, global_index_s, removed_index_s, attn = \
                blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate)

            if self.ce_loc is not None and i in self.ce_loc:
                removed_indexes_s.append(removed_index_s)

        x = self.norm(x)
        lens_x_new = global_index_s.shape[1]
        lens_z_new = global_index_t.shape[1]

        z = x[:, :lens_z_new]
        x = x[:, lens_z_new:]

        if removed_indexes_s and removed_indexes_s[0] is not None:
            removed_indexes_cat = torch.cat(removed_indexes_s, dim=1)

            # pad eliminated search-token slots with zeros, then scatter the
            # surviving tokens back to their original positions
            pruned_lens_x = lens_x - lens_x_new
            pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device)
            x = torch.cat([x, pad_x], dim=1)
            index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1)
            # recover original token order
            C = x.shape[-1]
            # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1))
            x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)

        x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode)

        # re-concatenate with the template, which may be further used by other modules
        x = torch.cat([z, x], dim=1)

        aux_dict = {
            "attn": attn,
            "removed_indexes_s": removed_indexes_s,  # used for visualization
        }

        return x, aux_dict

    def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None,
                tnc_keep_rate=None,
                return_last_attn=False):
        x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,)

        return x, aux_dict


def _create_vision_transformer(pretrained=False, **kwargs):
    model = VisionTransformerCE(**kwargs)

    if pretrained:
        if 'npz' in pretrained:
            model.load_pretrained(pretrained, prefix='')
        else:
            checkpoint = torch.load(pretrained, map_location="cpu")
            missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
            print('Load pretrained model from: ' + pretrained)

    return model


def vit_base_patch16_224_ce(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    """
    model_kwargs = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)
    return model


def vit_large_patch16_224_ce(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    """
    model_kwargs = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)
    return model


================================================
FILE: lib/models/artrack_seq/__init__.py
================================================
from .artrack_seq import build_artrack_seq


================================================
FILE: lib/models/artrack_seq/artrack_seq.py
================================================
"""
Basic OSTrack model.
"""
import math
import os
from typing import List

import torch
from torch import nn
from torch.nn.modules.transformer import _get_clones
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from lib.models.layers.head_seq import build_pix_head
from lib.models.artrack_seq.vit import vit_base_patch16_224, vit_large_patch16_224
from lib.utils.box_ops import box_xyxy_to_cxcywh
import time


class ARTrackSeq(nn.Module):
    """ This is the base class for ARTrackSeq """

    def __init__(self, transformer, pix_head, hidden_dim):
        """ Initializes the model.
        Parameters:
            transformer: torch module of the transformer architecture.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.backbone = transformer
        self.pix_head = pix_head
        # learnable branch-identity embedding: slot 0 for template, slot 1 for search
        self.identity = torch.nn.Parameter(torch.zeros(1, 2, hidden_dim))
        self.identity = trunc_normal_(self.identity, std=.02)

    def forward(self, template: torch.Tensor,
                search: torch.Tensor,
                seq_input=None,
                head_type=None,
                stage=None,
                search_feature=None,
                update=None
                ):
        x, aux_dict = self.backbone(z=template, x=search, identity=self.identity)

        # Forward head
        feat_last = x
        if isinstance(x, list):
            feat_last = x[-1]
        pos_z = self.backbone.pos_embed_z
        pos_x = self.backbone.pos_embed_x
        out = self.forward_head(feat_last, pos_z, pos_x, self.identity, seq_input, stage)

        out.update(aux_dict)
        out['backbone_feat'] = x
        return out

    def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None, stage=None):
        """
        cat_feature: output embeddings of the backbone, it can be (HW1+HW2, B, C) or (HW2, B, C)
        """
        output_dict = self.pix_head(cat_feature, pos_z, pos_x, identity, seq_input, stage)
        return output_dict


def build_artrack_seq(cfg, training=True):
    # Construct the ARTrackSeq model (ViT backbone + pixel/sequence head) from cfg
    # and load pretrained weights.
    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root
    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')
    if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:
        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)
    else:
        pretrained = ''

    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':
        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':
        print("i use vit_large")
        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    else:
        raise NotImplementedError

    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)

    pix_head = build_pix_head(cfg, hidden_dim)
    model = ARTrackSeq(
        backbone,
        pix_head,
        hidden_dim,
    )
    # NOTE(review): this load is unconditional — PRETRAIN_PTH must point to a
    # valid checkpoint even at inference; confirm against configs.
    load_from = cfg.MODEL.PRETRAIN_PTH
    checkpoint = torch.load(load_from, map_location="cpu")
    missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False)
    print('Load pretrained model from: ' + load_from)
    if 'sequence' in cfg.MODEL.PRETRAIN_FILE and training:
        print("i change myself")
        checkpoint = torch.load(cfg.MODEL.PRETRAIN_FILE, map_location="cpu")
        missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False)
        print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE)

    return model


================================================
FILE: lib/models/artrack_seq/base_backbone.py
================================================
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.vision_transformer import resize_pos_embed
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from lib.models.layers.patch_embed import PatchEmbed
from lib.models.artrack_seq.utils import combine_tokens, recover_tokens


class BaseBackbone(nn.Module):
    def __init__(self):
        super().__init__()

        # for original ViT
        self.pos_embed = None
        self.img_size = [224, 224]
        self.patch_size = 16
        self.embed_dim = 384

        self.cat_mode = 'direct'

        self.pos_embed_z = None
        self.pos_embed_x = None

        self.template_segment_pos_embed = None
        self.search_segment_pos_embed = None

        self.return_inter = False
        self.return_stage = [2, 5, 8, 11]

        self.add_cls_token = False
        self.add_sep_seg = False

    def finetune_track(self, cfg, patch_start_index=1):
        # Adapt the pretrained ViT (224x224, stride 16) to the tracking input
        # sizes/stride given in cfg: resample patch embedding and split the
        # positional embedding into template (z) and search (x) parts.
        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
        new_patch_size = cfg.MODEL.BACKBONE.STRIDE

        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
        self.return_inter = cfg.MODEL.RETURN_INTER
        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG

        # resize patch embedding
        if new_patch_size != self.patch_size:
            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
            old_patch_embed = {}
            for name, param in self.patch_embed.named_parameters():
                if 'weight' in name:
                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
                                                      mode='bicubic', align_corners=False)
                    param = nn.Parameter(param)
                old_patch_embed[name] = param
            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
                                          embed_dim=self.embed_dim)
            self.patch_embed.proj.bias = old_patch_embed['proj.bias']
            self.patch_embed.proj.weight = old_patch_embed['proj.weight']

        # for patch embedding
        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]
        patch_pos_embed = patch_pos_embed.transpose(1, 2)
        B, E, Q = patch_pos_embed.shape
        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size
        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)

        # for search region
        H, W = search_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                           align_corners=False)
        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)

        # for template region
        H, W = template_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                             align_corners=False)
        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)

        self.pos_embed_z = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)

        # for cls token (keep it but not used)
        if self.add_cls_token and patch_start_index > 0:
            cls_pos_embed = self.pos_embed[:, 0:1, :]
            self.cls_pos_embed = nn.Parameter(cls_pos_embed)

        # separate token and segment token
        if self.add_sep_seg:
            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)
            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)

        # self.cls_token = None
        # self.pos_embed = None

        if self.return_inter:
            for i_layer in self.fpn_stage:
                if i_layer != 11:
                    norm_layer = partial(nn.LayerNorm, eps=1e-6)
                    layer = norm_layer(self.embed_dim)
                    layer_name = f'norm{i_layer}'
                    self.add_module(layer_name, layer)

    def forward_features(self, z, x, identity):
        # Same joint feature extraction as the artrack variant, but also returns
        # an aux_dict for interface parity with the CE backbone.
        B, H, W = x.shape[0], x.shape[2], x.shape[3]

        x = self.patch_embed(x)
        z = self.patch_embed(z)
        s_x = x.shape[1]
        s_z = z.shape[1]

        if self.add_cls_token:
            cls_tokens = self.cls_token.expand(B, -1, -1)
            cls_tokens = cls_tokens + self.cls_pos_embed

        z += self.pos_embed_z
        x += self.pos_embed_x
        z += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1)
        x += identity[:, 1, :].repeat(B, self.pos_embed_x.shape[1], 1)

        if self.add_sep_seg:
            x += self.search_segment_pos_embed
            z += self.template_segment_pos_embed

        x = combine_tokens(z, x, mode=self.cat_mode)
        if self.add_cls_token:
            x = torch.cat([cls_tokens, x], dim=1)

        x = self.pos_drop(x)

        for i, blk in enumerate(self.blocks):
            x = blk(x)

        lens_z = self.pos_embed_z.shape[1]
        lens_x = self.pos_embed_x.shape[1]
        # x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode)
        aux_dict = {"attn": None}
        return self.norm(x), aux_dict

    def forward(self, z, x, identity, **kwargs):
        """
        Joint feature extraction and relation modeling for the basic ViT backbone.
# ================================================
# FILE: lib/models/artrack_seq/utils.py
# ================================================
import math

import torch
import torch.nn.functional as F


def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):
    """Merge template and search tokens (both [B, HW, C]) into one sequence.

    mode:
        'direct'           -- [template | search]
        'template_central' -- template inserted at the centre of the search tokens
        'partition'        -- template folded into half-height bands and
                              prepended to the search tokens; only supported
                              with return_res=True (also returns merged H, W)
    """
    num_template = template_tokens.shape[1]
    num_search = search_tokens.shape[1]

    if mode == 'direct':
        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)
    elif mode == 'template_central':
        pivot = num_search // 2
        merged_feature = torch.cat(
            (search_tokens[:, :pivot, :], template_tokens, search_tokens[:, pivot:, :]),
            dim=1)
    elif mode == 'partition':
        side_s = int(math.sqrt(num_search))
        side_t = int(math.sqrt(num_template))
        window_size = math.ceil(side_t / 2.)

        # Pad the template feature map up to a multiple of the window size.
        B, _, C = template_tokens.shape
        H = W = side_t
        grid = template_tokens.view(B, H, W, C)
        pad_l = pad_b = pad_r = 0
        pad_t = (window_size - H % window_size) % window_size
        grid = F.pad(grid, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = grid.shape
        grid = grid.view(B, Hp // window_size, window_size, W, C)
        # Place the two half-height bands side by side along the width axis.
        grid = torch.cat([grid[:, 0, ...], grid[:, 1, ...]], dim=2)
        _, Hc, Wc, _ = grid.shape
        folded = grid.view(B, -1, C)
        merged_feature = torch.cat([folded, search_tokens], dim=1)

        # New h and w, which may be useful for SwinT or others.
        merged_h, merged_w = side_s + Hc, side_s
        if return_res:
            return merged_feature, merged_h, merged_w
        raise NotImplementedError

    return merged_feature


def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):
    """Undo combine_tokens(): restore the [template | search] ordering."""
    if mode == 'direct' or mode == 'partition':
        # Nothing to do: the template already precedes the search tokens.
        return merged_tokens
    if mode == 'template_central':
        pivot = len_search_token // 2
        tail_len = len_search_token - pivot
        template_end = pivot + len_template_token

        front = merged_tokens[:, :pivot, :]
        back = merged_tokens[:, -tail_len:, :]
        template = merged_tokens[:, pivot:template_end, :]
        return torch.cat((template, front, back), dim=1)
    raise NotImplementedError


def window_partition(x, window_size: int):
    """Split (B, H, W, C) into non-overlapping tiles.

    Returns (num_windows * B, window_size, window_size, C).
    """
    B, H, W, C = x.shape
    tiles = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return tiles.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)


def window_reverse(windows, window_size: int, H: int, W: int):
    """Inverse of window_partition(): reassemble tiles into (B, H, W, C)."""
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)


# ================================================
# FILE: lib/models/artrack_seq/vit.py
# ================================================
# NOTE(review): the vit.py module docstring opened here in the original dump
# and continues on the next source line.
check it out for some einops/einsum fun * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2021 Ross Wightman Modified by Botao Ye """ import math import logging from functools import partial from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_ from timm.models.registry import register_model from lib.models.layers.patch_embed import PatchEmbed from lib.models.artrack_seq.base_backbone import BaseBackbone class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, return_attention=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) if return_attention: return x, attn return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = 
Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, return_attention=False): if return_attention: feat, attn = self.attn(self.norm1(x), True) x = x + self.drop_path(feat) x = x + self.drop_path(self.mlp(self.norm2(x))) return x, attn else: x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(BaseBackbone): """ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None, weight_init=''): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set distilled (bool): model includes a distillation token and head as in DeiT models drop_rate (float): dropout rate 
attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer weight_init: (str): weight init scheme """ super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 2 if distilled else 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) self.norm = norm_layer(embed_dim) # # Representation layer # if representation_size and not distilled: # self.num_features = representation_size # self.pre_logits = nn.Sequential(OrderedDict([ # ('fc', nn.Linear(embed_dim, representation_size)), # ('act', nn.Tanh()) # ])) # else: # self.pre_logits = nn.Identity() # # # Classifier head(s) # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # self.head_dist = None # if distilled: # self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.init_weights(weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'nlhb', '') head_bias = -math.log(self.num_classes) 
        if 'nlhb' in mode else 0.
        # (continuation of: head_bias = -math.log(self.num_classes) ... on the
        # previous source line; nlhb = "negative log head bias" init)
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        # Load official Flax .npz weights (see _load_weights below).
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        # Returns both heads when a distillation token is present (DeiT).
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        # Replace the classification head(s) for a new number of classes.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()


def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            # Classifier head: zero weights, bias set to the (possibly nlhb) prior.
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif name.startswith('pre_logits'):
            lecun_normal_(module.weight)
            nn.init.zeros_(module.bias)
        else:
            if jax_impl:
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    if 'mlp' in name:
                        # JAX MLP biases use a tiny normal init.
                        nn.init.normal_(module.bias, std=1e-6)
                    else:
                        nn.init.zeros_(module.bias)
            else:
                trunc_normal_(module.weight, std=.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
    elif jax_impl and isinstance(module, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, transposing Flax layouts to PyTorch conventions.
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid: load the conv stem / ResNet stages first
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in
enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) for i, block in enumerate(model.blocks.children()): block_prefix = f'{prefix}Transformer/encoderblock_{i}/' mha_prefix = block_prefix + 
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    """Rescale the grid of position embeddings when loading from state_dict.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Args:
        posemb: source embedding, (1, num_tokens + gs_old**2, C).
        posemb_new: target embedding; only its length is read, (1, ntok_new, C).
        num_tokens: number of leading special tokens (cls/dist) copied through.
        gs_new: target grid size (h, w); inferred as square when empty.
    Returns:
        (1, num_tokens + gs_new[0] * gs_new[1], C) tensor.
    """
    # BUGFIX: the original passed the shapes as extra print() arguments
    # (logging-style lazy args), so the '%s' placeholders were never
    # interpolated; interpolate them explicitly.
    print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    # BUGFIX: same lazy-%s print fixed here.
    print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))
    # (1, C, gs_old, gs_old) -> bilinear resize -> (1, gs_new_h * gs_new_w, C)
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb


def checkpoint_filter_fn(state_dict, model):
    """Convert patch embedding weight from manual patchify + linear proj to conv,
    and resize `pos_embed` when the checkpoint grid differs from the model's."""
    out_dict = {}
    if 'model' in state_dict:
        # For deit models: unwrap the nested state dict.
        state_dict = state_dict['model']
    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
            # For old models trained prior to conv-based patchification:
            # fold the flat linear projection back into a conv kernel.
            O, I, H, W = model.patch_embed.proj.weight.shape
            v = v.reshape(O, -1, H, W)
        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
            # To use the model at a different size from the pretrained weights.
            v = resize_pos_embed(
                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        out_dict[k] = v
    return out_dict
prefix='') else: checkpoint = torch.load(pretrained, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False) print('Load pretrained model from: ' + pretrained) return model def vit_base_patch16_224(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model def vit_large_patch16_224(pretrained=False, **kwargs): model_kwargs = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model ================================================ FILE: lib/models/artrack_seq/vit_ce.py ================================================ import math import logging from functools import partial from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import to_2tuple from lib.models.layers.patch_embed import PatchEmbed from .utils import combine_tokens, recover_tokens from .vit import VisionTransformer from ..layers.attn_blocks import CEBlock _logger = logging.getLogger(__name__) class VisionTransformerCE(VisionTransformer): """ Vision Transformer with candidate elimination (CE) module A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, drop_rate=0., 
attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None, weight_init='', ce_loc=None, ce_keep_ratio=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set distilled (bool): model includes a distillation token and head as in DeiT models drop_rate (float): dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer weight_init: (str): weight init scheme """ # super().__init__() super().__init__() if isinstance(img_size, tuple): self.img_size = img_size else: self.img_size = to_2tuple(img_size) self.patch_size = patch_size self.in_chans = in_chans self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 2 if distilled else 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_rate) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule blocks = [] ce_index = 
0 self.ce_loc = ce_loc for i in range(depth): ce_keep_ratio_i = 1.0 if ce_loc is not None and i in ce_loc: ce_keep_ratio_i = ce_keep_ratio[ce_index] ce_index += 1 blocks.append( CEBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, keep_ratio_search=ce_keep_ratio_i) ) self.blocks = nn.Sequential(*blocks) self.norm = norm_layer(embed_dim) self.init_weights(weight_init) def forward_features(self, z, x, mask_z=None, mask_x=None, ce_template_mask=None, ce_keep_rate=None, return_last_attn=False ): B, H, W = x.shape[0], x.shape[2], x.shape[3] x = self.patch_embed(x) z = self.patch_embed(z) # attention mask handling # B, H, W if mask_z is not None and mask_x is not None: mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0] mask_z = mask_z.flatten(1).unsqueeze(-1) mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. 
/ self.patch_size).to(torch.bool)[0] mask_x = mask_x.flatten(1).unsqueeze(-1) mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode) mask_x = mask_x.squeeze(-1) if self.add_cls_token: cls_tokens = self.cls_token.expand(B, -1, -1) cls_tokens = cls_tokens + self.cls_pos_embed z += self.pos_embed_z x += self.pos_embed_x if self.add_sep_seg: x += self.search_segment_pos_embed z += self.template_segment_pos_embed x = combine_tokens(z, x, mode=self.cat_mode) if self.add_cls_token: x = torch.cat([cls_tokens, x], dim=1) x = self.pos_drop(x) lens_z = self.pos_embed_z.shape[1] lens_x = self.pos_embed_x.shape[1] global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device) global_index_t = global_index_t.repeat(B, 1) global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device) global_index_s = global_index_s.repeat(B, 1) removed_indexes_s = [] for i, blk in enumerate(self.blocks): x, global_index_t, global_index_s, removed_index_s, attn = \ blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate) if self.ce_loc is not None and i in self.ce_loc: removed_indexes_s.append(removed_index_s) x = self.norm(x) lens_x_new = global_index_s.shape[1] lens_z_new = global_index_t.shape[1] z = x[:, :lens_z_new] x = x[:, lens_z_new:] if removed_indexes_s and removed_indexes_s[0] is not None: removed_indexes_cat = torch.cat(removed_indexes_s, dim=1) pruned_lens_x = lens_x - lens_x_new pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device) x = torch.cat([x, pad_x], dim=1) index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1) # recover original token order C = x.shape[-1] # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1)) x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x) x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode) # re-concatenate with the template, which may be further used by other modules x = torch.cat([z, x], dim=1) aux_dict = 
{ "attn": attn, "removed_indexes_s": removed_indexes_s, # used for visualization } return x, aux_dict def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None, tnc_keep_rate=None, return_last_attn=False): x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,) return x, aux_dict def _create_vision_transformer(pretrained=False, **kwargs): model = VisionTransformerCE(**kwargs) if pretrained: if 'npz' in pretrained: model.load_pretrained(pretrained, prefix='') else: checkpoint = torch.load(pretrained, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False) print('Load pretrained model from: ' + pretrained) return model def vit_base_patch16_224_ce(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer(pretrained=pretrained, **model_kwargs) return model def vit_large_patch16_224_ce(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). 
    """
    model_kwargs = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)
    return model



================================================
FILE: lib/models/artrackv2/__init__.py
================================================
from .artrackv2 import build_artrackv2


================================================
FILE: lib/models/artrackv2/artrackv2.py
================================================
from copy import deepcopy
import math
import os
from typing import List

import torch
from torch import nn
from torch.nn.modules.transformer import _get_clones
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from lib.models.artrackv2.vit import vit_base_patch16_224, vit_large_patch16_224
from lib.utils.box_ops import box_xyxy_to_cxcywh


class ARTrackV2(nn.Module):
    """ARTrackV2 tracker: a ViT backbone plus an MLP confidence-score head.

    `identity` is a learned [1, 3, hidden_dim] embedding distinguishing the
    three token sources (template 0, template 1, search region).
    """

    def __init__(self, transformer, score_mlp, hidden_dim, ):
        super().__init__()
        self.identity = torch.nn.Parameter(torch.zeros(1, 3, hidden_dim))
        self.identity = trunc_normal_(self.identity, std=.02)
        self.backbone = transformer
        self.score_mlp = score_mlp

    def forward(self, template: torch.Tensor,
                search: torch.Tensor,
                ce_template_mask=None,
                ce_keep_rate=None,
                return_last_attn=False,
                seq_input=None,
                target_in_search_img=None,
                gt_bboxes=None,
                ):
        # `template` is indexable as two template images; the backbone returns
        # the output dict plus per-source token features (unused here).
        template_0 = template[0]
        template_1 = template[1]
        out, z_0_feat, z_1_feat, x_feat = self.backbone(z_0=template_0, z_1=template_1, x=search,
                                                        identity=self.identity, seqs_input=seq_input,
                                                        ce_template_mask=ce_template_mask,
                                                        ce_keep_rate=ce_keep_rate,
                                                        return_last_attn=return_last_attn,)
        # predict a confidence score from the backbone's score token features
        score_feat = out["score_feat"]
        score = self.score_mlp(score_feat)
        out["score"] = score
        return out


class MlpScoreDecoder(nn.Module):
    """Small MLP that maps regression tokens to a single confidence score."""

    def __init__(self, in_dim, hidden_dim, num_layers, bn=False):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        out_dim = 1  # score
        # Linear(+BN)+ReLU for all but the final layer, which has no activation.
        if bn:
            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k), nn.ReLU())
                                          if i < num_layers - 1
                                          else nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))
                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])
        else:
            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.ReLU())
                                          if i < num_layers - 1
                                          else nn.Linear(n, k)
                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])

    def forward(self, reg_tokens):
        """
        reg tokens shape: (b, 4, embed_dim)
        """
        x = self.layers(reg_tokens)  # (b, 4, 1)
        x = x.mean(dim=1)  # (b, 1)  average the per-token scores
        return x

def build_score_decoder(cfg, hidden_dim):
    # cfg is accepted for interface symmetry with other builders; the score
    # head is currently fixed at two layers without batch norm.
    return MlpScoreDecoder(
        in_dim=hidden_dim,
        hidden_dim=hidden_dim,
        num_layers=2,
        bn=False
    )

def build_artrackv2(cfg, training=True):
    """Construct an ARTrackV2 model from the config.

    Pretrained ViT weights are loaded only when training from a non-ARTrack
    checkpoint file; 'ARTrack' checkpoints are loaded elsewhere.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root
    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')
    if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:
        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)
    else:
        pretrained = ''

    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':
        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':
        print("i use vit_large")
        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION)
        hidden_dim = backbone.embed_dim
        patch_start_index = 1
    else:
        raise NotImplementedError

    # adapt the pretrained ViT (pos-embeds, patch size) to tracking inputs
    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)

    score_decoder = build_score_decoder(cfg, hidden_dim)

    model = ARTrackV2(
        backbone,
        score_decoder,
        hidden_dim,
    )

    return model


================================================
FILE: lib/models/artrackv2/base_backbone.py
================================================
from functools import partial

import torch
import torch.nn as nn
import
torch.nn.functional as F
from timm.models.vision_transformer import resize_pos_embed
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from lib.models.layers.patch_embed import PatchEmbed
from lib.models.artrackv2.utils import combine_tokens, recover_tokens


def generate_square_subsequent_mask(sz, sx, ss):
    r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).

    Layout of the combined sequence: [template tokens (sz, split into two
    halves), search tokens (sx), coordinate/score tokens (ss)].  Returns a
    boolean mask where True marks positions a row may NOT attend to.
    """
    # NOTE(review): `sum` shadows the builtin, and the triu result is
    # immediately overwritten by `mask[:, :] = 0` — the triu only provides a
    # tensor of the right shape/dtype here.
    sum = sz + sx + ss
    mask = (torch.triu(torch.ones(sum, sum)) == 1).transpose(0, 1)
    mask[:, :] = 0
    mask[:int(sz/2), :int(sz/2)] = 1  #template self
    mask[int(sz/2):sz, int(sz/2):sz] = 1  # dt self
    mask[int(sz/2):sz, sz:sz+sx] = 1  # dt search
    mask[int(sz / 2):sz, -1] = 1  # dt search (last position, i.e. the score token)
    mask[sz:sz+sx, :sz+sx] = 1  # sr dt-t-sr
    mask[sz+sx:, :] = 1  # co dt-t-sr-co
    return ~mask


class BaseBackbone(nn.Module):
    """Shared base for the ARTrackV2 ViT backbone: holds positional/segment
    embeddings and the logic to adapt a pretrained ViT to tracking inputs."""

    def __init__(self):
        super().__init__()

        # for original ViT
        self.pos_embed = None
        self.img_size = [224, 224]
        self.patch_size = 16
        self.embed_dim = 384

        self.cat_mode = 'direct'  # how template/search tokens are concatenated

        # per-region positional embeddings, created in finetune_track()
        self.pos_embed_z0 = None
        self.pos_embed_z1 = None
        self.pos_embed_x = None

        self.template_segment_pos_embed = None
        self.search_segment_pos_embed = None

        self.return_inter = False
        self.return_stage = [2, 5, 8, 11]

        self.add_cls_token = False
        self.add_sep_seg = False

    def finetune_track(self, cfg, patch_start_index=1):
        """Adapt the pretrained ViT to tracking: resize the patch embedding if
        the stride differs, and interpolate positional embeddings for the
        template and search-region resolutions."""

        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
        new_patch_size = cfg.MODEL.BACKBONE.STRIDE

        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
        self.return_inter = cfg.MODEL.RETURN_INTER
        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG

        # resize patch embedding
        if new_patch_size != self.patch_size:
            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
            old_patch_embed = {}
            for name, param in self.patch_embed.named_parameters():
                if 'weight' in name:
                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
                                                      mode='bicubic', align_corners=False)
                    param = nn.Parameter(param)
                old_patch_embed[name] = param
            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
                                          embed_dim=self.embed_dim)
            self.patch_embed.proj.bias = old_patch_embed['proj.bias']
            self.patch_embed.proj.weight = old_patch_embed['proj.weight']

        # for patch embedding: drop the cls-token slot and reshape to a 2D grid
        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]
        patch_pos_embed = patch_pos_embed.transpose(1, 2)
        B, E, Q = patch_pos_embed.shape
        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size
        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)

        # for search region
        H, W = search_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                           align_corners=False)
        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)

        # for template region (both templates share the same interpolated grid)
        H, W = template_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                             align_corners=False)
        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)

        self.pos_embed_z0 = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_z1 = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)

        # for cls token (keep it but not used)
        if self.add_cls_token and patch_start_index > 0:
            cls_pos_embed = self.pos_embed[:, 0:1, :]
            self.cls_pos_embed = nn.Parameter(cls_pos_embed)

        # separate token and segment token
        if self.add_sep_seg:
            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)
            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)

        if self.return_inter:
            for i_layer in
self.fpn_stage: if i_layer != 11: norm_layer = partial(nn.LayerNorm, eps=1e-6) layer = norm_layer(self.embed_dim) layer_name = f'norm{i_layer}' self.add_module(layer_name, layer) def forward_features(self, z_0, z_1, x, identity, seqs_input): share_weight = self.word_embeddings.weight.T B, H, W = x.shape[0], x.shape[2], x.shape[3] seqs_input = seqs_input.to(torch.int64).to(x.device) tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed = self.position_embeddings.weight.unsqueeze(1) query_embed = query_embed.repeat(1, B, 1) tgt = tgt.transpose(0, 1) query_embed = query_embed.transpose(0, 1) x = self.patch_embed(x) z_0 = self.patch_embed(z_0) z_1 = self.patch_embed(z_1) len_x = x.shape[1] len_z = z_0.shape[1] + z_1.shape[1] len_seq = seqs_input.shape[1] mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device) if self.add_cls_token: cls_tokens = self.cls_token.expand(B, -1, -1) cls_tokens = cls_tokens + self.cls_pos_embed z_0 += self.pos_embed_z0 z_1 += self.pos_embed_z1 x += self.pos_embed_x tgt += query_embed z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z0.shape[1], 1) z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z1.shape[1], 1) x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1) if self.add_sep_seg: x += self.search_segment_pos_embed z += self.template_segment_pos_embed z = torch.cat((z_0, z_1), dim=1) x = combine_tokens(z, x, mode=self.cat_mode) x = torch.cat((x, tgt), dim=1) if self.add_cls_token: x = torch.cat([cls_tokens, x], dim=1) x = self.pos_drop(x) for i, blk in enumerate(self.blocks): x = blk(x, padding_mask=mask) # for j, blk in enumerate(self.extension): x = blk(x, padding_mask=mask) x_out = self.norm(x[:, -5:-1]) score_feat = x[:, -1] lens_z = self.pos_embed_z0.shape[1] lens_x = self.pos_embed_x.shape[1] z_0_feat = x[:, :lens_z] z_1_feat = x[:, lens_z:lens_z*2] x_feat = x[:, lens_z*2:lens_z*2+lens_x] #x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode) at = torch.matmul(x_out, share_weight) at 
= at + self.output_bias at = at[:, -4:] at = at.transpose(0, 1) output = {'feat': at, 'score_feat':score_feat, "state": "train"} return output, z_0_feat, z_1_feat, x_feat def forward_track(self, z_0, z_1, x, identity): share_weight = self.word_embeddings.weight.T out_list = [] x0 = self.bins * self.range y0 = self.bins * self.range + 1 x1 = self.bins * self.range + 2 y1 = self.bins * self.range + 3 score = self.bins * self.range + 5 B, H, W = x.shape[0], x.shape[2], x.shape[3] seq = torch.cat([torch.ones((B, 1)).to(x) * x0, torch.ones((B, 1)).to(x) * y0, torch.ones((B, 1)).to(x) * x1, torch.ones((B, 1)).to(x) * y1, torch.ones((B, 1)).to(x) * score], dim=1) seq_all = torch.cat([seq], dim=1) seqs_input = seq_all.to(torch.int64).to(x.device) output_x_feat = x.clone() tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) x = self.patch_embed(x) z_0 = self.patch_embed(z_0) z_1 = self.patch_embed(z_1) len_x = x.shape[1] len_z = z_0.shape[1] + z_1.shape[1] len_seq = seqs_input.shape[1] z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z0.shape[1], 1) z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z0.shape[1], 1) x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1) query_pos_embed = self.position_embeddings.weight.unsqueeze(1) query_pos_embed = query_pos_embed.repeat(1, B, 1) tgt = tgt.transpose(0, 1) query_pos_embed = query_pos_embed.transpose(0, 1) z_0 += self.pos_embed_z0 z_1 += self.pos_embed_z1 x += self.pos_embed_x mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device) tgt += query_pos_embed[:, :tgt.shape[1]] z = torch.cat((z_0, z_1), dim=1) zx = combine_tokens(z, x, mode=self.cat_mode) zxs = torch.cat((zx, tgt), dim=1) zxs = self.pos_drop(zxs) for j, blk in enumerate(self.blocks): zxs = blk(zxs, padding_mask=mask) for j, blk in enumerate(self.extension): zxs = blk(zxs, padding_mask=mask) lens_z_single = self.pos_embed_z0.shape[1] lens_x = self.pos_embed_x.shape[1] z_0_feat = zxs[:, :lens_z_single] z_1_feat = zxs[:, 
lens_z_single:lens_z_single * 2] x_feat = zxs[:, lens_z_single * 2:lens_z_single * 2 + lens_x] x_out = self.norm(zxs[:, -5:-1]) score_feat = x[:, -1] possibility = torch.matmul(x_out, share_weight) out = possibility + self.output_bias temp = out.transpose(0, 1) out_list.append(out.unsqueeze(0)) out = out.softmax(-1) value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1] for i in range(4): value, extra_seq = out[:, i, :].topk(dim=-1, k=1)[0], out[:, i, :].topk(dim=-1, k=1)[1] if i == 0: seqs_output = extra_seq values = value else: seqs_output = torch.cat([seqs_output, extra_seq], dim=-1) values = torch.cat([values, value], dim=-1) output = {'seqs': seqs_output, 'class': values, 'feat': temp, "state": "val/test", "x_feat": output_x_feat.detach(), "score_feat": score_feat} return output, None, None, None def forward(self, z_0, z_1, x, identity, seqs_input, **kwargs): """ Joint feature extraction and relation modeling for the basic ViT backbone. Args: z (torch.Tensor): template feature, [B, C, H_z, W_z] x (torch.Tensor): search region feature, [B, C, H_x, W_x] Returns: x (torch.Tensor): merged template and search region feature, [B, L_z+L_x, C] attn : None """ if seqs_input == None: output = self.forward_track(z_0, z_1, x, identity) else: output = self.forward_features(z_0, z_1, x, identity, seqs_input) return output ================================================ FILE: lib/models/artrackv2/utils.py ================================================ import math import torch import torch.nn.functional as F def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False): # [B, HW, C] len_t = template_tokens.shape[1] len_s = search_tokens.shape[1] if mode == 'direct': merged_feature = torch.cat((template_tokens, search_tokens), dim=1) elif mode == 'template_central': central_pivot = len_s // 2 first_half = search_tokens[:, :central_pivot, :] second_half = search_tokens[:, central_pivot:, :] merged_feature = torch.cat((first_half, 
template_tokens, second_half), dim=1)
    elif mode == 'partition':
        feat_size_s = int(math.sqrt(len_s))
        feat_size_t = int(math.sqrt(len_t))
        window_size = math.ceil(feat_size_t / 2.)
        # pad feature maps to multiples of window size
        B, _, C = template_tokens.shape
        H = W = feat_size_t
        template_tokens = template_tokens.view(B, H, W, C)
        pad_l = pad_b = pad_r = 0
        # pad_r = (window_size - W % window_size) % window_size
        pad_t = (window_size - H % window_size) % window_size
        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)
        # split the template vertically into two windows and place them side by side
        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)
        _, Hc, Wc, _ = template_tokens.shape
        template_tokens = template_tokens.view(B, -1, C)
        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)

        # calculate new h and w, which may be useful for SwinT or others
        merged_h, merged_w = feat_size_s + Hc, feat_size_s
        if return_res:
            return merged_feature, merged_h, merged_w
    # NOTE(review): in 'partition' mode with return_res=False control falls
    # through to this else of the mode chain? — verify the intended branch
    # structure; only 'direct' is used by the backbones in this package.
    else:
        raise NotImplementedError

    return merged_feature


def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):
    # Inverse of combine_tokens for the 'template_central' layout; 'direct'
    # and 'partition' return the tokens unchanged.
    if mode == 'direct':
        recovered_tokens = merged_tokens
    elif mode == 'template_central':
        central_pivot = len_search_token // 2
        len_remain = len_search_token - central_pivot
        len_half_and_t = central_pivot + len_template_token

        first_half = merged_tokens[:, :central_pivot, :]
        second_half = merged_tokens[:, -len_remain:, :]
        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]

        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)
    elif mode == 'partition':
        recovered_tokens = merged_tokens
    else:
        raise NotImplementedError

    return recovered_tokens


def window_partition(x, window_size: int):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size: int, H: int, W: int):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


================================================
FILE: lib/models/artrackv2/vit.py
================================================
""" Vision Transformer (ViT) in PyTorch

A PyTorch implement of Vision Transformers as described in:

'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
    - https://arxiv.org/abs/2010.11929

`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
    - https://arxiv.org/abs/2106.10270

The official jax code is released and available at https://github.com/google-research/vision_transformer

DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877

Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ...
check it out for some einops/einsum fun * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2021 Ross Wightman Modified by Botao Ye """ import math import logging from functools import partial from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_ from timm.models.registry import register_model from lib.models.layers.patch_embed import PatchEmbed from lib.models.artrackv2.base_backbone import BaseBackbone class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, return_attention=False, padding_mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale if padding_mask != None: attn = attn.masked_fill(padding_mask, float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) if return_attention: return x, attn return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., 
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False, padding_mask=None):
        # Pre-norm transformer block; optionally also returns attention maps.
        if return_attention:
            feat, attn = self.attn(self.norm1(x), True, padding_mask)
            x = x + self.drop_path(feat)
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x, attn
        else:
            x = x + self.drop_path(self.attn(self.norm1(x), padding_mask=padding_mask))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x


class VisionTransformer(BaseBackbone):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877

    Extended for ARTrackV2 with a coordinate-token vocabulary
    (word_embeddings / position_embeddings / output_bias) and `extension`
    decoder-like blocks appended after the standard depth.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init='', bins=400, range_time=2, extension=3):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
            bins: number of coordinate quantization bins per unit range
            range_time: coordinate range multiplier (vocabulary = bins*range + 6)
            extension: number of extra transformer blocks appended after `depth`
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU
        self.bins = bins
        in_channel = embed_dim
        self.range = range_time
        # coordinate-token vocabulary: bins*range value tokens + 6 specials;
        # index bins*range+4 is the padding token
        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range + 4, max_norm=None, norm_type=2.0)
        nn.init.kaiming_normal_(self.word_embeddings.weight.data)
        self.position_embeddings = nn.Embedding(
            5, in_channel)
        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 6))

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        # extra blocks reuse the first `extension` drop-path rates
        self.extension = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(extension)])
        self.norm = norm_layer(embed_dim)
        self.apply(self.ext_init_weights)

        self.init_weights(weight_init)

    def ext_init_weights(self, m):
        # initialization for the extension blocks (JAX-style xavier uniform)
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def init_weights(self, mode=''):
        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()


def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same as my
original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif name.startswith('pre_logits'):
            lecun_normal_(module.weight)
            nn.init.zeros_(module.bias)
        else:
            if jax_impl:
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    if 'mlp' in name:
                        nn.init.normal_(module.bias, std=1e-6)
                    else:
                        nn.init.zeros_(module.bias)
            else:
                trunc_normal_(module.weight, std=.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
    elif jax_impl and isinstance(module, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, transposing jax layouts (HWIO conv, in-out linear)
        # to pytorch layouts when t=True
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        # fuse the separate q/k/v jax matrices into the single pytorch qkv
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))


def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    # Rescale the grid of position embeddings when loading from state_dict.
Adapted from
    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    print('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        # split off the class/dist token embeddings before resizing the grid
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    print('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
    # bilinear-resample the 2D grid of positional embeddings
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb


def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    out_dict = {}
    if 'model' in state_dict:
        # For deit models
        state_dict = state_dict['model']
    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
            # For old models that I trained prior to conv based patchification
            O, I, H, W = model.patch_embed.proj.weight.shape
            v = v.reshape(O, -1, H, W)
        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
            # To resize pos embedding when using model at different size from pretrained weights
            v = resize_pos_embed(
                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        out_dict[k] = v
    return out_dict


def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Build a VisionTransformer; `pretrained` is a checkpoint path ('' skips
    loading). `variant`/`default_cfg` are kept for timm interface parity."""
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    model = VisionTransformer(**kwargs)

    if pretrained:
        if 'npz' in pretrained:
            model.load_pretrained(pretrained, prefix='')
        else:
            checkpoint = torch.load(pretrained, map_location="cpu")
            # strict=False: the ARTrack-specific parameters are not in a
            # generic ViT checkpoint
            missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
            print('Load pretrained model from: ' + pretrained)

    return model


def vit_base_patch16_224(pretrained=False, bins=400, range=2, extension=3, **kwargs):
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    """
    model_kwargs = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, bins=bins, range_time=range, extension=extension, **kwargs)
    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
    return model


def vit_large_patch16_224(pretrained=False, bins=400, range=2, extension=3, **kwargs):
    # ViT-Large (ViT-L/16) variant of the factory above.
    model_kwargs = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, bins=bins, range_time=range, extension=extension, **kwargs)
    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
    return model


================================================
FILE: lib/models/artrackv2_seq/__init__.py
================================================
from .artrackv2_seq import build_artrackv2_seq


================================================
FILE: lib/models/artrackv2_seq/artrackv2_seq.py
================================================
"""
Basic OSTrack model.
""" from copy import deepcopy import math import os from typing import List import torch from torch import nn from torch.nn.modules.transformer import _get_clones from timm.models.layers import DropPath, to_2tuple, trunc_normal_ from lib.models.artrackv2_seq.vit import vit_base_patch16_224, vit_large_patch16_224 from lib.utils.box_ops import box_xyxy_to_cxcywh from lib.models.layers.mask_decoder import build_maskdecoder from lib.models.layers.head import build_decoder, MLP, DropPathAllocator import time class ARTrackV2Seq(nn.Module): """ This is the base class for OSTrack """ def __init__(self, transformer, cross_2_decoder, score_mlp, hidden_dim, ): """ Initializes the model. Parameters: transformer: torch module of the transformer architecture. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. """ super().__init__() self.backbone = transformer self.score_mlp = score_mlp self.identity = torch.nn.Parameter(torch.zeros(1, 3, hidden_dim)) self.identity = trunc_normal_(self.identity, std=.02) self.cross_2_decoder = cross_2_decoder def forward(self, template: torch.Tensor, dz_feat: torch.Tensor, search: torch.Tensor, ce_template_mask=None, ce_keep_rate=None, return_last_attn=False, seq_input=None, head_type=None, stage=None, search_feature=None, target_in_search_img=None, gt_bboxes=None, ): template_0 = template[:, 0] out, z_0_feat, z_1_feat, x_feat, score_feat = self.backbone(z_0=template_0, z_1_feat=dz_feat, x=search, identity=self.identity, seqs_input=seq_input, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate, return_last_attn=return_last_attn,) seq_feat = out['seq_feat'].permute(1, 0 ,2) score = self.score_mlp(score_feat) out['score'] = score loss = torch.tensor(0.0, dtype=torch.float32).to(search.device) if target_in_search_img != None: target_in_search_gt = self.backbone.patch_embed(target_in_search_img) z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5), 
z_1_feat.shape[2]).permute(0, 3, 1, 2) target_in_search_gt = self.cross_2_decoder.unpatchify(target_in_search_gt) update_img, loss_temp = self.cross_2_decoder(z_1_feat, target_in_search_gt) update_feat = self.cross_2_decoder.patchify(update_img) out['dz_feat'] = update_feat loss += loss_temp out['renew_loss'] = loss else: z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5), z_1_feat.shape[2]).permute(0, 3, 1, 2) update_feat = self.cross_2_decoder(z_1_feat, eval=True) update_feat = self.cross_2_decoder.patchify(update_feat) out['dz_feat'] = update_feat return out class MlpScoreDecoder(nn.Module): def __init__(self, in_dim, hidden_dim, num_layers, bn=False): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) out_dim = 1 # score if bn: self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k), nn.ReLU()) if i < num_layers - 1 else nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k)) for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))]) else: self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.ReLU()) if i < num_layers - 1 else nn.Linear(n, k) for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))]) def forward(self, reg_tokens): """ reg tokens shape: (b, 4, embed_dim) """ x = self.layers(reg_tokens) # (b, 4, 1) x = x.mean(dim=1) # (b, 1) return x def build_score_decoder(cfg, hidden_dim): return MlpScoreDecoder( in_dim=hidden_dim, hidden_dim=hidden_dim, num_layers=2, bn=False ) def build_artrackv2_seq(cfg, training=True): current_dir = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root pretrained_path = os.path.join(current_dir, '../../../pretrained_models') if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training: pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE) else: pretrained = '' if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224': backbone = 
vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION, prenum=cfg.MODEL.PRENUM) hidden_dim = backbone.embed_dim patch_start_index = 1 elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224': print("i use vit_large") backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION, prenum=cfg.MODEL.PRENUM) hidden_dim = backbone.embed_dim patch_start_index = 1 else: raise NotImplementedError backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index) cross_2_decoder = build_maskdecoder(cfg, hidden_dim) drop_path = cfg.MODEL.DROP_PATH drop_path_allocator = DropPathAllocator(drop_path) num_heads = cfg.MODEL.NUM_HEADS mlp_ratio = cfg.MODEL.MLP_RATIO qkv_bias = cfg.MODEL.QKV_BIAS drop_rate = cfg.MODEL.DROP_RATE attn_drop = cfg.MODEL.ATTN_DROP score_mlp = build_score_decoder(cfg, hidden_dim) model = ARTrackV2Seq( backbone, cross_2_decoder, score_mlp, hidden_dim, ) load_from = cfg.MODEL.PRETRAIN_PTH checkpoint = torch.load(load_from, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False) print('Load pretrained model from: ' + load_from) if 'sequence' in cfg.MODEL.PRETRAIN_FILE and training: print("i change myself") checkpoint = torch.load(cfg.MODEL.PRETRAIN_FILE, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["net"], strict=False) print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE) return model ================================================ FILE: lib/models/artrackv2_seq/base_backbone.py ================================================ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.vision_transformer import resize_pos_embed from timm.models.layers import DropPath, to_2tuple, trunc_normal_ from 
lib.models.layers.patch_embed import PatchEmbed
from lib.models.artrackv2_seq.utils import combine_tokens, recover_tokens
import time


def generate_square_subsequent_mask(sz, sx, ss):
    r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).

    Token layout along the sequence axis is
    [static template (sz/2) | dynamic template (sz/2) | search (sx) | coordinate/command tokens (ss)].
    Returns a boolean matrix where True marks attention pairs to be masked out.
    """
    # 0 means mask, 1 means visible
    sum = sz + sx + ss  # total sequence length (NOTE: shadows builtin `sum`)
    mask = (torch.triu(torch.ones(sum, sum)) == 1).transpose(0, 1)
    mask[:, :] = 0
    mask[:int(sz/2), :int(sz/2)] = 1 #template self: static template attends only to itself
    mask[int(sz/2):sz, int(sz/2):sz] = 1 # dt self: dynamic template attends to itself
    mask[int(sz/2):sz, sz:sz+sx] = 1 # dt search: dynamic template also sees the search region
    mask[int(sz / 2):sz, -1] = 1 # dynamic template additionally sees the last (score) token
    mask[sz:sz+sx, :sz+sx] = 1 # sr dt-t-sr: search sees both templates and itself
    mask[sz+sx:, :] = 1 # co dt-t-sr-co: coordinate tokens see everything
    return ~mask


class BaseBackbone(nn.Module):
    """Shared scaffolding for the ARTrack ViT backbone: positional-embedding surgery
    for the tracking resolutions plus the joint feature-extraction forward pass."""

    def __init__(self):
        super().__init__()

        # for original ViT — overwritten by the concrete VisionTransformer subclass
        self.pos_embed = None
        self.img_size = [224, 224]
        self.patch_size = 16
        self.embed_dim = 384

        # how template and search tokens are concatenated (see combine_tokens)
        self.cat_mode = 'direct'

        # per-region positional embeddings, created in finetune_track
        self.pos_embed_z = None
        self.pos_embed_x = None

        self.template_segment_pos_embed = None
        self.search_segment_pos_embed = None

        self.return_inter = False
        self.return_stage = [2, 5, 8, 11]

        self.add_cls_token = False
        self.add_sep_seg = False

    def finetune_track(self, cfg, patch_start_index=1):
        """Adapt the ImageNet-pretrained ViT to tracking: optionally re-stride the
        patch embedding, and resize the positional embedding to the configured
        template and search resolutions.

        Args:
            cfg: config node (DATA.SEARCH.SIZE, DATA.TEMPLATE.SIZE, MODEL.BACKBONE.*).
            patch_start_index: index of the first patch token in pos_embed
                (1 skips the cls token).
        """
        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
        new_patch_size = cfg.MODEL.BACKBONE.STRIDE

        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
        self.return_inter = cfg.MODEL.RETURN_INTER
        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG

        # resize patch embedding: interpolate the pretrained conv kernel to the new stride
        if new_patch_size != self.patch_size:
            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
            old_patch_embed = {}
            for name, param in self.patch_embed.named_parameters():
                if 'weight' in name:
                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
                                                      mode='bicubic', align_corners=False)
                    param = nn.Parameter(param)
                old_patch_embed[name] = param
            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size,
                                          in_chans=3, embed_dim=self.embed_dim)
            self.patch_embed.proj.bias = old_patch_embed['proj.bias']
            self.patch_embed.proj.weight = old_patch_embed['proj.weight']

        # for patch embedding: recover the pretrained pos-embed as a 2D grid [B, E, P_H, P_W]
        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]
        patch_pos_embed = patch_pos_embed.transpose(1, 2)
        B, E, Q = patch_pos_embed.shape
        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size
        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)

        # for search region: bicubic-resize the grid to the search token resolution
        H, W = search_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                           align_corners=False)
        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)

        # for template region: same resize at the template token resolution
        H, W = template_size
        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size
        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',
                                                             align_corners=False)
        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)

        # separate (trainable) copies for the static template, dynamic template and search
        self.pos_embed_z = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_z0 = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_z1 = nn.Parameter(template_patch_pos_embed)
        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)

        # for cls token (keep it but not used)
        if self.add_cls_token and patch_start_index > 0:
            cls_pos_embed = self.pos_embed[:, 0:1, :]
            self.cls_pos_embed = nn.Parameter(cls_pos_embed)

        # separate token and segment token
        if self.add_sep_seg:
            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)
            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)

        # self.cls_token = None
        # self.pos_embed = None

        # per-stage norms for intermediate feature output (FPN-style); stage 11 reuses self.norm
        if self.return_inter:
            for i_layer in self.fpn_stage:
                if i_layer != 11:
                    norm_layer = partial(nn.LayerNorm, eps=1e-6)
                    layer = norm_layer(self.embed_dim)
                    layer_name = f'norm{i_layer}'
                    self.add_module(layer_name, layer)

    def forward_features(self, z_0, z_1_feat, x, identity, seqs_input):
        """Joint pass over [static template | dynamic template | search | coordinate tokens].

        Args:
            z_0: static template image [B, 3, H_z, W_z] (patch-embedded here).
            z_1_feat: dynamic-template features, already token form [B, N_z, C].
            x: search image [B, 3, H_x, W_x].
            identity: [1, 3, C] region-identity embeddings (template / dyn-template / search).
            seqs_input: previously predicted coordinate-token trajectory.

        Returns:
            (output dict, z_0 tokens, z_1 tokens, search tokens, score token).
        """
        share_weight = self.word_embeddings.weight.T  # tied output projection for the vocab
        out_list = []
        # special vocabulary ids appended as the "command" suffix: the four
        # coordinate-query ids followed by the score-query id
        x0 = self.bins * self.range
        y0 = self.bins * self.range + 1
        x1 = self.bins * self.range + 2
        y1 = self.bins * self.range + 3
        score = self.bins * self.range + 5
        B, H, W = x.shape[0], x.shape[2], x.shape[3]

        command = torch.cat([torch.ones((B, 1)).to(x) * x0, torch.ones((B, 1)).to(x) * y0,
                             torch.ones((B, 1)).to(x) * x1, torch.ones((B, 1)).to(x) * y1,
                             torch.ones((B, 1)).to(x) * score], dim=1)
        trajectory = seqs_input
        command = command.to(trajectory)
        seqs_input_ = torch.cat([trajectory, command], dim=1)
        seqs_input_ = seqs_input_.to(torch.int64).to(x.device)
        output_x_feat = x.clone()  # raw search image kept for the output dict
        tgt = self.word_embeddings(seqs_input_).permute(1, 0, 2)

        x = self.patch_embed(x)
        z_0 = self.patch_embed(z_0)
        z_1 = z_1_feat  # dynamic template arrives pre-embedded

        len_x = x.shape[1]
        len_z = z_0.shape[1] + z_1.shape[1]
        len_seq = seqs_input_.shape[1]

        # add region-identity embeddings so the backbone can distinguish token groups
        z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1)
        z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z.shape[1], 1)
        x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1)

        # positional embeddings for the sequence part: history ids then command ids
        query_command_embed_ = self.position_embeddings.weight.unsqueeze(1)
        prev_embed_ = self.prev_position_embeddings.weight.unsqueeze(1)
        query_seq_embed = torch.cat([prev_embed_, query_command_embed_], dim=0)
        query_seq_embed = query_seq_embed.repeat(1, B, 1)

        tgt = tgt.transpose(0, 1)
        query_seq_embed = query_seq_embed.transpose(0, 1)

        z_0 += self.pos_embed_z0
        z_1 += self.pos_embed_z1
        x += self.pos_embed_x

        mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device)

        tgt += query_seq_embed[:, :tgt.shape[1]]
        z = torch.cat((z_0, z_1), dim=1)
        zx = combine_tokens(z, x, mode=self.cat_mode)
        zxs = torch.cat((zx, tgt), dim=1)

        zxs = self.pos_drop(zxs)
        # main ViT blocks followed by the extension blocks, all under the same mask
        for j, blk in enumerate(self.blocks):
            zxs = blk(zxs, padding_mask=mask)
        for j, blk in enumerate(self.extension):
            zxs = blk(zxs, padding_mask=mask)

        # split the fused sequence back into its constituent token groups
        lens_z_single = self.pos_embed_z.shape[1]
        z_0_feat = zxs[:, :lens_z_single]
        z_1_feat = zxs[:, lens_z_single:lens_z_single*2]
        x_feat = zxs[:, lens_z_single*2:lens_z_single*2+len_x]
        x_out = self.norm(zxs[:, -5:-1])  # the four coordinate-query tokens
        score_feat = zxs[:, -1]  # the score-query token
        seq_feat = x_out
        # project onto the shared vocabulary (tied with word_embeddings) + bias
        possibility = torch.matmul(x_out, share_weight)
        out = possibility + self.output_bias
        temp = out.transpose(0, 1)
        out_list.append(out.unsqueeze(0))
        out = out.softmax(-1)
        value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1]
        # greedy decode: arg-max per coordinate token, concatenated into [B, 4]
        for i in range(4):
            value, extra_seq = out[:, i, :].topk(dim=-1, k=1)[0], out[:, i, :].topk(dim=-1, k=1)[1]
            if i == 0:
                seqs_output = extra_seq
                values = value
            else:
                seqs_output = torch.cat([seqs_output, extra_seq], dim=-1)
                values = torch.cat([values, value], dim=-1)
        output = {'seqs': seqs_output, 'class': values, 'feat': temp, "state": "val/test",
                  "x_feat": output_x_feat.detach(), "seq_feat": seq_feat}
        return output, z_0_feat, z_1_feat, x_feat, score_feat

    def forward(self, z_0, z_1_feat, x, identity, seqs_input, **kwargs):
        """
        Joint feature extraction and relation modeling for the basic ViT backbone.
Args: z (torch.Tensor): template feature, [B, C, H_z, W_z] x (torch.Tensor): search region feature, [B, C, H_x, W_x] Returns: x (torch.Tensor): merged template and search region feature, [B, L_z+L_x, C] attn : None """ output = self.forward_features(z_0, z_1_feat, x, identity, seqs_input) return output ================================================ FILE: lib/models/artrackv2_seq/utils.py ================================================ import math import torch import torch.nn.functional as F def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False): # [B, HW, C] len_t = template_tokens.shape[1] len_s = search_tokens.shape[1] if mode == 'direct': merged_feature = torch.cat((template_tokens, search_tokens), dim=1) elif mode == 'template_central': central_pivot = len_s // 2 first_half = search_tokens[:, :central_pivot, :] second_half = search_tokens[:, central_pivot:, :] merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1) elif mode == 'partition': feat_size_s = int(math.sqrt(len_s)) feat_size_t = int(math.sqrt(len_t)) window_size = math.ceil(feat_size_t / 2.) 
# pad feature maps to multiples of window size B, _, C = template_tokens.shape H = W = feat_size_t template_tokens = template_tokens.view(B, H, W, C) pad_l = pad_b = pad_r = 0 # pad_r = (window_size - W % window_size) % window_size pad_t = (window_size - H % window_size) % window_size template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b)) _, Hp, Wp, _ = template_tokens.shape template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C) template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2) _, Hc, Wc, _ = template_tokens.shape template_tokens = template_tokens.view(B, -1, C) merged_feature = torch.cat([template_tokens, search_tokens], dim=1) # calculate new h and w, which may be useful for SwinT or others merged_h, merged_w = feat_size_s + Hc, feat_size_s if return_res: return merged_feature, merged_h, merged_w else: raise NotImplementedError return merged_feature def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'): if mode == 'direct': recovered_tokens = merged_tokens elif mode == 'template_central': central_pivot = len_search_token // 2 len_remain = len_search_token - central_pivot len_half_and_t = central_pivot + len_template_token first_half = merged_tokens[:, :central_pivot, :] second_half = merged_tokens[:, -len_remain:, :] template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :] recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1) elif mode == 'partition': recovered_tokens = merged_tokens else: raise NotImplementedError return recovered_tokens def window_partition(x, window_size: int): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def 
window_reverse(windows, window_size: int, H: int, W: int): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x ================================================ FILE: lib/models/artrackv2_seq/vit.py ================================================ """ Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 The official jax code is released and available at https://github.com/google-research/vision_transformer DeiT model defs and weights from https://github.com/facebookresearch/deit, paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out for some einops/einsum fun * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2021 Ross Wightman Modified by Botao Ye """ import math import logging from functools import partial from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_ from timm.models.registry import register_model from lib.models.layers.patch_embed import PatchEmbed from lib.models.artrackv2_seq.base_backbone import BaseBackbone class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, return_attention=False, padding_mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale if padding_mask != None: attn = attn.masked_fill(padding_mask, float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) if return_attention: return x, attn return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., 
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False, padding_mask=None):
        # Pre-norm transformer block: attention then MLP, each with a residual
        # connection and stochastic depth.
        if return_attention:
            feat, attn = self.attn(self.norm1(x), True, padding_mask)
            x = x + self.drop_path(feat)
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x, attn
        else:
            x = x + self.drop_path(self.attn(self.norm1(x), padding_mask=padding_mask))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x


class VisionTransformer(BaseBackbone):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877

    Extended here with an ARTrack coordinate vocabulary (word/position embeddings)
    and `extension` extra transformer blocks appended after the main depth.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init='', bins=400, range_time=2, prenum=7, extension=3):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
            bins: number of discrete coordinate bins per unit range
            range_time: multiplier on bins (coordinate vocabulary spans bins*range_time)
            prenum: number of history boxes encoded as previous-position embeddings
            extension: number of extra transformer blocks appended after `depth`
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.bins = bins
        in_channel = embed_dim
        self.range = range_time
        # Coordinate vocabulary: bins*range coordinate ids + 6 special ids;
        # id bins*range+4 is the padding token; embeddings are L2-max-norm clipped.
        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range+4, max_norm=1, norm_type=2.0)
        # position ids for the 5 "command" query tokens (x0, y0, x1, y1, score)
        self.position_embeddings = nn.Embedding(5, in_channel)
        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 6))
        # position ids for the prenum*4 history-coordinate tokens
        self.prev_position_embeddings = nn.Embedding(prenum * 4, in_channel)

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        # extra blocks; NOTE they reuse the first `extension` entries of the
        # decay schedule (dpr[0..extension-1]), i.e. near-zero drop-path rates
        self.extension = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(extension)])

        self.norm = norm_layer(embed_dim)
        self.init_weights(weight_init)

    def init_weights(self, mode=''):
        """Initialize parameters; `mode` selects the (timm-style) init scheme."""
        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        """Load official JAX/Flax .npz weights (see _load_weights)."""
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self):
        # parameters excluded from weight decay by the optimizer setup
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        # NOTE(review): self.head/self.head_dist are only created by
        # reset_classifier; this tracker variant never builds them in __init__.
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head(s) for `num_classes` outputs."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()


def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            # classifier head: zero weights, constant (possibly nlhb) bias
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif name.startswith('pre_logits'):
            lecun_normal_(module.weight)
            nn.init.zeros_(module.bias)
        else:
            if jax_impl:
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    if 'mlp' in name:
                        nn.init.normal_(module.bias, std=1e-6)
                    else:
                        nn.init.zeros_(module.bias)
            else:
                trunc_normal_(module.weight, std=.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
    elif jax_impl and isinstance(module, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)


@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation

    Maps Flax parameter names (kernel/scale/bias under nested prefixes) onto the
    PyTorch module tree, transposing arrays where the layouts differ.
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, transposing HWIO conv kernels / dense kernels to torch layout
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid: a conv backbone feeds the patch embedding
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        # plain ViT: adapt the patch-embedding conv to the model's input channels
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
    # NOTE(review): model.head / model.pre_logits only exist on classifier variants;
    # for this tracker backbone these branches are expected to be skipped.
    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    # transformer blocks: qkv is stored as separate q/k/v dense layers in Flax
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))


def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    # Rescale the grid of position embeddings when loading from state_dict.
Adapted from # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 print('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) ntok_new = posemb_new.shape[1] if num_tokens: posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] ntok_new -= num_tokens else: posemb_tok, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(math.sqrt(len(posemb_grid))) if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 assert len(gs_new) >= 2 print('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} if 'model' in state_dict: # For deit models state_dict = state_dict['model'] for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k and len(v.shape) < 4: # For old models that I trained prior to conv based patchification O, I, H, W = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) elif k == 'pos_embed' and v.shape != model.pos_embed.shape: # To resize pos embedding when using model at different size from pretrained weights v = resize_pos_embed( v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) out_dict[k] = v return out_dict def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = VisionTransformer(**kwargs) if pretrained: if 'npz' in pretrained: model.load_pretrained(pretrained, 
prefix='') else: checkpoint = torch.load(pretrained, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False) print('Load pretrained model from: ' + pretrained) return model def vit_base_patch16_224(pretrained=False, bins=400, range=2, extension=3, prenum=7, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, bins=bins, range_time=range, extension=extension, prenum=prenum, **kwargs) model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model def vit_large_patch16_224(pretrained=False, bins=400, range=2, extension=3, prenum=7, **kwargs): model_kwargs = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, bins=bins, range_time=range, extension=extension, prenum=prenum, **kwargs) model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) return model ================================================ FILE: lib/models/layers/__init__.py ================================================ ================================================ FILE: lib/models/layers/attn.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import trunc_normal_ from lib.models.layers.rpe import generate_2d_concatenated_self_attention_relative_positional_encoding_index class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., rpe=False, z_size=7, x_size=14): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.rpe =rpe if self.rpe: relative_position_index = \ 
generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],
                                                                           [x_size, x_size])
            self.register_buffer("relative_position_index", relative_position_index)
            # define a parameter table of relative position bias
            self.relative_position_bias_table = nn.Parameter(
                torch.empty((num_heads, relative_position_index.max() + 1)))
            trunc_normal_(self.relative_position_bias_table, std=0.02)

    def forward(self, x, mask=None, return_attention=False):
        # x: B, N, C
        # mask: [B, N, ] torch.bool
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale

        if self.rpe:
            # per-head learned bias gathered by the precomputed 2D relative index
            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)
            attn += relative_position_bias

        if mask is not None:
            # masked keys get -inf so they vanish after softmax
            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),)

        # NOTE(review): dead branch — split_attn is hard-coded False, so the
        # separate template/search softmax below is never taken.
        split_attn = False
        len_t = 49
        if split_attn:
            attn_t = attn[..., :len_t].softmax(dim=-1)
            attn_s = attn[..., len_t:].softmax(dim=-1)
            attn = torch.cat([attn_t, attn_s], dim=-1)
        else:
            attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        if return_attention:
            return x, attn
        else:
            return x


class Attention_talking_head(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf)
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 rpe=True, z_size=7, x_size=14):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # talking-heads: linear mixing across the head dimension before (proj_l)
        # and after (proj_w) the softmax
        self.proj_l = nn.Linear(num_heads, num_heads)
        self.proj_w = nn.Linear(num_heads, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)

        self.rpe = rpe
        if self.rpe:
            relative_position_index = \
                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],
                                                                                           [x_size, x_size])
            self.register_buffer("relative_position_index", relative_position_index)
            # define a parameter table of relative position bias
            self.relative_position_bias_table = nn.Parameter(
                torch.empty((num_heads, relative_position_index.max() + 1)))
            trunc_normal_(self.relative_position_bias_table, std=0.02)

    def forward(self, x, mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1))

        if self.rpe:
            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)
            attn += relative_position_bias
        if mask is not None:
            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),)

        # mix heads pre-softmax, softmax, then mix heads post-softmax
        attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        attn = attn.softmax(dim=-1)
        attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


================================================
FILE: lib/models/layers/attn_blocks.py
================================================
import math
import torch
import torch.nn as nn
from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_

from lib.models.layers.attn import Attention


def candidate_elimination(attn: torch.Tensor, tokens: torch.Tensor, lens_t: int, keep_ratio: float,
                          global_index: torch.Tensor, box_mask_z: torch.Tensor):
    """
    Eliminate potential background candidates for computation reduction and noise cancellation.
    Args:
        attn (torch.Tensor): [B, num_heads, L_t + L_s, L_t + L_s], attention weights
        tokens (torch.Tensor): [B, L_t + L_s, C], template and search region tokens
        lens_t (int): length of template
        keep_ratio (float): keep ratio of search region tokens (candidates)
        global_index (torch.Tensor): global index of search region tokens
        box_mask_z (torch.Tensor): template mask used to accumulate attention weights

    Returns:
        tokens_new (torch.Tensor): tokens after candidate elimination
        keep_index (torch.Tensor): indices of kept search region tokens
        removed_index (torch.Tensor): indices of removed search region tokens
    """
    lens_s = attn.shape[-1] - lens_t
    bs, hn, _, _ = attn.shape

    lens_keep = math.ceil(keep_ratio * lens_s)
    if lens_keep == lens_s:
        # keep everything: no elimination, no removed indices
        return tokens, global_index, None

    # attention paid by template tokens to search-region tokens
    attn_t = attn[:, :, :lens_t, lens_t:]

    if box_mask_z is not None:
        # restrict to template tokens inside the target box before averaging
        box_mask_z = box_mask_z.unsqueeze(1).unsqueeze(-1).expand(-1, attn_t.shape[1], -1, attn_t.shape[-1])
        # attn_t = attn_t[:, :, box_mask_z, :]
        attn_t = attn_t[box_mask_z]
        attn_t = attn_t.view(bs, hn, -1, lens_s)
        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s

        # attn_t = [attn_t[i, :, box_mask_z[i, :], :] for i in range(attn_t.size(0))]
        # attn_t = [attn_t[i].mean(dim=1).mean(dim=0) for i in range(len(attn_t))]
        # attn_t = torch.stack(attn_t, dim=0)
    else:
        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s

    # use sort instead of topk, due to the speed issue
    # https://github.com/pytorch/pytorch/issues/22812
    sorted_attn, indices = torch.sort(attn_t, dim=1, descending=True)

    topk_attn, topk_idx = sorted_attn[:, :lens_keep], indices[:, :lens_keep]
    non_topk_attn, non_topk_idx = sorted_attn[:, lens_keep:], indices[:, lens_keep:]

    keep_index = global_index.gather(dim=1, index=topk_idx)
    removed_index = global_index.gather(dim=1, index=non_topk_idx)

    # separate template and search tokens
    tokens_t = tokens[:, :lens_t]
    tokens_s = tokens[:, lens_t:]

    # obtain the attentive and inattentive tokens
    B, L, C = tokens_s.shape
    # topk_idx_ = topk_idx.unsqueeze(-1).expand(B, lens_keep, C)
    attentive_tokens = tokens_s.gather(dim=1, index=topk_idx.unsqueeze(-1).expand(B, -1, C))
    # inattentive_tokens = tokens_s.gather(dim=1, index=non_topk_idx.unsqueeze(-1).expand(B, -1, C))

    # compute the weighted combination of inattentive tokens
    # fused_token = non_topk_attn @ inattentive_tokens

    # concatenate these tokens
    # tokens_new = torch.cat([tokens_t, attentive_tokens, fused_token], dim=0)
    tokens_new = torch.cat([tokens_t, attentive_tokens], dim=1)

    return tokens_new, keep_index, removed_index


class CEBlock(nn.Module):
    """Transformer block with candidate elimination: standard attention + MLP,
    optionally dropping low-attention search tokens after the attention step."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, keep_ratio_search=1.0,):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. \
else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        self.keep_ratio_search = keep_ratio_search

    def forward(self, x, global_index_template, global_index_search, mask=None,
                ce_template_mask=None, keep_ratio_search=None):
        # attention + residual; the attention map is reused below for candidate elimination
        x_attn, attn = self.attn(self.norm1(x), mask, True)
        x = x + self.drop_path(x_attn)
        lens_t = global_index_template.shape[1]

        removed_index_search = None
        # run elimination only when either the block-level or call-level keep ratio is < 1
        if self.keep_ratio_search < 1 and (keep_ratio_search is None or keep_ratio_search < 1):
            keep_ratio_search = self.keep_ratio_search if keep_ratio_search is None else keep_ratio_search
            x, global_index_search, removed_index_search = candidate_elimination(
                attn, x, lens_t, keep_ratio_search, global_index_search, ce_template_mask)

        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x, global_index_template, global_index_search, removed_index_search, attn


class Block(nn.Module):
    """Plain pre-norm transformer block: attention + MLP, both with residuals."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, mask=None):
        x = x + self.drop_path(self.attn(self.norm1(x), mask))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


================================================
FILE: lib/models/layers/frozen_bn.py
================================================
import torch


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # all stats/affine params are buffers, so they never receive gradients
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # drop num_batches_tracked coming from regular BatchNorm2d checkpoints
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        scale = w * (rv + eps).rsqrt()  # rsqrt(x): 1/sqrt(x), r: reciprocal
        bias = b - rm * scale
        return x * scale + bias


================================================
FILE: lib/models/layers/head.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
from torch.nn import Identity
from timm.models.layers import trunc_normal_
from timm.models.layers import DropPath
from lib.models.layers.frozen_bn import FrozenBatchNorm2d
import copy


def top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

        NOTE(review): filters each batch row in a Python loop and modifies
        `logits` in place (the filtered tensor is also returned).
    """
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        for i in range(logits.shape[0]):
            # mask everything strictly below the k-th largest logit of this row
            indices_to_remove = logits[i] < torch.topk(logits[i], top_k)[0][..., -1, None]
            logits[i][indices_to_remove] = filter_value

    if top_p > 0.0:
        for i in range(logits.shape[0]):
            sorted_logits, sorted_indices = torch.sort(logits[i], descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift the indices to the right to keep also the first token above the threshold
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices[sorted_indices_to_remove]
            logits[i][indices_to_remove] = filter_value
    return logits


def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, freeze_bn=False):
    # conv + (Frozen)BatchNorm + ReLU building block used by the heads below
    if freeze_bn:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=True),
            FrozenBatchNorm2d(out_planes),
            nn.ReLU(inplace=True))
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=True),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True))


class Corner_Predictor(nn.Module):
    """ Corner Predictor module"""

    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):
        super(Corner_Predictor, self).__init__()
        self.feat_sz = feat_sz
        self.stride = stride
        self.img_sz = self.feat_sz * self.stride
        '''top-left corner'''
        self.conv1_tl = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_tl = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_tl = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1)

        '''bottom-right corner'''
        self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_br = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1)

        '''about coordinates and indexs'''
        # NOTE(review): coordinate grids are created on CUDA unconditionally —
        # this head cannot run on a CPU-only machine as written.
        with torch.no_grad():
            self.indice = torch.arange(0, self.feat_sz).view(-1, 1) * self.stride
            # generate mesh-grid
            self.coord_x = self.indice.repeat((self.feat_sz, 1)) \
                .view((self.feat_sz * self.feat_sz,)).float().cuda()
            self.coord_y = self.indice.repeat((1, self.feat_sz)) \
                .view((self.feat_sz * self.feat_sz,)).float().cuda()

    def forward(self, x, return_dist=False, softmax=True):
        """ Forward pass with input x. """
        score_map_tl, score_map_br = self.get_score_map(x)
        if return_dist:
            coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax)
            coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax)
            # coordinates normalized to [0, 1] by the search-image size
            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz, prob_vec_tl, prob_vec_br
        else:
            coorx_tl, coory_tl = self.soft_argmax(score_map_tl)
            coorx_br, coory_br = self.soft_argmax(score_map_br)
            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz

    def get_score_map(self, x):
        # top-left branch
        x_tl1 = self.conv1_tl(x)
        x_tl2 = self.conv2_tl(x_tl1)
        x_tl3 = self.conv3_tl(x_tl2)
        x_tl4 = self.conv4_tl(x_tl3)
        score_map_tl = self.conv5_tl(x_tl4)

        # bottom-right branch
        x_br1 = self.conv1_br(x)
        x_br2 = self.conv2_br(x_br1)
        x_br3 = self.conv3_br(x_br2)
        x_br4 = self.conv4_br(x_br3)
        score_map_br = self.conv5_br(x_br4)
        return score_map_tl, score_map_br

    def soft_argmax(self, score_map, return_dist=False, softmax=True):
        """ get soft-argmax coordinate for a given heatmap """
        score_vec = score_map.view((-1, self.feat_sz * self.feat_sz))  # (batch, feat_sz * feat_sz)
        prob_vec = nn.functional.softmax(score_vec, dim=1)
        # expectation of the coordinate under the softmax distribution
        exp_x = torch.sum((self.coord_x * prob_vec), dim=1)
        exp_y = torch.sum((self.coord_y * prob_vec), dim=1)
        if return_dist:
            if softmax:
                return exp_x, exp_y, prob_vec
            else:
                return exp_x, exp_y, score_vec
        else:
            return exp_x, exp_y


class CenterPredictor(nn.Module, ):
    """Center-based box head: predicts a center score map plus per-location
    size and offset maps, and decodes a (cx, cy, w, h) box from the argmax."""

    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):
        super(CenterPredictor, self).__init__()
        self.feat_sz = feat_sz
        self.stride = stride
        self.img_sz = self.feat_sz * self.stride

        # corner predict
        self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1)

        # size regress
        self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_offset = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_offset = nn.Conv2d(channel // 8, 2, kernel_size=1)

        # size regress
        self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1)

        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, x, gt_score_map=None):
        """ Forward pass with input x. """
        score_map_ctr, size_map, offset_map = self.get_score_map(x)

        # assert gt_score_map is None
        if gt_score_map is None:
            bbox = self.cal_bbox(score_map_ctr, size_map, offset_map)
        else:
            # decode against the ground-truth score map (training-time option)
            bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map)

        return score_map_ctr, bbox, size_map, offset_map

    def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False):
        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)
        # NOTE(review): tensor `//` floor division is deprecated in newer torch;
        # torch.div(idx, self.feat_sz, rounding_mode='floor') is the modern form.
        idx_y = idx // self.feat_sz
        idx_x = idx % self.feat_sz

        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)
        size = size_map.flatten(2).gather(dim=2, index=idx)
        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)

        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,
        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz

        # cx, cy, w, h
        bbox = torch.cat([(idx_x.to(torch.float) + offset[:, :1]) / self.feat_sz,
                          (idx_y.to(torch.float) + offset[:, 1:]) / self.feat_sz,
                          size.squeeze(-1)], dim=1)

        if return_score:
            return bbox, max_score
        return bbox

    def get_pred(self, score_map_ctr, size_map, offset_map):
        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)
        idx_y = idx // self.feat_sz
        idx_x = idx % self.feat_sz

        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)
        size = size_map.flatten(2).gather(dim=2, index=idx)
        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)

        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,
        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz
        return size * self.feat_sz, offset

    def get_score_map(self, x):

        def _sigmoid(x):
            # clamp keeps the BCE-style losses downstream numerically stable
            y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
            return y

        # ctr branch
        x_ctr1 = self.conv1_ctr(x)
        x_ctr2 = self.conv2_ctr(x_ctr1)
        x_ctr3 = self.conv3_ctr(x_ctr2)
        x_ctr4 = self.conv4_ctr(x_ctr3)
        score_map_ctr = self.conv5_ctr(x_ctr4)

        # offset branch
        x_offset1 = self.conv1_offset(x)
        x_offset2 = self.conv2_offset(x_offset1)
        x_offset3 = self.conv3_offset(x_offset2)
        x_offset4 = self.conv4_offset(x_offset3)
        score_map_offset = self.conv5_offset(x_offset4)

        # size branch
        x_size1 = self.conv1_size(x)
        x_size2 = self.conv2_size(x_size1)
        x_size3 = self.conv3_size(x_size2)
        x_size4 = self.conv4_size(x_size3)
        score_map_size = self.conv5_size(x_size4)
        return _sigmoid(score_map_ctr), _sigmoid(score_map_size), score_map_offset


class MLP(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        if BN:
            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))
                                        for n, k in zip([input_dim] + h, h + [output_dim]))
        else:
            self.layers = nn.ModuleList(nn.Linear(n, k)
                                        for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        # ReLU on every layer except the last
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class SelfAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # fused QKV projection: absolute positional encodings are not added to q/k
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, x, q_ape, k_ape, attn_pos):
        '''
            Args:
                x (torch.Tensor): (B, L, C)
                q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
                k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
                attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
            Returns:
                torch.Tensor: (B, L, C)
        '''
        B, N, C = x.shape
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q = x + q_ape if q_ape is not None else x
            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = x + k_ape if k_ape is not None else x
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CrossAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if attn_pos_encoding_only:
            # fused KV projection; q projected separately
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        '''
            Args:
                q (torch.Tensor): (B, L_q, C)
                kv (torch.Tensor): (B, L_kv, C)
                q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
                k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
                attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
            Returns:
                torch.Tensor: (B, L_q, C)
        '''
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, q_N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Mlp(nn.Module):
    """ Multilayer perceptron."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        '''
            Args:
                x (torch.Tensor): (B, L, C), input tensor
            Returns:
                torch.Tensor: (B, L, C), output tensor
        '''
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class FeatureFusion(nn.Module):
    """One fusion layer: self-attention on template (z) and search (x) streams,
    bidirectional cross-attention between them, then per-stream MLPs."""

    def __init__(self,
                 dim, num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 attn_pos_encoding_only=False):
        super(FeatureFusion, self).__init__()
        self.z_norm1 = norm_layer(dim)
        self.x_norm1 = norm_layer(dim)
        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)

        self.z_norm2_1 = norm_layer(dim)
        self.z_norm2_2 = norm_layer(dim)
        self.x_norm2_1 = norm_layer(dim)
        self.x_norm2_2 = norm_layer(dim)

        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)

        mlp_hidden_dim = int(dim * mlp_ratio)
        self.z_norm3 = norm_layer(dim)
        self.x_norm3 = norm_layer(dim)
        # NOTE(review): debug print left in — remove once mlp_ratio is confirmed
        print(mlp_ratio)
        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.drop_path = drop_path

    def forward(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos):
        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos))
        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos))

        z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos))
        x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos))

        z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))
        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))
        return z, x


class FeatureFusionEncoder(nn.Module):
    """Stack of FeatureFusion layers plus the untied absolute and relative
    positional-encoding tables shared by all layers."""

    def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc,
                 z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index,
                 z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table, x_z_rel_pos_bias_table):
        super(FeatureFusionEncoder, self).__init__()
        self.layers = nn.ModuleList(feature_fusion_layers)
        self.z_pos_enc = z_pos_enc
        self.x_pos_enc = x_pos_enc
        # index buffers are non-persistent (third arg False): not saved in state_dict
        self.register_buffer('z_rel_pos_index', z_rel_pos_index, False)
        self.register_buffer('x_rel_pos_index', x_rel_pos_index, False)
        self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False)
        self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False)
        self.z_rel_pos_bias_table = z_rel_pos_bias_table
        self.x_rel_pos_bias_table = x_rel_pos_bias_table
        self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table
        self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table

    def forward(self, z, x, z_pos, x_pos):
        '''
            Args:
                z (torch.Tensor): (B, L_z, C), template image feature tokens
                x (torch.Tensor): (B, L_x, C), search image feature tokens
                z_pos (torch.Tensor | None): (1 or B, L_z, C), optional positional encoding for z
                x_pos (torch.Tensor | None): (1 or B, L_x, C), optional positional encoding for x
            Returns:
                Tuple[torch.Tensor, torch.Tensor]:
                    (B, L_z, C): template image feature tokens
                    (B, L_x, C): search image feature tokens
        '''
        # Support untied positional encoding only for simplicity
        assert z_pos is None and x_pos is None

        # untied positional encoding
        z_q_pos, z_k_pos = self.z_pos_enc()
        x_q_pos, x_k_pos = self.x_pos_enc()
        z_self_attn_pos = (z_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)
        x_self_attn_pos = (x_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)

        z_x_cross_attn_pos = (z_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)
        x_z_cross_attn_pos = (x_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)

        # relative positional encoding
        z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index)
        x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index)
        z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index)
        x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index)

        for layer in self.layers:
            z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos)

        return z, x


class Learned2DPositionalEncoder(nn.Module):
    """Separable learned 2D positional encoding: one table per axis,
    summed on the fly into an (h*w, dim) grid."""

    def __init__(self, dim, w, h):
        super(Learned2DPositionalEncoder, self).__init__()
        self.w_pos = nn.Parameter(torch.empty(w, dim))
        self.h_pos = nn.Parameter(torch.empty(h, dim))
        trunc_normal_(self.w_pos, std=0.02)
        trunc_normal_(self.h_pos, std=0.02)

    def forward(self):
        w = self.w_pos.shape[0]
        h = self.h_pos.shape[0]
        return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1)


class Untied2DPositionalEncoder(nn.Module):
    """Untied positional encoder: projects a learned 2D encoding into separate
    per-head q and k positional terms (TrackFormer/SwinTrack-style)."""

    def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True):
        super(Untied2DPositionalEncoder, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.pos = Learned2DPositionalEncoder(dim, w, h) self.norm = nn.LayerNorm(dim) self.pos_q_linear = None self.pos_k_linear = None if with_q: self.pos_q_linear = nn.Linear(dim, dim) if with_k: self.pos_k_linear = nn.Linear(dim, dim) self.num_heads = num_heads head_dim = dim // num_heads self.scale = scale or head_dim ** -0.5 def forward(self): pos = self.norm(self.pos()) seq_len = pos.shape[0] if self.pos_q_linear is not None and self.pos_k_linear is not None: pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) return pos_q, pos_k elif self.pos_q_linear is not None: pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale return pos_q elif self.pos_k_linear is not None: pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) return pos_k else: raise RuntimeError def generate_2d_relative_positional_encoding_index(z_shape, x_shape): ''' z_shape: (z_h, z_w) x_shape: (x_h, x_w) ''' z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1])) x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1])) z_2d_index_h = z_2d_index_h.flatten(0) z_2d_index_w = z_2d_index_w.flatten(0) x_2d_index_h = x_2d_index_h.flatten(0) x_2d_index_w = x_2d_index_w.flatten(0) diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :] diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :] diff = torch.stack((diff_h, diff_w), dim=-1) _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0) return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1]) class RelativePosition2DEncoder(nn.Module): def __init__(self, num_heads, embed_size): super(RelativePosition2DEncoder, self).__init__() self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size))) trunc_normal_(self.relative_position_bias_table, 
std=0.02)

    def forward(self, attn_rpe_index):
        '''
        Args:
            attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size
        Returns:
            torch.Tensor: (1, num_heads, *)
        '''
        # Advanced indexing gathers one bias per head for every index; unsqueeze adds a batch dim.
        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)


class DropPathAllocator:
    """Hands out DropPath (stochastic depth) modules one depth level at a time.

    Used as a context manager: modules are allocated while building the network,
    and on __exit__ every allocated DropPath gets its final drop probability —
    linearly increasing with depth when stochastic_depth_decay is True, or the
    constant max rate otherwise.
    """

    def __init__(self, max_drop_path_rate, stochastic_depth_decay = True):
        self.max_drop_path_rate = max_drop_path_rate
        self.stochastic_depth_decay = stochastic_depth_decay
        self.allocated = []   # list of per-depth lists of modules, finalized on exit
        self.allocating = []  # modules belonging to the depth currently being built

    def __enter__(self):
        self.allocating = []

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush any partially built depth, then assign drop probabilities.
        if len(self.allocating) != 0:
            self.allocated.append(self.allocating)
        self.allocating = None
        if not self.stochastic_depth_decay:
            # Constant rate for every DropPath regardless of depth.
            for depth_module in self.allocated:
                for module in depth_module:
                    if isinstance(module, DropPath):
                        module.drop_prob = self.max_drop_path_rate
        else:
            # Linear decay rule: rate grows from 0 (first depth) to max (last depth).
            depth = self.get_depth()
            dpr = [x.item() for x in torch.linspace(0, self.max_drop_path_rate, depth)]
            assert len(dpr) == len(self.allocated)
            for drop_path_rate, depth_modules in zip(dpr, self.allocated):
                for module in depth_modules:
                    if isinstance(module, DropPath):
                        module.drop_prob = drop_path_rate

    def __len__(self):
        # Total number of allocated modules across all depths.
        length = 0
        for depth_modules in self.allocated:
            length += len(depth_modules)
        return length

    def increase_depth(self):
        # Close the current depth level and start a new one.
        self.allocated.append(self.allocating)
        self.allocating = []

    def get_depth(self):
        return len(self.allocated)

    def allocate(self):
        # Depth 0 (and the zero-rate case) always gets Identity: with linear decay
        # the first depth's rate would be 0 anyway, so a real DropPath is pointless.
        if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0):
            drop_path_module = Identity()
        else:
            drop_path_module = DropPath()
        self.allocating.append(drop_path_module)
        return drop_path_module

    def get_all_allocated(self):
        # Flat list of every allocated module.
        allocated = []
        for depth_module in self.allocated:
            for module in depth_module:
                allocated.append(module)
        return allocated


def build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path):
    """Build the z/x feature-fusion encoder with untied absolute and relative positional encodings.

    z_size/x_size are the (square) template and search feature-map side lengths.
    """
    z_shape = [z_size, z_size]
    x_shape = [x_size, x_size]
    encoder_layers = []
    for i in range(encoder_layer):
        encoder_layers.append(
            FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate,
                          attn_drop=attn_drop, drop_path=drop_path.allocate(),
                          attn_pos_encoding_only=True)
        )
    # Untied absolute positional encoders for template (z) and search (x) tokens.
    z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])
    x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])
    # Index tables mapping each (query, key) token pair to a relative-position bucket.
    z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)
    x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)
    z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)
    x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)
    # One learnable bias per head per relative-position bucket.
    z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1)
    x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1)
    z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1)
    x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1)

    return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,
                                x_self_attn_rel_pos_index, z_x_cross_attn_rel_pos_index,
                                x_z_cross_attn_rel_pos_index, z_self_attn_rel_pos_bias_table,
                                x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,
                                x_z_cross_attn_rel_pos_bias_table)


class TargetQueryDecoderLayer(nn.Module):
    """One decoder layer: query self-attention, cross-attention over z/x memory, then MLP (pre-norm residual)."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super(TargetQueryDecoderLayer, self).__init__()
        self.norm_1 = norm_layer(dim)
        self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop)
        self.norm_2_query = norm_layer(dim)
        self.norm_2_memory = norm_layer(dim)
        self.multihead_attn = nn.MultiheadAttention(dim,
                                                    num_heads, dropout=drop)
        self.norm_3 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.drop_path = drop_path

    def forward(self, query, memoryz, memoryx, query_pos, pos_z, pos_x, identity,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                ):
        '''
        Args:
            query (torch.Tensor): (B, num_queries, C)
            memory (torch.Tensor): (B, L, C)
            query_pos (torch.Tensor): (1 or B, num_queries, C)
            memory_pos (torch.Tensor): (1 or B, L, C)
        Returns:
            torch.Tensor: (B, num_queries, C)
        '''
        tgt = query
        # Pre-norm self-attention over queries; positions added to q/k only, values untouched.
        q = k = self.norm_1(query) + query_pos
        query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask,
                                                       key_padding_mask=tgt_key_padding_mask)[0])
        q2 = self.norm_2_query(query) + query_pos
        # Cross-attention memory is template (z) + search (x) tokens concatenated.
        memory = torch.cat((memoryz,memoryx),dim=1)
        pos = torch.cat((pos_z, pos_x), dim=1)
        # identity[:, 0] / identity[:, 1] are learned embeddings that tag tokens as z vs x.
        ide = torch.cat((identity[: ,0, :].repeat(1, pos_z.shape[1], 1),
                         identity[:, 1, :].repeat(1, pos_x.shape[1], 1)), dim=1)
        # nn.MultiheadAttention expects (L, B, C); permute keys/values to seq-first.
        k2 = (self.norm_2_memory(memory) + pos + ide).permute(1, 0, 2)
        memory_in = memory.permute(1, 0, 2)
        query = query + self.drop_path(
            self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask,
                                key_padding_mask=memory_key_padding_mask)[0])
        query = query + self.drop_path(self.mlpz(self.norm_3(query)))

        return query


def _get_clones(module, N):
    # N independent deep copies of a module.
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


class TargetQueryDecoderBlock(nn.Module):
    """Stack of TargetQueryDecoderLayer with a final LayerNorm."""

    def __init__(self, dim, decoder_layers, num_layer):
        super(TargetQueryDecoderBlock, self).__init__()
        self.layers = nn.ModuleList(decoder_layers)
        self.num_layers = num_layer
        self.norm = nn.LayerNorm(dim)

    def forward(self, tgt, z, x, pos_z, pos_x, identity,
                query_pos: Optional[Tensor] = None,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None):
        '''
        Args:
            z (torch.Tensor): (B, L_z, C)
            x (torch.Tensor): (B, L_x, C)
        Returns:
            torch.Tensor: (B, num_queries, C)
        '''
        output = tgt
        for layer in self.layers:
            output = layer(output, z, x, query_pos, pos_z, pos_x, identity,
                           tgt_mask=tgt_mask,
                           memory_mask=memory_mask,
                           tgt_key_padding_mask=tgt_key_padding_mask,
                           memory_key_padding_mask=memory_key_padding_mask)
        output = self.norm(output)

        return output


def build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, z_size, x_size):
    """Build the target-query decoder; one drop-path slot is allocated per layer."""
    z_shape = [z_size, z_size]
    x_shape = [x_size, x_size]
    num_layers = decoder_layer
    decoder_layers = []
    for _ in range(num_layers):
        decoder_layers.append(
            TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate,
                                    attn_drop=attn_drop_rate, drop_path=drop_path.allocate()))
        drop_path.increase_depth()
    decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers)
    return decoder


def generate_square_subsequent_mask(sz):
    r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).
""" mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float( '-inf')).masked_fill(mask == 1, float(0.0)) return mask class Pix2Track(nn.Module): def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, range=2, stride=16, encoder_layer=3, decoder_layer=3, bins=400,num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0,attn_drop=0.0, drop_path=nn.Identity): super(Pix2Track, self).__init__() self.bins = bins self.range = range self.word_embeddings = nn.Embedding(self.bins * self.range + 2, in_channel, padding_idx=self.bins * self.range, max_norm=1, norm_type=2.0) print(self.bins) self.position_embeddings = nn.Embedding( 5, in_channel) self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 2)) self.encoder_layer = encoder_layer self.drop_path = drop_path self.tz = feat_tz * feat_tz self.sz = feat_sz * feat_sz trunc_normal_(self.word_embeddings.weight, std=.02) if self.encoder_layer > 0 : self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path) else: self.encoder = None self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, feat_tz, feat_sz) def forward(self, zx_feat, pos_z, pos_x, identity, seqs_input=None): share_weight = self.word_embeddings.weight.T z_feat = zx_feat[:, :self.tz] x_feat = zx_feat[:, self.tz:] bs = zx_feat.shape[0] if self.encoder != None: z_feat, x_feat = self.encoder(z_feat, x_feat, None, None) if seqs_input != None: seqs_input = seqs_input.to(torch.int64).to(zx_feat.device) tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed = self.position_embeddings.weight.unsqueeze(1) query_embed = query_embed.repeat(1, bs, 1) decoder_feat = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, query_embed, tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device)) at = torch.matmul(decoder_feat, 
share_weight) at = at + self.output_bias output = {'feat': at, "state": "train"} else: origin_seq = torch.ones(bs, 1) * self.bins * self.range seqs_input = origin_seq.to(zx_feat.device).to(torch.int64) for i in range(4): tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed = self.position_embeddings.weight.unsqueeze(1) query_embed = query_embed.repeat(1, bs, 1) decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, query_embed[:len(tgt)], tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device)) out = torch.matmul(decoder_feat_cls.transpose(0, 1)[:, -1, :], share_weight) + self.output_bias out = out.softmax(-1) value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1] seqs_input = torch.cat([seqs_input, extra_seq], dim=-1) if i == 0: seqs_output = extra_seq values = value else: seqs_output = torch.cat([seqs_output, extra_seq], dim=-1) values = torch.cat([values, value], dim=-1) output = {'seqs': seqs_output, 'class': values, "state": "val/test"} return output def build_pix_head(cfg, hidden_dim): stride = cfg.MODEL.BACKBONE.STRIDE if cfg.MODEL.HEAD.TYPE == "MLP": mlp_head = MLP(hidden_dim, hidden_dim, 4, 3) # dim_in, dim_hidden, dim_out, 3 layers return mlp_head elif "CORNER" in cfg.MODEL.HEAD.TYPE: feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) channel = getattr(cfg.MODEL, "NUM_CHANNELS", 256) print("head channel: %d" % channel) if cfg.MODEL.HEAD.TYPE == "CORNER": corner_head = Corner_Predictor(inplanes=cfg.MODEL.HIDDEN_DIM, channel=channel, feat_sz=feat_sz, stride=stride) else: raise ValueError() return corner_head elif cfg.MODEL.HEAD.TYPE == "CENTER": in_channel = hidden_dim out_channel = cfg.MODEL.HEAD.NUM_CHANNELS feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) center_head = CenterPredictor(inplanes=in_channel, channel=out_channel, feat_sz=feat_sz, stride=stride) return center_head elif cfg.MODEL.HEAD.TYPE == "PIX": in_channel = hidden_dim feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) feat_tz = 
            int(cfg.DATA.TEMPLATE.SIZE / stride)
        decoder_layer = cfg.MODEL.DECODER_LAYER
        encoder_layer = cfg.MODEL.ENCODER_LAYER
        bins = cfg.MODEL.BINS
        num_heads = cfg.MODEL.NUM_HEADS
        mlp_ratio = cfg.MODEL.MLP_RATIO
        qkv_bias = cfg.MODEL.QKV_BIAS
        drop_rate = cfg.MODEL.DROP_RATE
        attn_drop = cfg.MODEL.ATTN_DROP
        drop_path = cfg.MODEL.DROP_PATH
        drop_path_allocator = DropPathAllocator(drop_path)
        range = cfg.MODEL.RANGE
        pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz, range=range,
                             stride=stride, encoder_layer=encoder_layer, decoder_layer=decoder_layer,
                             bins=bins, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                             drop_rate=drop_rate, attn_drop=attn_drop, drop_path=drop_path_allocator)
        return pix_head
    else:
        raise ValueError("HEAD TYPE %s is not supported." % cfg.MODEL.HEAD_TYPE)


================================================
FILE: lib/models/layers/head_seq.py
================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
from torch.nn import Identity
from timm.models.layers import trunc_normal_
from timm.models.layers import DropPath
from lib.models.layers.frozen_bn import FrozenBatchNorm2d
import copy


def top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

        NOTE: logits is modified in place (and also returned).
    """
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k.
        # torch.topk() returns the top_k largest entries along the last dim as (values, indices);
        # '...' lets the remaining leading dimensions be inferred automatically.
        for i in range(logits.shape[0]):
            indices_to_remove = logits[i] < torch.topk(logits[i], top_k)[0][..., -1, None]
            logits[i][indices_to_remove] = filter_value  # set logits outside the top-k to -inf

    if top_p > 0.0:
        for i in range(logits.shape[0]):
            sorted_logits, sorted_indices = torch.sort(logits[i], descending=True)  # sort logits descending
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift the indices to the right to keep also the first token above the threshold
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices[sorted_indices_to_remove]
            logits[i][indices_to_remove] = filter_value
    return logits


def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, freeze_bn=False):
    # Conv-BN-ReLU block; FrozenBatchNorm2d keeps BN statistics fixed when freeze_bn is set.
    if freeze_bn:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=True),
            FrozenBatchNorm2d(out_planes),
            nn.ReLU(inplace=True))
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=True),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True))


class Corner_Predictor(nn.Module):
    """ Corner Predictor module"""

    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):
        super(Corner_Predictor, self).__init__()
        self.feat_sz = feat_sz
        self.stride = stride
        self.img_sz = self.feat_sz * self.stride  # input image side length in pixels
        '''top-left corner'''
        self.conv1_tl = conv(inplanes,
                             channel, freeze_bn=freeze_bn)
        self.conv2_tl = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_tl = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1)

        '''bottom-right corner'''
        self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_br = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1)

        '''about coordinates and indexs'''
        with torch.no_grad():
            # Pixel coordinate of each feature cell along one axis.
            self.indice = torch.arange(0, self.feat_sz).view(-1, 1) * self.stride
            # generate mesh-grid of per-cell pixel coordinates, flattened to (feat_sz*feat_sz,)
            # NOTE(review): .cuda() hard-codes the device at construction time — CPU-only use will fail.
            self.coord_x = self.indice.repeat((self.feat_sz, 1)) \
                .view((self.feat_sz * self.feat_sz,)).float().cuda()
            self.coord_y = self.indice.repeat((1, self.feat_sz)) \
                .view((self.feat_sz * self.feat_sz,)).float().cuda()

    def forward(self, x, return_dist=False, softmax=True):
        """ Forward pass with input x.

        Returns normalized (x_tl, y_tl, x_br, y_br) in [0, 1]; optionally also the
        corner probability distributions when return_dist is True.
        """
        score_map_tl, score_map_br = self.get_score_map(x)
        if return_dist:
            coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax)
            coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax)
            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz, prob_vec_tl, prob_vec_br
        else:
            coorx_tl, coory_tl = self.soft_argmax(score_map_tl)
            coorx_br, coory_br = self.soft_argmax(score_map_br)
            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz

    def get_score_map(self, x):
        # top-left branch
        x_tl1 = self.conv1_tl(x)
        x_tl2 = self.conv2_tl(x_tl1)
        x_tl3 = self.conv3_tl(x_tl2)
        x_tl4 = self.conv4_tl(x_tl3)
        score_map_tl = self.conv5_tl(x_tl4)

        # bottom-right branch
        x_br1 = self.conv1_br(x)
        x_br2 = self.conv2_br(x_br1)
        x_br3 = self.conv3_br(x_br2)
        x_br4 = self.conv4_br(x_br3)
        score_map_br = self.conv5_br(x_br4)
        return score_map_tl, score_map_br

    def soft_argmax(self, score_map, return_dist=False, softmax=True):
        """ get soft-argmax coordinate for a given heatmap """
        score_vec = score_map.view((-1, self.feat_sz * self.feat_sz))  # (batch, feat_sz * feat_sz)
        prob_vec = nn.functional.softmax(score_vec, dim=1)
        # Expected pixel coordinate under the softmax distribution.
        exp_x = torch.sum((self.coord_x * prob_vec), dim=1)
        exp_y = torch.sum((self.coord_y * prob_vec), dim=1)
        if return_dist:
            if softmax:
                return exp_x, exp_y, prob_vec
            else:
                return exp_x, exp_y, score_vec
        else:
            return exp_x, exp_y


class CenterPredictor(nn.Module, ):
    """Predicts a center score map plus per-cell size and offset maps."""

    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):
        super(CenterPredictor, self).__init__()
        self.feat_sz = feat_sz
        self.stride = stride
        self.img_sz = self.feat_sz * self.stride

        # center-score branch
        self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1)

        # offset regress (sub-cell correction of the center position)
        self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_offset = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_offset = nn.Conv2d(channel // 8, 2, kernel_size=1)

        # size regress
        self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn)
        self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn)
        self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)
        self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)
        self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1)

        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, x, gt_score_map=None):
        """ Forward pass with input x.

        When gt_score_map is given, the bbox is decoded at the ground-truth center
        instead of the predicted one (used for training-time decoding).
        """
        score_map_ctr, size_map, offset_map = self.get_score_map(x)

        # assert gt_score_map is None
        if gt_score_map is None:
            bbox = self.cal_bbox(score_map_ctr, size_map, offset_map)
        else:
            bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map)

        return score_map_ctr, bbox, size_map, offset_map

    def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False):
        # Decode (cx, cy, w, h), all normalized by feat_sz, at the argmax of the center map.
        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)
        idx_y = idx // self.feat_sz
        idx_x = idx % self.feat_sz

        # Gather the 2-channel size/offset vectors at the selected cell.
        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)
        size = size_map.flatten(2).gather(dim=2, index=idx)
        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)

        # cx, cy, w, h
        bbox = torch.cat([(idx_x.to(torch.float) + offset[:, :1]) / self.feat_sz,
                          (idx_y.to(torch.float) + offset[:, 1:]) / self.feat_sz,
                          size.squeeze(-1)], dim=1)

        if return_score:
            return bbox, max_score
        return bbox

    def get_pred(self, score_map_ctr, size_map,
                 offset_map):
        # Same argmax/gather as cal_bbox, but returns raw size (in cells) and offset.
        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)
        idx_y = idx // self.feat_sz
        idx_x = idx % self.feat_sz

        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)
        size = size_map.flatten(2).gather(dim=2, index=idx)
        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)

        return size * self.feat_sz, offset

    def get_score_map(self, x):

        def _sigmoid(x):
            # Clamped sigmoid keeps scores away from exact 0/1 (numerical stability for losses).
            y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
            return y

        # ctr branch
        x_ctr1 = self.conv1_ctr(x)
        x_ctr2 = self.conv2_ctr(x_ctr1)
        x_ctr3 = self.conv3_ctr(x_ctr2)
        x_ctr4 = self.conv4_ctr(x_ctr3)
        score_map_ctr = self.conv5_ctr(x_ctr4)

        # offset branch
        x_offset1 = self.conv1_offset(x)
        x_offset2 = self.conv2_offset(x_offset1)
        x_offset3 = self.conv3_offset(x_offset2)
        x_offset4 = self.conv4_offset(x_offset3)
        score_map_offset = self.conv5_offset(x_offset4)

        # size branch
        x_size1 = self.conv1_size(x)
        x_size2 = self.conv2_size(x_size1)
        x_size3 = self.conv3_size(x_size2)
        x_size4 = self.conv4_size(x_size3)
        score_map_size = self.conv5_size(x_size4)
        return _sigmoid(score_map_ctr), _sigmoid(score_map_size), score_map_offset


class MLP(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        if BN:
            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))
                                        for n, k in zip([input_dim] + h, h + [output_dim]))
        else:
            self.layers = nn.ModuleList(nn.Linear(n, k)
                                        for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        # ReLU on all layers except the last (raw output).
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class SelfAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 attn_pos_encoding_only=False):
        super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        if attn_pos_encoding_only:
            # Positions are injected as attention biases only, so a fused QKV projection suffices.
            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        else:
            # Separate projections so absolute positional encodings can be added to q/k inputs.
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, x, q_ape, k_ape, attn_pos):
        '''
        Args:
            x (torch.Tensor): (B, L, C)
            q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
        Returns:
            torch.Tensor: (B, L, C)
        '''
        B, N, C = x.shape
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q = x + q_ape if q_ape is not None else x
            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = x + k_ape if k_ape is not None else x
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos  # additive (relative/untied) attention bias
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CrossAttention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 attn_pos_encoding_only=False):
        super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        if attn_pos_encoding_only:
            # Fused KV projection; positions come in only as attention biases.
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
        else:
            self.q = nn.Linear(dim, dim, bias=qkv_bias)
            self.k = nn.Linear(dim, dim, bias=qkv_bias)
            self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_pos_encoding_only = attn_pos_encoding_only

    def forward(self, q, kv, q_ape, k_ape, attn_pos):
        '''
        Args:
            q (torch.Tensor): (B, L_q, C)
            kv (torch.Tensor): (B, L_kv, C)
            q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
            k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
            attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
        Returns:
            torch.Tensor: (B, L_q, C)
        '''
        B, q_N, C = q.shape
        kv_N = kv.shape[1]
        if self.attn_pos_encoding_only:
            assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        attn = q @ k.transpose(-2, -1)
        attn = attn * self.scale
        if attn_pos is not None:
            attn = attn + attn_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = attn @ v
        x = x.transpose(1, 2).reshape(B, q_N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Mlp(nn.Module):
    """ Multilayer perceptron."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        '''
        Args:
            x (torch.Tensor): (B, L, C), input tensor
        Returns:
            torch.Tensor: (B, L, C), output tensor
        '''
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class FeatureFusion(nn.Module):
    """One encoder layer fusing template (z) and search (x) tokens:
    per-branch self-attention, bidirectional cross-attention, then per-branch MLP."""

    def __init__(self, dim, num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 attn_pos_encoding_only=False):
        super(FeatureFusion, self).__init__()
        self.z_norm1 = norm_layer(dim)
        self.x_norm1 = norm_layer(dim)
        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)

        self.z_norm2_1 = norm_layer(dim)
        self.z_norm2_2 = norm_layer(dim)
        self.x_norm2_1 = norm_layer(dim)
        self.x_norm2_2 = norm_layer(dim)
        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)
        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)

        mlp_hidden_dim = int(dim * mlp_ratio)
        self.z_norm3 = norm_layer(dim)
        self.x_norm3 = norm_layer(dim)
        print(mlp_ratio)  # debug print left as-is to preserve observable behavior
        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        self.drop_path = drop_path

    def forward(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos,
                x_z_cross_attn_pos):
        # Pre-norm residual self-attention per branch.
        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos))
        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos))

        # Cross-attention: z attends to x, and x attends to z.
        z = z + self.drop_path(
            self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos))
        x = x + self.drop_path(
            self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos))

        z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))
        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))
        return z, x


class FeatureFusionEncoder(nn.Module):
    """Stack of FeatureFusion layers plus the positional-encoding machinery they share."""

    def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc,
                 z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index,
                 z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table,
                 x_z_rel_pos_bias_table):
        super(FeatureFusionEncoder, self).__init__()
        self.layers = nn.ModuleList(feature_fusion_layers)
        self.z_pos_enc = z_pos_enc
        self.x_pos_enc = x_pos_enc
        # Index tables are fixed lookups — registered as non-persistent buffers.
        self.register_buffer('z_rel_pos_index', z_rel_pos_index, False)
        self.register_buffer('x_rel_pos_index', x_rel_pos_index, False)
        self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False)
        self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False)
        self.z_rel_pos_bias_table = z_rel_pos_bias_table
        self.x_rel_pos_bias_table = x_rel_pos_bias_table
        self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table
        self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table

    def forward(self, z, x, z_pos, x_pos):
        '''
        Args:
            z (torch.Tensor): (B, L_z, C), template image feature tokens
            x (torch.Tensor): (B, L_x, C), search image feature tokens
            z_pos (torch.Tensor | None): (1 or B, L_z, C), optional positional encoding for z
            x_pos (torch.Tensor | None): (1 or B, L_x, C), optional positional encoding for x
        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                (B, L_z, C): template image feature tokens
                (B, L_x, C): search image feature tokens
        '''
        # Support untied positional encoding only for simplicity
        assert z_pos is None and x_pos is None  #
        # untied positional encoding
        z_q_pos, z_k_pos = self.z_pos_enc()
        x_q_pos, x_k_pos = self.x_pos_enc()
        # q·kᵀ of the positional projections gives a per-head (L_q, L_k) attention bias.
        z_self_attn_pos = (z_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)
        x_self_attn_pos = (x_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)

        z_x_cross_attn_pos = (z_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)
        x_z_cross_attn_pos = (x_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)

        # relative positional encoding
        z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index)
        x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index)
        z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index)
        x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index)

        for layer in self.layers:
            z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos)

        return z, x


class Learned2DPositionalEncoder(nn.Module):
    """Separable learned 2D positional encoding: one vector per column + one per row."""

    def __init__(self, dim, w, h):
        super(Learned2DPositionalEncoder, self).__init__()
        self.w_pos = nn.Parameter(torch.empty(w, dim))
        self.h_pos = nn.Parameter(torch.empty(h, dim))
        trunc_normal_(self.w_pos, std=0.02)
        trunc_normal_(self.h_pos, std=0.02)

    def forward(self):
        w = self.w_pos.shape[0]
        h = self.h_pos.shape[0]
        # Broadcast-sum row and column embeddings, flattened to (h*w, dim).
        return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1)


class Untied2DPositionalEncoder(nn.Module):
    """Produces per-head positional q/k vectors (untied from content, à la TUPE)."""

    def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True):
        super(Untied2DPositionalEncoder, self).__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
        self.pos = Learned2DPositionalEncoder(dim, w, h)
        self.norm = nn.LayerNorm(dim)
        self.pos_q_linear = None
        self.pos_k_linear = None
        if with_q:
            self.pos_q_linear = nn.Linear(dim, dim)
        if with_k:
            self.pos_k_linear = nn.Linear(dim, dim)
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = scale or head_dim ** -0.5

    def forward(self):
        pos = self.norm(self.pos())
        seq_len = pos.shape[0]
        # Returns (num_heads, seq_len, head_dim); q is pre-scaled so q @ k^T needs no extra scale.
        if self.pos_q_linear is not None and self.pos_k_linear is not None:
            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale
            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)
            return pos_q, pos_k
        elif self.pos_q_linear is not None:
            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale
            return pos_q
        elif self.pos_k_linear is not None:
            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)
            return pos_k
        else:
            raise RuntimeError


def generate_2d_relative_positional_encoding_index(z_shape, x_shape):
    '''
    Map every (z token, x token) pair to a bucket id shared by pairs with the same 2D offset.

        z_shape: (z_h, z_w)
        x_shape: (x_h, x_w)
    '''
    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))
    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))

    z_2d_index_h = z_2d_index_h.flatten(0)
    z_2d_index_w = z_2d_index_w.flatten(0)
    x_2d_index_h = x_2d_index_h.flatten(0)
    x_2d_index_w = x_2d_index_w.flatten(0)

    # Pairwise (dh, dw) offsets between all z and x positions.
    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]
    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]

    diff = torch.stack((diff_h, diff_w), dim=-1)
    # return_inverse numbers each distinct offset — these ids index the bias table.
    _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0)
    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])


class RelativePosition2DEncoder(nn.Module):
    """Learnable per-head bias table indexed by relative-position bucket ids."""

    def __init__(self, num_heads, embed_size):
        super(RelativePosition2DEncoder, self).__init__()
        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))
        trunc_normal_(self.relative_position_bias_table,
std=0.02)

    def forward(self, attn_rpe_index):
        '''
        Args:
            attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size
        Returns:
            torch.Tensor: (1, num_heads, *)
        '''
        # Gather one bias per head for every index; unsqueeze adds a batch dim.
        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)


class DropPathAllocator:
    """Allocates DropPath (stochastic depth) modules depth-by-depth; on context
    exit, assigns each one its drop probability (linear decay over depth when
    stochastic_depth_decay is True, otherwise the constant max rate)."""

    def __init__(self, max_drop_path_rate, stochastic_depth_decay=True):
        self.max_drop_path_rate = max_drop_path_rate
        self.stochastic_depth_decay = stochastic_depth_decay
        self.allocated = []   # per-depth lists of modules, finalized on exit
        self.allocating = []  # modules for the depth currently being built

    def __enter__(self):
        self.allocating = []

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush any partially built depth, then assign drop probabilities.
        if len(self.allocating) != 0:
            self.allocated.append(self.allocating)
        self.allocating = None
        if not self.stochastic_depth_decay:
            for depth_module in self.allocated:
                for module in depth_module:
                    if isinstance(module, DropPath):
                        module.drop_prob = self.max_drop_path_rate
        else:
            # Rate grows linearly from 0 (first depth) to max (last depth).
            depth = self.get_depth()
            dpr = [x.item() for x in torch.linspace(0, self.max_drop_path_rate, depth)]
            assert len(dpr) == len(self.allocated)
            for drop_path_rate, depth_modules in zip(dpr, self.allocated):
                for module in depth_modules:
                    if isinstance(module, DropPath):
                        module.drop_prob = drop_path_rate

    def __len__(self):
        # Total number of allocated modules across all depths.
        length = 0
        for depth_modules in self.allocated:
            length += len(depth_modules)
        return length

    def increase_depth(self):
        # Close the current depth level and start a new one.
        self.allocated.append(self.allocating)
        self.allocating = []

    def get_depth(self):
        return len(self.allocated)

    def allocate(self):
        # Depth 0 under linear decay would get rate 0 anyway — use a cheap Identity.
        if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0):
            drop_path_module = Identity()
        else:
            drop_path_module = DropPath()
        self.allocating.append(drop_path_module)
        return drop_path_module

    def get_all_allocated(self):
        # Flat list of every allocated module.
        allocated = []
        for depth_module in self.allocated:
            for module in depth_module:
                allocated.append(module)
        return allocated


def build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path):
    """Build the z/x feature-fusion encoder with untied absolute and relative positional encodings."""
    z_shape = [z_size, z_size]
    x_shape = [x_size, x_size]
    encoder_layers = []
    for i in range(encoder_layer):
        encoder_layers.append(
            FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate,
                          attn_drop=attn_drop, drop_path=drop_path.allocate(),
                          attn_pos_encoding_only=True)
        )
    # Untied absolute positional encoders for template (z) and search (x).
    z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])
    x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])
    # Relative-position bucket indices for all four attention directions.
    z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)
    x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)
    z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)
    x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)
    # Learnable per-head bias tables, one entry per bucket.
    z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1)
    x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1)
    z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1)
    x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1)

    return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,
                                x_self_attn_rel_pos_index, z_x_cross_attn_rel_pos_index,
                                x_z_cross_attn_rel_pos_index, z_self_attn_rel_pos_bias_table,
                                x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,
                                x_z_cross_attn_rel_pos_bias_table)


class TargetQueryDecoderLayer(nn.Module):
    """One decoder layer: query self-attention, cross-attention over z/x memory, then MLP."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super(TargetQueryDecoderLayer, self).__init__()
        self.norm_1 = norm_layer(dim)
        self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop)
        self.norm_2_query = norm_layer(dim)
        self.norm_2_memory = norm_layer(dim)
        self.multihead_attn = nn.MultiheadAttention(dim,
num_heads, dropout=drop) self.norm_3 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.drop_path = drop_path def forward(self, query, memoryz, memoryx, query_pos, pos_z, pos_x, identity, identity_search, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, ): ''' Args: query (torch.Tensor): (B, num_queries, C) memory (torch.Tensor): (B, L, C) query_pos (torch.Tensor): (1 or B, num_queries, C) memory_pos (torch.Tensor): (1 or B, L, C) Returns: torch.Tensor: (B, num_queries, C) ''' tgt = query q = k = self.norm_1(query) + query_pos query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]) q2 = self.norm_2_query(query) + query_pos memory = torch.cat((memoryz, memoryx), dim=1) pos = torch.cat((pos_z, pos_x), dim=1) ide = torch.cat( (identity[:, 0, :].repeat(1, pos_z.shape[1], 1), identity[:, 1, :].repeat(1, pos_x.shape[1], 1)), dim=1) k2 = (self.norm_2_memory(memory) + pos + ide).permute(1, 0, 2) memory_in = memory.permute(1, 0, 2) query = query + self.drop_path( self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0]) query = query + self.drop_path(self.mlpz(self.norm_3(query))) return query def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) class TargetQueryDecoderBlock(nn.Module): def __init__(self, dim, decoder_layers, num_layer): super(TargetQueryDecoderBlock, self).__init__() self.layers = nn.ModuleList(decoder_layers) self.num_layers = num_layer self.norm = nn.LayerNorm(dim) def forward(self, tgt, z, x, pos_z, pos_x, identity, identity_search, query_pos: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: 
Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None): ''' Args: z (torch.Tensor): (B, L_z, C) x (torch.Tensor): (B, L_x, C) Returns: torch.Tensor: (B, num_queries, C) ''' output = tgt for layer in self.layers: output = layer(output, z, x, query_pos, pos_z, pos_x, identity, identity_search, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask) output = self.norm(output) return output def build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, z_size, x_size): z_shape = [z_size, z_size] x_shape = [x_size, x_size] num_layers = decoder_layer decoder_layers = [] for _ in range(num_layers): decoder_layers.append( TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path.allocate())) drop_path.increase_depth() decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers) return decoder def generate_square_subsequent_mask(sz): r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). 
""" mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float( '-inf')).masked_fill(mask == 1, float(0.0)) return mask class Pix2Track(nn.Module): def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, range=2, pre_num=7, stride=16, encoder_layer=3, decoder_layer=3, bins=400, num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0, attn_drop=0.0, drop_path=nn.Identity): super(Pix2Track, self).__init__() self.bins = bins self.range = range self.pre_num = pre_num self.word_embeddings = nn.Embedding(self.bins * self.range + 2, in_channel, padding_idx=self.bins * self.range, max_norm=1, norm_type=2.0) self.position_embeddings = nn.Embedding( 5, in_channel) self.prev_position_embeddings = nn.Embedding(self.pre_num * 4, in_channel) self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 2)) self.momentum_param = 0.25 self.identity_search = torch.nn.Parameter(torch.zeros(1, 1, 768)) self.identity_search = trunc_normal_(self.identity_search, std=.02) self.encoder_layer = encoder_layer self.drop_path = drop_path self.tz = feat_tz * feat_tz self.sz = feat_sz * feat_sz trunc_normal_(self.word_embeddings.weight, std=.02) if self.encoder_layer > 0: self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path) else: self.encoder = None self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, feat_tz, feat_sz) self.magic_num = (self.range-1) * 0.5 def forward(self, zx_feat, pos_z, pos_x, identity, seqs_input=None, stage=None): emb_weight = self.word_embeddings.weight.clone() share_weight = emb_weight.T z_feat = zx_feat[:, :self.tz] x_feat = zx_feat[:, self.tz:] out_list = [] bs = zx_feat.shape[0] if self.encoder != None: z_feat, x_feat = self.encoder(z_feat, x_feat, None, None) output_x_feat = x_feat.clone() if stage == None: seqs_input = 
seqs_input.to(torch.int64).to(zx_feat.device) tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed_ = self.position_embeddings.weight.unsqueeze(1) prev_embed = self.prev_position_embeddings.weight.unsqueeze(1) query_embed = torch.cat([prev_embed, query_embed_], dim=0) query_embed = query_embed.repeat(1, bs, 1) decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, self.identity_search, query_embed[:len(tgt)], tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device)) at = torch.matmul(decoder_feat_cls, share_weight) at = at + self.output_bias output = {'feat': at, "state": "train"} else: seqs_origin = seqs_input start_token = torch.ones(bs, 1) * self.bins * self.range start_token = start_token.to(seqs_origin) real_seq = torch.cat([seqs_origin, start_token], dim=1) seqs_input = real_seq.to(zx_feat.device).to(torch.int32) for i in range(4): tgt = self.word_embeddings(seqs_input).permute(1, 0, 2) query_embed_ = self.position_embeddings.weight.unsqueeze(1) prev_embed = self.prev_position_embeddings.weight.unsqueeze(1) query_embed = torch.cat([prev_embed, query_embed_], dim=0) query_embed = query_embed.repeat(1, bs, 1) decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, self.identity_search, query_embed[:len(tgt)], tgt_mask=generate_square_subsequent_mask(len(seqs_input[0])).to( tgt.device)) out = torch.matmul(decoder_feat_cls.transpose(0, 1)[:, -1, :], share_weight) + self.output_bias out_list.append(out.unsqueeze(0)) out = out.softmax(-1) value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1] seqs_input = torch.cat([seqs_input, extra_seq], dim=-1) if i == 0: seqs_output = extra_seq values = value else: seqs_output = torch.cat([seqs_output, extra_seq], dim=-1) values = torch.cat([values, value], dim=-1) if not (not out_list): feat = torch.cat(out_list) output = {'seqs': seqs_output, 'class': values, 'feat': feat, "state": "val/test", "x_feat": output_x_feat.detach()} return output def 
build_pix_head(cfg, hidden_dim): stride = cfg.MODEL.BACKBONE.STRIDE in_channel = hidden_dim feat_sz = int(cfg.DATA.SEARCH.SIZE / stride) feat_tz = int(cfg.DATA.TEMPLATE.SIZE / stride) decoder_layer = cfg.MODEL.DECODER_LAYER encoder_layer = cfg.MODEL.ENCODER_LAYER pre_num = cfg.MODEL.PRENUM bins = cfg.MODEL.BINS range = cfg.MODEL.RANGE num_heads = cfg.MODEL.NUM_HEADS mlp_ratio = cfg.MODEL.MLP_RATIO qkv_bias = cfg.MODEL.QKV_BIAS drop_rate = cfg.MODEL.DROP_RATE attn_drop = cfg.MODEL.ATTN_DROP drop_path = cfg.MODEL.DROP_PATH drop_path_allocator = DropPathAllocator(drop_path) pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz, range=range, pre_num=pre_num, stride=stride, encoder_layer=encoder_layer, decoder_layer=decoder_layer, bins=bins, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate, attn_drop=attn_drop, drop_path=drop_path_allocator) return pix_head ================================================ FILE: lib/models/layers/mask_decoder.py ================================================ # -*- coding:utf-8 -*- # author : Skye Song # file : vit_decoder.py # Copyright (c) Skye-Song. 
class MaskDecoder(nn.Module):
    """MAE-style decoder that reconstructs image patches from (optionally
    ROI-cropped and randomly masked) feature tokens, with a pixel MSE loss."""

    def __init__(self, mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, pool_size=8,
                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):
        super().__init__()
        self.mask_ratio = mask_ratio
        print(self.mask_ratio)
        self.num_patches = num_patches
        self.patch_size = patch_size
        self.search_prroipool = PrRoIPool2D(pool_size, pool_size, spatial_scale=1.0)
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))
        # fixed sin-cos embedding, filled in initialize_weights()
        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches, decoder_embed_dim),
                                              requires_grad=False)
        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])
        self.decoder_norm = norm_layer(decoder_embed_dim)
        # decoder to patch: each token predicts patch_size**2 * 3 pixel values
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size ** 2 * 3, bias=True)
        self.norm_pix_loss = norm_pix_loss
        self.initialize_weights()

    def initialize_weights(self):
        # initialize (and freeze) pos_embed by sin-cos embedding
        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1],
                                                    int(self.num_patches ** .5), cls_token=False)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.mask_token, std=.02)
        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def random_masking(self, x):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        Returns (x_masked, mask): kept tokens with mask tokens spliced back into
        their original positions, and a binary mask (0 keep, 1 remove).
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - self.mask_ratio))
        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]
        # sort noise for each sample: ascend — small is keep, large is remove
        ids_shuffle = torch.argsort(noise, dim=1)
        ids_restore = torch.argsort(ids_shuffle, dim=1)
        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_keep = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)
        # splice mask tokens into the removed positions and unshuffle
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] - x_keep.shape[1], 1)
        x_ = torch.cat([x_keep, mask_tokens], dim=1)  # no cls token
        x_masked = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))
        return x_masked, mask

    def forward_decoder(self, x, eval=False):
        """Embed tokens, (optionally) mask, add pos-embed, run decoder blocks,
        and project to pixel predictions. Returns (pred, mask-or-None)."""
        x = self.decoder_embed(x)
        mask = None
        if not eval:
            x, mask = self.random_masking(x)
        x = x + self.decoder_pos_embed
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)
        x = self.decoder_pred(x)
        return x, mask

    def unpatchify(self, x):
        """
        x: (N, L, patch_size**2 *3)
        imgs: (N, 3, H, W)
        """
        p = self.patch_size
        h = w = int(x.shape[1] ** .5)
        assert h * w == x.shape[1]
        x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
        x = torch.einsum('nhwpqc->nchpwq', x)
        # FIX: width dimension was h * p; use w * p (identical here since h == w,
        # but correct if non-square grids are ever supported)
        imgs = x.reshape(shape=(x.shape[0], 3, h * p, w * p))
        return imgs

    def patchify(self, imgs):
        """
        imgs: (N, 3, H, W)
        x: (N, L, patch_size**2 *3)
        """
        p = self.patch_size
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p ** 2 * 3))
        return x

    def forward_loss(self, imgs, pred, mask=None):
        """
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove
        NOTE(review): both branches currently compute the same mean over all
        patches — `mask` is accepted but not used for weighting; preserved as-is.
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6) ** .5
        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch
        if mask is None:  # was `mask == None`: identity test is the safe idiom for tensors
            loss = loss.sum() / pred.shape[1] / pred.shape[0]
        else:
            loss = loss.sum() / pred.shape[1] / pred.shape[0]
        return loss

    def crop_search_feat(self, search_feat, gt_bboxes):
        """ROI-pool a square crop (2x the box's geometric-mean size, clamped to
        the feature map) around each ground-truth box."""
        crop_bboxes = box_xywh_to_cxywh(gt_bboxes)
        crop_sz = torch.sqrt(gt_bboxes[:, 2] * gt_bboxes[:, 3]) * 2.0
        crop_sz = torch.clamp(crop_sz, min=0., max=1.)
        crop_bboxes[:, 2] = crop_bboxes[:, 3] = crop_sz
        crop_bboxes = crop_bboxes * search_feat.shape[-1]
        crop_bboxes = box_cxcywh_to_xyxy(crop_bboxes.clone().view(-1, 4))
        batch_size = crop_bboxes.shape[0]
        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(crop_bboxes.device)
        target_roi = torch.cat((batch_index, crop_bboxes), dim=1)
        search_box_feat = self.search_prroipool(search_feat, target_roi)
        return search_box_feat

    def forward(self, x, images=None, gt_bboxes=None, eval=False,):
        # input x = [B,C,H,W]; input images = [b,3,h,w]
        if gt_bboxes is not None:
            x = self.crop_search_feat(x, gt_bboxes)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        pred, mask = self.forward_decoder(x, eval)  # [N, L, p*p*3]
        if eval:
            return self.unpatchify(pred)
        if mask is not None:  # was `mask != None`: identity test is the safe idiom for tensors
            loss = self.forward_loss(imgs=images, pred=pred, mask=mask)
        else:
            loss = self.forward_loss(imgs=images, pred=pred)
        pred = self.unpatchify(pred)
        return pred, loss


def mask_decoder():
    """Default MaskDecoder configuration (MAE decoder hyper-parameters)."""
    model = MaskDecoder(
        mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024,
        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
        mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False)
    return model


def build_maskdecoder(cfg, hidden_dim):
    """Construct a MaskDecoder sized from the experiment config."""
    pool_size = int(cfg.DATA.TEMPLATE.SIZE / cfg.MODEL.BACKBONE.PATCHSIZE)
    num_patches = (cfg.DATA.TEMPLATE.SIZE // cfg.MODEL.BACKBONE.PATCHSIZE) ** 2
    model = MaskDecoder(
        mask_ratio=cfg.MODEL.DECODER.MASK_RATIO,
        patch_size=cfg.MODEL.BACKBONE.PATCHSIZE,
        num_patches=num_patches,
        embed_dim=hidden_dim,
        decoder_embed_dim=cfg.MODEL.DECODER.EMBEDDIM,
        decoder_depth=cfg.MODEL.DECODER.DEPTH,
        decoder_num_heads=cfg.MODEL.DECODER.NUMHEADS,
        pool_size=pool_size,
        mlp_ratio=cfg.MODEL.DECODER.MLPRATIO,
        norm_layer=nn.LayerNorm,
        norm_pix_loss=False)
    return model


# ================================================
# FILE: lib/models/layers/patch_embed.py
# ================================================

class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768,
                 norm_layer=None, flatten=True):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        # allow different input size (no assert on H/W against self.img_size)
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
def generate_2d_relative_positional_encoding_index(z_shape, x_shape):
    """Index every (z position, x position) pair by its unique (dh, dw) offset.

    z_shape: (z_h, z_w)
    x_shape: (x_h, x_w)
    Returns an int64 tensor of shape (z_h * z_w, x_h * x_w).
    """
    z_rows, z_cols = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))
    x_rows, x_cols = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))
    z_rows, z_cols = z_rows.flatten(0), z_cols.flatten(0)
    x_rows, x_cols = x_rows.flatten(0), x_cols.flatten(0)
    offset = torch.stack(
        (z_rows[:, None] - x_rows[None, :], z_cols[:, None] - x_cols[None, :]),
        dim=-1,
    )
    _, compact = torch.unique(offset.view(-1, 2), return_inverse=True, dim=0)
    return compact.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])


def generate_2d_concatenated_self_attention_relative_positional_encoding_index(z_shape, x_shape):
    """Relative-position index for self-attention over concatenated [z; x] tokens.

    Offsets are disambiguated by (query-origin, key-origin) flags so that e.g.
    a z->z offset and a z->x offset with the same (dh, dw) get distinct indices.

    z_shape: (z_h, z_w)
    x_shape: (x_h, x_w)
    Returns an int64 tensor of shape (z_len + x_len, z_len + x_len).
    """
    z_rows, z_cols = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))
    x_rows, x_cols = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))
    z_rows, z_cols = z_rows.flatten(0), z_cols.flatten(0)
    x_rows, x_cols = x_rows.flatten(0), x_cols.flatten(0)
    all_rows = torch.cat((z_rows, x_rows))
    all_cols = torch.cat((z_cols, x_cols))
    delta_h = all_rows[:, None] - all_rows[None, :]
    delta_w = all_cols[:, None] - all_cols[None, :]
    z_len = z_shape[0] * z_shape[1]
    x_len = x_shape[0] * x_shape[1]
    total = z_len + x_len
    # Origin flag: 0 for template tokens, 1 for search tokens.
    origin = torch.empty(total, dtype=torch.int64)
    origin[:z_len] = 0
    origin[z_len:] = 1
    query_flag = origin[:, None].repeat(1, total)
    key_flag = origin[None, :].repeat(total, 1)
    keyed = torch.stack((delta_h, delta_w, query_flag, key_flag), dim=-1)
    _, compact = torch.unique(keyed.view(total * total, 4), return_inverse=True, dim=0)
    return compact.view(total, total)


def generate_2d_concatenated_cross_attention_relative_positional_encoding_index(z_shape, x_shape):
    """Relative-position index for x-query cross-attention over [z; x] keys.

    Like the self-attention variant, but queries come only from the search grid,
    so only a key-origin flag is needed to disambiguate offsets.

    z_shape: (z_h, z_w)
    x_shape: (x_h, x_w)
    Returns an int64 tensor of shape (x_len, z_len + x_len).
    """
    z_rows, z_cols = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))
    x_rows, x_cols = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))
    z_rows, z_cols = z_rows.flatten(0), z_cols.flatten(0)
    x_rows, x_cols = x_rows.flatten(0), x_cols.flatten(0)
    all_rows = torch.cat((z_rows, x_rows))
    all_cols = torch.cat((z_cols, x_cols))
    delta_h = x_rows[:, None] - all_rows[None, :]
    delta_w = x_cols[:, None] - all_cols[None, :]
    z_len = z_shape[0] * z_shape[1]
    x_len = x_shape[0] * x_shape[1]
    total = z_len + x_len
    origin = torch.empty(total, dtype=torch.int64)
    origin[:z_len] = 0
    origin[z_len:] = 1
    key_flag = origin[None, :].repeat(x_len, 1)
    keyed = torch.stack((delta_h, delta_w, key_flag), dim=-1)
    _, compact = torch.unique(keyed.view(x_len * total, 3), return_inverse=True, dim=0)
    return compact.view(x_len, total)


class RelativePosition2DEncoder(nn.Module):
    """Learned relative-position bias table, indexed by a precomputed index map."""

    def __init__(self, num_heads, embed_size):
        super(RelativePosition2DEncoder, self).__init__()
        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))
        trunc_normal_(self.relative_position_bias_table, std=0.02)

    def forward(self, attn_rpe_index):
        '''
        Args:
            attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size
        Returns:
            torch.Tensor: (1, num_heads, *)
        '''
        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)
class Attention(nn.Module):
    """Plain multi-head self-attention with optional key-padding masking."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, padding_mask=None, **kwargs):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # each (B, head, N, C//head)
        scores = (q @ k.transpose(-2, -1)) * self.scale  # (B, head, N, N)
        if padding_mask is not None:
            assert padding_mask.size()[0] == B
            assert padding_mask.size()[1] == N
            scores = scores.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        scores = self.attn_drop(scores.softmax(dim=-1))
        out = (scores @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))


class ClsMixAttention(nn.Module):
    """Mixed attention for [cls | template | search] tokens: the cls and search
    queries attend over everything, template queries attend only to template keys."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, t_h, t_w, s_h, s_w, online_size=1, padding_mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C//head)
        split_sizes = [1, t_h * t_w * (1 + online_size), s_h * s_w]
        q_cls, q_t, q_s = torch.split(q, split_sizes, dim=2)
        k_cls, k_t, k_s = torch.split(k, split_sizes, dim=2)
        v_cls, v_t, v_s = torch.split(v, split_sizes, dim=2)

        def _attend(q_part, k_part, v_part):
            # Shared masked-softmax attention; behavior matches the inline original.
            scores = (q_part @ k_part.transpose(-2, -1)) * self.scale
            if padding_mask is not None:
                assert padding_mask.size()[0] == B
                assert padding_mask.size()[1] == N
                scores = scores.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
            scores = self.attn_drop(scores.softmax(dim=-1))
            return rearrange(scores @ v_part, 'b h t d -> b t (h d)')

        x_cls = _attend(q_cls, k, v)     # cls token attends to all tokens
        x_t = _attend(q_t, k_t, v_t)     # template attends within template only
        x_s = _attend(q_s, k, v)         # search region attends to all tokens
        out = torch.cat([x_cls, x_t, x_s], dim=1)
        return self.proj_drop(self.proj(out))


class MixAttention(nn.Module):
    """Mixed attention for [template | search] tokens: template queries attend
    only to template keys; search queries attend over everything."""

    def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, t_h, t_w, s_h, s_w, padding_mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C//head)
        split_sizes = [t_h * t_w * 2, s_h * s_w]
        q_t, q_s = torch.split(q, split_sizes, dim=2)
        k_t, k_s = torch.split(k, split_sizes, dim=2)
        v_t, v_s = torch.split(v, split_sizes, dim=2)

        def _attend(q_part, k_part, v_part):
            scores = (q_part @ k_part.transpose(-2, -1)) * self.scale
            if padding_mask is not None:
                assert padding_mask.size()[0] == B
                assert padding_mask.size()[1] == N
                scores = scores.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
            scores = self.attn_drop(scores.softmax(dim=-1))
            return rearrange(scores @ v_part, 'b h t d -> b t (h d)')

        x_t = _attend(q_t, k_t, v_t)  # template attention (template keys only)
        x_s = _attend(q_s, k, v)      # search region attention (all keys)
        out = torch.cat([x_t, x_s], dim=1)
        return self.proj_drop(self.proj(out))
search region attention attn = (q_s @ k.transpose(-2, -1)) * self.scale # (B, head, N_s, N) if padding_mask is not None: assert padding_mask.size()[0] == B assert padding_mask.size()[1] == N attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_s = rearrange(attn @ v, 'b h t d -> b t (h d)') x = torch.cat([x_t, x_s], dim=1) x = self.proj(x) x = self.proj_drop(x) return x class NossAttention(nn.Module): def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, t_h, t_w, s_h, s_w, padding_mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # (B, head, N, C) q_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2) k_t, k_s = torch.split(k, [t_h * t_w * 2, s_h * s_w], dim=2) v_t, v_s = torch.split(v, [t_h * t_w * 2, s_h * s_w], dim=2) # template attention attn = (q_t @ k.transpose(-2, -1)) * self.scale # (B, head, N_q, N) if padding_mask is not None: assert padding_mask.size()[0] == B assert padding_mask.size()[1] == N attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_t = rearrange(attn @ v, 'b h t d -> b t (h d)') # search region attention attn = (q_s @ k_t.transpose(-2, -1)) * self.scale # (B, head, N_s, N) if padding_mask is not None: assert padding_mask.size()[0] == B assert padding_mask.size()[1] == N attn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)') x 
= torch.cat([x_t, x_s], dim=1) x = self.proj(x) x = self.proj_drop(x) return x class CrossAttention(nn.Module): def __init__(self, dim, num_heads, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, t_h, t_w, s_h, s_w): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # (B, head, N, C) q_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2) k_t, k_s = torch.split(k, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], dim=4) v_t, v_s = torch.split(v, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], dim=4) # template attention attn = (q_t @ k_s.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)') # search region attention attn = (q_s @ k_t.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)') x = torch.cat([x_t, x_s], dim=1) x = self.proj(x) x = self.proj_drop(x) return x ================================================ FILE: lib/models/mask_decoder/block.py ================================================ # -*- coding:utf-8 -*- # author : Skye Song # file : block.py # Copyright (c) Skye-Song. 
All Rights Reserved
from .attention import *
from .drop import DropPath
from .mlp import Mlp


class Block(nn.Module):
    # Standard pre-norm transformer block: x + DropPath(Attn(LN(x))) followed
    # by x + DropPath(MLP(LN(x))). The `attention` string selects one of the
    # attention classes exported by .attention via a globals() lookup, so it
    # must exactly match a class name from that module.
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,
                 attention = "Attention", act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.LayerNorm
        self.norm1 = norm_layer(dim)
        # Instantiate the requested attention variant by name.
        self.attn = globals()[attention](dim, num_heads=num_heads, qkv_bias=qkv_bias,
                                         attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, **kwargs):
        """Apply the attention and MLP sub-blocks with residual connections.

        kwargs (e.g. t_h/t_w/s_h/s_w, padding_mask) are forwarded untouched to
        the attention module.
        """
        x = x + self.drop_path(self.attn(self.norm1(x), **kwargs))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


================================================
FILE: lib/models/mask_decoder/drop.py
================================================
""" DropBlock, DropPath

PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.

Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)

Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)

Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
 - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


def drop_block_2d(
        x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
    runs with success, but needs further validation and possibly optimization for lower runtime impact.
    """
    # x: (B, C, H, W) feature map; zeroes out contiguous block_size x block_size
    # regions with seed probability gamma, then renormalizes.
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter
    # NOTE(review): the denominator uses the unclipped block_size; if
    # block_size > W or H this can go non-positive — assumed not to occur here.
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    # Forces the block to be inside the feature map.
    # NOTE(review): torch.meshgrid is called without indexing=; relies on the
    # legacy 'ij' default (warns on newer torch).
    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)

    if batchwise:
        # one mask for whole batch, quite a bit faster
        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = torch.rand_like(x)
    # 1 where the position survives as a block seed, else 0.
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    # Max-pool of the negated mask expands each dropped seed to a full block.
    block_mask = -F.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,  # block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
    else:
        # Rescale so the expected activation magnitude is preserved.
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


def drop_block_fast_2d(
        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid
    block mask at edges.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    if batchwise:
        # one mask for whole batch, quite a bit faster
        block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
    else:
        # mask per batch element
        block_mask = torch.rand_like(x) < gamma
    # Expand each sampled seed to a full block via max pooling.
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1. - block_mask) + normal_noise * block_mask
    else:
        block_mask = 1 - block_mask
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x


class DropBlock2d(nn.Module):
    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    """
    def __init__(self,
                 drop_prob=0.1,
                 block_size=7,
                 gamma_scale=1.0,
                 with_noise=False,
                 inplace=False,
                 batchwise=False,
                 fast=True):
        super(DropBlock2d, self).__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def forward(self, x):
        # Identity at eval time or when dropping is disabled.
        if not self.training or not self.drop_prob:
            return x
        if self.fast:
            return drop_block_fast_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
        else:
            return drop_block_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)


def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    # Scale surviving samples by 1/keep_prob so the expectation is unchanged.
    output = x.div(keep_prob) * random_tensor
    return output


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)


================================================
FILE: lib/models/mask_decoder/mlp.py
================================================
""" MLP module w/ dropout and configurable activation layer

Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
import torch.nn.functional as F


class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Default hidden/output widths to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class MultiLayerMlp(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN)"""
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        if BN:
            # Optional BatchNorm1d after each linear layer.
            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))
                                        for n, k in zip([input_dim] + h, h + [output_dim]))
        else:
            self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        # ReLU between hidden layers; no activation after the final layer.
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class GluMlp(nn.Module):
    """ MLP w/ GLU style gating
    See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features =
hidden_features or in_features
        # Half the hidden channels act as gates, so the width must be even.
        assert hidden_features % 2 == 0
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features // 2, out_features)
        self.drop = nn.Dropout(drop)

    def init_weights(self):
        # override init of fc1 w/ gate portion set to weight near zero, bias=1
        fc1_mid = self.fc1.bias.shape[0] // 2
        nn.init.ones_(self.fc1.bias[fc1_mid:])
        nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)

    def forward(self, x):
        x = self.fc1(x)
        # Split into value/gate halves and gate multiplicatively.
        x, gates = x.chunk(2, dim=-1)
        x = x * self.act(gates)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class GatedMlp(nn.Module):
    """ MLP as used in gMLP """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
                 gate_layer=None, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        if gate_layer is not None:
            assert hidden_features % 2 == 0
            self.gate = gate_layer(hidden_features)
            # The gate halves the channel count seen by fc2.
            hidden_features = hidden_features // 2  # FIXME base reduction on gate property?
        else:
            self.gate = nn.Identity()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.gate(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class ConvMlp(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims """
    def __init__(
            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU,
            norm_layer=None, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # 1x1 convolutions = per-pixel linear layers over the channel dim.
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


================================================
FILE: lib/models/mask_decoder/norm.py
================================================
# -*- coding:utf-8 -*-
# author : Skye Song
# file : norm.py
# Copyright (c) Skye-Song. All Rights Reserved
import torch


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers, not Parameters: statistics/affine terms are fixed and
        # never updated by the optimizer.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Drop num_batches_tracked from checkpoints saved by regular BatchNorm,
        # since this frozen variant has no such buffer.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        scale = w * (rv + eps).rsqrt()  # rsqrt(x): 1/sqrt(x), r: reciprocal
        bias = b - rm * scale
        return x * scale + bias


================================================
FILE: lib/models/mask_decoder/patch_embed.py
================================================
""" Image to Patch Embedding using Conv2d

A convolution based approach to patchifying a 2D image w/ embedding projection.

Based on the impl in https://github.com/google-research/vision_transformer

Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from itertools import repeat
import collections.abc


# From PyTorch internals
def _ntuple(n):
    # Returns a parser that passes iterables through unchanged and repeats
    # scalars n times, e.g. to_2tuple(3) -> (3, 3).
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding """
    def __init__(self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        self.flatten = flatten
        # Non-overlapping patches: kernel size and stride both equal patch_size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x


================================================
FILE: lib/models/mask_decoder/pos_embed.py
================================================
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# -------------------------------------------------------- # Position embedding utils # -------------------------------------------------------- import numpy as np import torch # -------------------------------------------------------- # 2D sine-cosine position embedding # References: # Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py # MoCo v3: https://github.com/facebookresearch/moco-v3 # -------------------------------------------------------- def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): """ grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) """ grid_h = np.arange(grid_size, dtype=np.float32) grid_w = np.arange(grid_size, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size, grid_size]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if cls_token: pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) return pos_embed def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): assert embed_dim % 2 == 0 # use half of dimensions to encode grid_h emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) return emb def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): """ embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) """ assert embed_dim % 2 == 0 omega = np.arange(embed_dim // 2, dtype=np.float) omega /= embed_dim / 2. omega = 1. 
/ 10000**omega # (D/2,) pos = pos.reshape(-1) # (M,) out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product emb_sin = np.sin(out) # (M, D/2) emb_cos = np.cos(out) # (M, D/2) emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) return emb # -------------------------------------------------------- # Interpolate position embeddings for high-resolution # References: # DeiT: https://github.com/facebookresearch/deit # -------------------------------------------------------- def interpolate_pos_embed(model, checkpoint_model): if 'pos_embed' in checkpoint_model: pos_embed_checkpoint = checkpoint_model['pos_embed'] embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.patch_embed.num_patches num_extra_tokens = model.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) # class_token and dist_token are kept unchanged if orig_size != new_size: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) checkpoint_model['pos_embed'] = new_pos_embed ================================================ FILE: lib/models/mask_decoder/weight_init.py ================================================ import torch import math import warnings from torch.nn.init import _calculate_fan_in_and_fan_out def _no_grad_trunc_normal_(tensor, mean, std, a, 
b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1. + math.erf(x / math.sqrt(2.))) / 2. if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2) with torch.no_grad(): # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * l - 1, 2 * u - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _no_grad_trunc_normal_(tensor, mean, std, a, b) def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) if mode == 'fan_in': denom = fan_in elif mode == 'fan_out': denom = fan_out elif mode == 'fan_avg': denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == "truncated_normal": # constant is stddev of standard normal truncated to (-2, 2) trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) elif distribution == "normal": tensor.normal_(std=math.sqrt(variance)) elif distribution == "uniform": bound = math.sqrt(3 * variance) tensor.uniform_(-bound, bound) else: raise ValueError(f"invalid distribution {distribution}") def lecun_normal_(tensor): variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') ================================================ FILE: lib/test/__init__.py ================================================ ================================================ FILE: lib/test/analysis/__init__.py ================================================ ================================================ FILE: lib/test/analysis/extract_results.py ================================================ import os import sys import numpy as np from lib.test.utils.load_text import load_text import torch import pickle from tqdm import tqdm env_path = os.path.join(os.path.dirname(__file__), '../../..') if env_path not in sys.path: sys.path.append(env_path) from lib.test.evaluation.environment import env_settings def calc_err_center(pred_bb, anno_bb, normalized=False): pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0) anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 
1.0)

    if normalized:
        # Normalize center offsets by the annotated box size.
        pred_center = pred_center / anno_bb[:, 2:]
        anno_center = anno_center / anno_bb[:, 2:]

    err_center = ((pred_center - anno_center)**2).sum(1).sqrt()
    return err_center


def calc_iou_overlap(pred_bb, anno_bb):
    # Intersection-over-union between per-row (x, y, w, h) boxes.
    tl = torch.max(pred_bb[:, :2], anno_bb[:, :2])
    br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0)
    sz = (br - tl + 1.0).clamp(0)

    # Area
    intersection = sz.prod(dim=1)
    union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection

    return intersection / union


def calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None):
    """Per-frame overlap / center / normalized-center errors for one sequence,
    with dataset-specific handling of NaN annotations, zero-size predictions,
    length mismatches and invisible targets.

    Returns (err_overlap, err_center, err_center_normalized, valid)."""
    pred_bb = pred_bb.clone()

    # Check if invalid values are present
    if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():
        raise Exception('Error: Invalid results')

    if torch.isnan(anno_bb).any():
        if dataset == 'uav':
            # UAV123 annotations contain NaNs for out-of-view frames.
            pass
        else:
            raise Exception('Warning: NaNs in annotation')

    if (pred_bb[:, 2:] == 0.0).any():
        # Zero-size prediction: carry the previous frame's box forward.
        for i in range(1, pred_bb.shape[0]):
            if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():
                pred_bb[i, :] = pred_bb[i-1, :]

    if pred_bb.shape[0] != anno_bb.shape[0]:
        if dataset == 'lasot':
            if pred_bb.shape[0] > anno_bb.shape[0]:
                # For monkey-17, there is a mismatch for some trackers.
                pred_bb = pred_bb[:anno_bb.shape[0], :]
            else:
                raise Exception('Mis-match in tracker prediction and GT lengths')
        else:
            # print('Warning: Mis-match in tracker prediction and GT lengths')
            if pred_bb.shape[0] > anno_bb.shape[0]:
                pred_bb = pred_bb[:anno_bb.shape[0], :]
            else:
                # Pad missing predictions with zero boxes.
                pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)
                pred_bb = torch.cat((pred_bb, pad), dim=0)

    # The first frame is always evaluated against the ground-truth init box.
    pred_bb[0, :] = anno_bb[0, :]

    if target_visible is not None:
        target_visible = target_visible.bool()
        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible
    else:
        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)

    err_center = calc_err_center(pred_bb, anno_bb)
    err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)
    err_overlap = calc_iou_overlap(pred_bb, anno_bb)

    # handle invalid anno cases
    if dataset in ['uav']:
        err_center[~valid] = -1.0
    else:
        err_center[~valid] = float("Inf")
    err_center_normalized[~valid] = -1.0
    err_overlap[~valid] = -1.0

    if dataset == 'lasot':
        err_center_normalized[~target_visible] = float("Inf")
        err_center[~target_visible] = float("Inf")

    if torch.isnan(err_overlap).any():
        raise Exception('Nans in calculated overlap')

    return err_overlap, err_center, err_center_normalized, valid


def extract_results(trackers, dataset, report_name, skip_missing_seq=False,
                    plot_bin_gap=0.05, exclude_invalid_frames=False):
    """Compute per-sequence success/precision curves for every tracker and
    cache them to <result_plot_path>/<report_name>/eval_data.pkl."""
    settings = env_settings()
    eps = 1e-16
    result_plot_path = os.path.join(settings.result_plot_path, report_name)

    if not os.path.exists(result_plot_path):
        os.makedirs(result_plot_path)

    # Thresholds: overlap in [0, 1], center error in pixels [0, 50],
    # normalized center error in [0, 0.5].
    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)
    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)
    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0

    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)
    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),
                                                dtype=torch.float32)
    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                               dtype=torch.float32)
    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                                    dtype=torch.float32)

    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)

    for seq_id, seq in enumerate(tqdm(dataset)):
        # Load anno
        anno_bb = torch.tensor(seq.ground_truth_rect)
        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None
        for trk_id, trk in enumerate(trackers):
            # Load results
            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)
            results_path = '{}.txt'.format(base_results_path)

            if os.path.isfile(results_path):
                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64))
            else:
                if skip_missing_seq:
                    # Drop the whole sequence from the report.
                    valid_sequence[seq_id] = 0
                    break
                else:
                    raise Exception('Result not found. {}'.format(results_path))

            # Calculate measures
            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(
                pred_bb, anno_bb, seq.dataset, target_visible)

            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()

            if exclude_invalid_frames:
                seq_length = valid_frame.long().sum()
            else:
                seq_length = anno_bb.shape[0]

            if seq_length <= 0:
                raise Exception('Seq length zero')

            # Fraction of frames passing each threshold (success/precision).
            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length

    print('\n\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    # Prepare dictionary for saving data
    seq_names = [s.name for s in dataset]
    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}
                     for t in trackers]

    eval_data = {'sequences': seq_names, 'trackers': tracker_names,
                 'valid_sequence': valid_sequence.tolist(),
                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),
                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),
                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),
                 'avg_overlap_all': avg_overlap_all.tolist(),
                 'threshold_set_overlap': threshold_set_overlap.tolist(),
                 'threshold_set_center': threshold_set_center.tolist(),
                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}

    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:
        pickle.dump(eval_data, fh)

    return eval_data


================================================
FILE: lib/test/analysis/plot_results.py
================================================
import tikzplotlib
import matplotlib
import matplotlib.pyplot as plt
import os
import torch
import pickle
import json
from lib.test.evaluation.environment import env_settings
from lib.test.analysis.extract_results import extract_results


def get_plot_draw_styles():
    plot_draw_style = [{'color': (1.0, 0.0, 0.0), 'line_style': '-'},
                       {'color': (0.0, 1.0, 0.0), 'line_style': '-'},
                       {'color': (0.0, 0.0, 1.0), 'line_style': '-'},
                       {'color': (0.0, 0.0, 0.0), 'line_style': '-'},
                       {'color': (1.0, 0.0, 1.0), 'line_style': '-'},
                       {'color': (0.0, 1.0, 1.0), 'line_style': '-'},
                       {'color': (0.5, 0.5, 0.5), 'line_style': '-'},
                       {'color': (136.0 / 255.0, 0.0, 21.0 / 255.0), 'line_style': '-'},
                       {'color': (1.0, 127.0 / 255.0, 39.0 / 255.0), 'line_style': '-'},
                       {'color': (0.0, 162.0 / 255.0, 232.0 / 255.0), 'line_style': '-'},
                       {'color': (0.0, 0.5, 0.0), 'line_style': '-'},
                       {'color': (1.0, 0.5, 0.2), 'line_style': '-'},
                       {'color': (0.1, 0.4, 0.0), 'line_style': '-'},
                       {'color': (0.6, 0.3, 0.9), 'line_style': '-'},
                       {'color': (0.4, 0.7, 0.1), 'line_style': '-'},
                       {'color': (0.2, 0.1, 0.7), 'line_style': '-'},
                       {'color':
def check_eval_data_is_valid(eval_data, trackers, dataset):
    """Return True iff the cached eval_data matches the requested trackers and sequences."""
    expected_seqs = [s.name for s in dataset]
    expected_trackers = [(t.name, t.parameter_name, t.run_id) for t in trackers]
    saved_trackers = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']]
    return expected_seqs == eval_data['sequences'] and expected_trackers == saved_trackers


def merge_multiple_runs(eval_data):
    """Average the per-sequence statistics over repeated runs of the same tracker.

    Runs are grouped by (name, param); the first run of each group keeps its
    tracker entry while the rate curves and average overlaps are replaced by
    the group mean. The eval_data dict is updated in place and returned.
    """
    overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])
    center = torch.tensor(eval_data['ave_success_rate_plot_center'])
    center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])
    overlap_all = torch.tensor(eval_data['avg_overlap_all'])
    trackers = eval_data['trackers']

    merged_trackers = []
    overlap_merged = []
    center_merged = []
    center_norm_merged = []
    overlap_all_merged = []

    done = torch.zeros(len(trackers), dtype=torch.uint8)
    for idx, base in enumerate(trackers):
        if done[idx]:
            continue
        merged_trackers.append(base)
        # Every run sharing the same tracker name and parameter file belongs to this group.
        same = torch.tensor([t['name'] == base['name'] and t['param'] == base['param'] for t in trackers])
        overlap_merged.append(overlap[:, same, :].mean(1))
        center_merged.append(center[:, same, :].mean(1))
        center_norm_merged.append(center_norm[:, same, :].mean(1))
        overlap_all_merged.append(overlap_all[:, same].mean(1))
        done[same] = 1

    eval_data['trackers'] = merged_trackers
    eval_data['ave_success_rate_plot_overlap'] = torch.stack(overlap_merged, dim=1).tolist()
    eval_data['ave_success_rate_plot_center'] = torch.stack(center_merged, dim=1).tolist()
    eval_data['ave_success_rate_plot_center_norm'] = torch.stack(center_norm_merged, dim=1).tolist()
    eval_data['avg_overlap_all'] = torch.stack(overlap_all_merged, dim=1).tolist()
    return eval_data


def get_tracker_display_name(tracker):
    """Return the tracker's display name, falling back to name_param[_runid]."""
    disp = tracker['disp_name']
    if disp is not None:
        return disp
    if tracker['run_id'] is None:
        return '{}_{}'.format(tracker['name'], tracker['param'])
    return '{}_{}_{:03d}'.format(tracker['name'], tracker['param'], tracker['run_id'])
def check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs):
    """Load cached evaluation data if present and still valid; otherwise (re-)compute it.

    The cache lives at ``<result_plot_path>/<report_name>/eval_data.pkl``. When the
    cached entry matches the requested trackers/sequences, the tracker display names
    are refreshed and the cache is written back.
    """
    settings = env_settings()
    result_plot_path = os.path.join(settings.result_plot_path, report_name)
    eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl')

    if os.path.isfile(eval_data_path) and not force_evaluation:
        with open(eval_data_path, 'rb') as fh:
            eval_data = pickle.load(fh)
    else:
        eval_data = extract_results(trackers, dataset, report_name, **kwargs)

    if not check_eval_data_is_valid(eval_data, trackers, dataset):
        # Cached data was produced for a different tracker/sequence set - recompute.
        eval_data = extract_results(trackers, dataset, report_name, **kwargs)
    else:
        # Update display names and persist them back into the cache.
        eval_data['trackers'] = [{'name': t.name, 'param': t.parameter_name,
                                  'run_id': t.run_id, 'disp_name': t.display_name}
                                 for t in trackers]
        with open(eval_data_path, 'wb') as fh:
            pickle.dump(eval_data, fh)
    return eval_data


def get_auc_curve(ave_success_rate_plot_overlap, valid_sequence):
    """Average the success curves over valid sequences; AUC is the curve mean (in %)."""
    curve = ave_success_rate_plot_overlap[valid_sequence, :, :].mean(0) * 100.0
    return curve, curve.mean(-1)


def get_prec_curve(ave_success_rate_plot_center, valid_sequence):
    """Average the precision curves over valid sequences; score is the value at bin 20 (in %)."""
    curve = ave_success_rate_plot_center[valid_sequence, :, :].mean(0) * 100.0
    return curve, curve[:, 20]
Can contain 'success', 'prec' (precision), and 'norm_prec' (normalized precision) """ # Load data settings = env_settings() plot_draw_styles = get_plot_draw_styles() # Load pre-computed results result_plot_path = os.path.join(settings.result_plot_path, report_name) eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs) # Merge results from multiple runs if merge_results: eval_data = merge_multiple_runs(eval_data) tracker_names = eval_data['trackers'] valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool) print('\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0])) print('\nGenerating plots for: {}'.format(report_name)) # ******************************** Success Plot ************************************** if 'success' in plot_types: ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap']) # Index out valid sequences auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence) threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap']) success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold', 'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 88), 'title': 'Success'} plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts) # ******************************** Precision Plot ************************************** if 'prec' in plot_types: ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center']) # Index out valid sequences prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence) threshold_set_center = torch.tensor(eval_data['threshold_set_center']) precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right', 'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance 
def generate_formatted_report(row_labels, scores, table_name=''):
    """Build a fixed-width text table: one row per label, one column per score.

    args:
        row_labels - row headers (e.g. tracker display names).
        scores - dict mapping a column name to an indexable of per-row values
            exposing ``.item()`` (e.g. torch tensors).
        table_name - header placed above the label column.
    """
    label_width = max([len(lbl) for lbl in row_labels] + [len(table_name)]) + 5
    col_widths = [max(10, len(name) + 3) for name in scores.keys()]

    lines = []
    header = '\n{label: <{width}} |'.format(label=table_name, width=label_width)
    for name, cw in zip(scores.keys(), col_widths):
        header += ' {s: <{width}} |'.format(s=name, width=cw)
    lines.append(header)

    for row, lbl in enumerate(row_labels):
        text = '{tracker: <{width}} |'.format(tracker=lbl, width=label_width)
        for values, cw in zip(scores.values(), col_widths):
            text += ' {score: <{width}} |'.format(score='{:0.2f}'.format(values[row].item()), width=cw)
        lines.append(text)
    return '\n'.join(lines) + '\n'


def print_results(trackers, dataset, report_name, merge_results=False,
                  plot_types=('success'), **kwargs):
    """ Print the results for the given trackers in a formatted table
    args:
        trackers - List of trackers to evaluate
        dataset - List of sequences to evaluate
        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved
        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged
        plot_types - List of scores to display. Can contain
                    'success' (prints AUC, OP50, and OP75 scores),
                    'prec' (prints precision score), and
                    'norm_prec' (prints normalized precision score)
    """
    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)

    # Merge results from multiple runs
    if merge_results:
        eval_data = merge_multiple_runs(eval_data)

    tracker_names = eval_data['trackers']
    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)

    print('\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    scores = {}

    if 'success' in plot_types:
        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])
        auc_curve, auc = get_auc_curve(torch.tensor(eval_data['ave_success_rate_plot_overlap']), valid_sequence)
        scores['AUC'] = auc
        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]
        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]

    if 'prec' in plot_types:
        _, prec_score = get_prec_curve(torch.tensor(eval_data['ave_success_rate_plot_center']), valid_sequence)
        scores['Precision'] = prec_score

    if 'norm_prec' in plot_types:
        _, norm_prec_score = get_prec_curve(torch.tensor(eval_data['ave_success_rate_plot_center_norm']), valid_sequence)
        scores['Norm Precision'] = norm_prec_score

    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]
    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)
    print(report_text)
def print_per_sequence_results(trackers, dataset, report_name, merge_results=False,
                               filter_criteria=None, **kwargs):
    """ Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered
    using the filter criteria.

    args:
        trackers - List of trackers to evaluate
        dataset - List of sequences to evaluate
        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved
        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged
        filter_criteria - Filter sequence results which are reported. Following modes are supported
                None: No filtering. Display results for all sequences in dataset
                'ao_min': Only display sequences for which the minimum average overlap (AO) score over the
                          trackers is less than filter_criteria['threshold'].
                'ao_max': Only display sequences for which the maximum AO score over the trackers is less
                          than filter_criteria['threshold'].
                'delta_ao': Only display sequences for which the AO of different trackers varies by at least
                            filter_criteria['threshold'].
    """
    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)

    # Merge results from multiple runs
    if merge_results:
        eval_data = merge_multiple_runs(eval_data)

    tracker_names = eval_data['trackers']
    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)
    sequence_names = eval_data['sequences']
    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0

    # Filter sequences
    if filter_criteria is not None:
        if filter_criteria['mode'] == 'ao_min':
            min_ao = avg_overlap_all.min(dim=1)[0]
            valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold'])
        elif filter_criteria['mode'] == 'ao_max':
            max_ao = avg_overlap_all.max(dim=1)[0]
            valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold'])
        elif filter_criteria['mode'] == 'delta_ao':
            min_ao = avg_overlap_all.min(dim=1)[0]
            max_ao = avg_overlap_all.max(dim=1)[0]
            valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold'])
        else:
            raise Exception

    avg_overlap_all = avg_overlap_all[valid_sequence, :]
    sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in
                      enumerate(zip(sequence_names, valid_sequence.tolist())) if v]

    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]
    scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)}
    report_text = generate_formatted_report(sequence_names, scores_per_tracker)
    print(report_text)


def _collect_scores(eval_data, plot_types, valid_sequence):
    """Compute the summary scores requested in plot_types from eval_data.

    Shared helper for the overall and per-video reports; returns a dict of
    score-name -> per-tracker tensor suitable for generate_formatted_report.
    """
    scores = {}
    if 'success' in plot_types:
        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])
        auc_curve, auc = get_auc_curve(torch.tensor(eval_data['ave_success_rate_plot_overlap']), valid_sequence)
        scores['AUC'] = auc
        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]
        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]
    if 'prec' in plot_types:
        _, prec_score = get_prec_curve(torch.tensor(eval_data['ave_success_rate_plot_center']), valid_sequence)
        scores['Precision'] = prec_score
    if 'norm_prec' in plot_types:
        _, norm_prec_score = get_prec_curve(torch.tensor(eval_data['ave_success_rate_plot_center_norm']), valid_sequence)
        scores['Norm Precision'] = norm_prec_score
    return scores


def print_results_per_video(trackers, dataset, report_name, merge_results=False,
                            plot_types=('success'), per_video=False, **kwargs):
    """ Print the results for the given trackers in a formatted table
    args:
        trackers - List of trackers to evaluate
        dataset - List of sequences to evaluate
        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved
        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged
        plot_types - List of scores to display. Can contain
                    'success' (prints AUC, OP50, and OP75 scores),
                    'prec' (prints precision score), and
                    'norm_prec' (prints normalized precision score)
        per_video - If True, additionally print one table per sequence.
    """
    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)

    # Merge results from multiple runs
    if merge_results:
        eval_data = merge_multiple_runs(eval_data)

    seq_lens = len(eval_data['sequences'])
    eval_datas = [{} for _ in range(seq_lens)]
    if per_video:
        # Slice eval_data into one single-sequence dict per video; entries whose
        # length equals the sequence count are per-sequence and get sliced, the
        # rest (thresholds, trackers, ...) are shared as-is.
        for key, value in eval_data.items():
            if len(value) == seq_lens:
                for i in range(seq_lens):
                    eval_datas[i][key] = [value[i]]
            else:
                for i in range(seq_lens):
                    eval_datas[i][key] = value

    tracker_names = eval_data['trackers']
    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)

    print('\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    scores = _collect_scores(eval_data, plot_types, valid_sequence)

    # Print
    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]
    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)
    print(report_text)

    if per_video:
        for i in range(seq_lens):
            eval_data = eval_datas[i]
            print('\n{} sequences'.format(eval_data['sequences'][0]))
            valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)
            scores = _collect_scores(eval_data, plot_types, valid_sequence)
            tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]
            report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)
            print(report_text)
class Sequence:
    """Class for the sequence in an evaluation."""
    def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None,
                 object_class=None, target_visible=None, object_ids=None, multiobj_mode=False):
        # name: sequence identifier; frames: list of frame paths; dataset: dataset tag.
        # ground_truth_rect: per-frame boxes (array, or dict of arrays keyed by object id
        # in multi-object mode); target_visible: optional per-frame visibility flags.
        self.name = name
        self.frames = frames
        self.dataset = dataset
        self.ground_truth_rect = ground_truth_rect
        self.ground_truth_seg = ground_truth_seg
        self.object_class = object_class
        self.target_visible = target_visible
        self.object_ids = object_ids
        self.multiobj_mode = multiobj_mode
        self.init_data = self._construct_init_data(init_data)
        self._ensure_start_frame()

    def _ensure_start_frame(self):
        """Shift all per-frame data so that the first init frame becomes frame 0."""
        # Ensure start frame is 0
        start_frame = min(list(self.init_data.keys()))
        if start_frame > 0:
            self.frames = self.frames[start_frame:]
            if self.ground_truth_rect is not None:
                if isinstance(self.ground_truth_rect, (dict, OrderedDict)):
                    for obj_id, gt in self.ground_truth_rect.items():
                        self.ground_truth_rect[obj_id] = gt[start_frame:,:]
                else:
                    self.ground_truth_rect = self.ground_truth_rect[start_frame:,:]
            if self.ground_truth_seg is not None:
                self.ground_truth_seg = self.ground_truth_seg[start_frame:]
                assert len(self.frames) == len(self.ground_truth_seg)
            if self.target_visible is not None:
                self.target_visible = self.target_visible[start_frame:]
            # Re-key init data relative to the new frame 0.
            self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()}

    def _construct_init_data(self, init_data):
        """Normalize init_data (mutating it in place) or synthesize it from frame-0 ground truth."""
        if init_data is not None:
            if not self.multiobj_mode:
                assert self.object_ids is None or len(self.object_ids) == 1
                # In single-object mode, collapse per-object bbox dicts to the single object's box.
                for frame, init_val in init_data.items():
                    if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)):
                        init_val['bbox'] = init_val['bbox'][self.object_ids[0]]
            # convert to list
            for frame, init_val in init_data.items():
                if 'bbox' in init_val:
                    if isinstance(init_val['bbox'], (dict, OrderedDict)):
                        init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()})
                    else:
                        init_val['bbox'] = list(init_val['bbox'])
        else:
            init_data = {0: dict()}     # Assume start from frame 0
            if self.object_ids is not None:
                init_data[0]['object_ids'] = self.object_ids
            if self.ground_truth_rect is not None:
                if self.multiobj_mode:
                    assert isinstance(self.ground_truth_rect, (dict, OrderedDict))
                    init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()})
                else:
                    assert self.object_ids is None or len(self.object_ids) == 1
                    if isinstance(self.ground_truth_rect, (dict, OrderedDict)):
                        init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :])
                    else:
                        init_data[0]['bbox'] = list(self.ground_truth_rect[0,:])
            if self.ground_truth_seg is not None:
                init_data[0]['mask'] = self.ground_truth_seg[0]
        return init_data

    def init_info(self):
        """Return the initialization info for frame 0."""
        info = self.frame_info(frame_num=0)
        return info

    def frame_info(self, frame_num):
        """Return the per-frame info dict (init_* entries) for the given frame."""
        info = self.object_init_data(frame_num=frame_num)
        return info

    def init_bbox(self, frame_num=0):
        return self.object_init_data(frame_num=frame_num).get('init_bbox')

    def init_mask(self, frame_num=0):
        return self.object_init_data(frame_num=frame_num).get('init_mask')

    def get_info(self, keys, frame_num=None):
        """Collect the requested keys for a frame, skipping entries that are None."""
        info = dict()
        for k in keys:
            val = self.get(k, frame_num=frame_num)
            if val is not None:
                info[k] = val
        return info

    def object_init_data(self, frame_num=None) -> dict:
        """Return the init data for a frame with keys prefixed by 'init_'.

        Loads the mask from disk (indexed PNG) if one is given, and in
        single-object mode binarizes it to the tracked object's id.
        """
        if frame_num is None:
            frame_num = 0
        if frame_num not in self.init_data:
            return dict()

        init_data = dict()
        for key, val in self.init_data[frame_num].items():
            if val is None:
                continue
            init_data['init_'+key] = val

        if 'init_mask' in init_data and init_data['init_mask'] is not None:
            anno = imread_indexed(init_data['init_mask'])
            if not self.multiobj_mode and self.object_ids is not None:
                assert len(self.object_ids) == 1
                anno = (anno == int(self.object_ids[0])).astype(np.uint8)
            init_data['init_mask'] = anno

        if self.object_ids is not None:
            init_data['object_ids'] = self.object_ids
            init_data['sequence_object_ids'] = self.object_ids

        return init_data

    def target_class(self, frame_num=None):
        return self.object_class

    def get(self, name, frame_num=None):
        # Dispatch to the accessor method of the same name (e.g. 'init_bbox').
        return getattr(self, name)(frame_num)

    def __repr__(self):
        return "{self.__class__.__name__} {self.name}, length={len} frames".format(self=self, len=len(self.frames))


class SequenceList(list):
    """List of sequences. Supports the addition operator to concatenate sequence lists."""
    def __getitem__(self, item):
        # str -> lookup by sequence name; int -> single element; tuple/list of
        # indices or a slice -> a new SequenceList.
        if isinstance(item, str):
            for seq in self:
                if seq.name == item:
                    return seq
            raise IndexError('Sequence name not in the dataset.')
        elif isinstance(item, int):
            return super(SequenceList, self).__getitem__(item)
        elif isinstance(item, (tuple, list)):
            return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])
        else:
            return SequenceList(super(SequenceList, self).__getitem__(item))

    def __add__(self, other):
        return SequenceList(super(SequenceList, self).__add__(other))

    def copy(self):
        return SequenceList(super(SequenceList, self).copy())
def load_dataset(name: str):
    """ Import and load a single dataset."""
    key = name.lower()
    info = dataset_dict.get(key)
    if info is None:
        raise ValueError('Unknown dataset \'%s\'' % key)

    # Import the dataset module lazily and instantiate the dataset class.
    module = importlib.import_module(info.module)
    dataset_cls = getattr(module, info.class_name)
    return dataset_cls(**info.kwargs).get_sequence_list()


def get_dataset(*args):
    """ Get a single or set of datasets."""
    combined = SequenceList()
    for dataset_name in args:
        combined.extend(load_dataset(dataset_name))
    return combined
self.got10k_path = '' self.lasot_path = '' self.trackingnet_path = '' self.davis_dir = '' self.youtubevos_dir = '' self.got_packed_results_path = '' self.got_reports_path = '' self.tn_packed_results_path = '' def create_default_local_file(): comment = {'results_path': 'Where to store tracking results', 'network_path': 'Where tracking networks are stored.'} path = os.path.join(os.path.dirname(__file__), 'local.py') with open(path, 'w') as f: settings = EnvSettings() f.write('from test.evaluation.environment import EnvSettings\n\n') f.write('def local_env_settings():\n') f.write(' settings = EnvSettings()\n\n') f.write(' # Set your local paths here.\n\n') for attr in dir(settings): comment_str = None if attr in comment: comment_str = comment[attr] attr_val = getattr(settings, attr) if not attr.startswith('__') and not callable(attr_val): if comment_str is None: f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val)) else: f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str)) f.write('\n return settings\n\n') class EnvSettings_ITP: def __init__(self, workspace_dir, data_dir, save_dir): self.prj_dir = workspace_dir self.save_dir = save_dir self.results_path = os.path.join(save_dir, 'test/tracking_results') self.segmentation_path = os.path.join(save_dir, 'test/segmentation_results') self.network_path = os.path.join(save_dir, 'test/networks') self.result_plot_path = os.path.join(save_dir, 'test/result_plots') self.otb_path = os.path.join(data_dir, 'otb') self.nfs_path = os.path.join(data_dir, 'nfs') self.uav_path = os.path.join(data_dir, 'uav') self.tc128_path = os.path.join(data_dir, 'TC128') self.tpl_path = '' self.vot_path = os.path.join(data_dir, 'VOT2019') self.got10k_path = os.path.join(data_dir, 'got10k') self.got10k_lmdb_path = os.path.join(data_dir, 'got10k_lmdb') self.lasot_path = os.path.join(data_dir, 'lasot') self.lasot_lmdb_path = os.path.join(data_dir, 'lasot_lmdb') self.trackingnet_path = os.path.join(data_dir, 'trackingnet') 
self.vot18_path = os.path.join(data_dir, 'vot2018') self.vot22_path = os.path.join(data_dir, 'vot2022') self.itb_path = os.path.join(data_dir, 'itb') self.tnl2k_path = os.path.join(data_dir, 'tnl2k') self.lasot_extension_subset_path_path = os.path.join(data_dir, 'lasot_extension_subset') self.davis_dir = '' self.youtubevos_dir = '' self.got_packed_results_path = '' self.got_reports_path = '' self.tn_packed_results_path = '' def create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir): comment = {'results_path': 'Where to store tracking results', 'network_path': 'Where tracking networks are stored.'} path = os.path.join(os.path.dirname(__file__), 'local.py') with open(path, 'w') as f: settings = EnvSettings_ITP(workspace_dir, data_dir, save_dir) f.write('from lib.test.evaluation.environment import EnvSettings\n\n') f.write('def local_env_settings():\n') f.write(' settings = EnvSettings()\n\n') f.write(' # Set your local paths here.\n\n') for attr in dir(settings): comment_str = None if attr in comment: comment_str = comment[attr] attr_val = getattr(settings, attr) if not attr.startswith('__') and not callable(attr_val): if comment_str is None: f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val)) else: f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str)) f.write('\n return settings\n\n') def env_settings(): env_module_name = 'lib.test.evaluation.local' try: env_module = importlib.import_module(env_module_name) return env_module.local_env_settings() except: env_file = os.path.join(os.path.dirname(__file__), 'local.py') # Create a default file create_default_local_file() raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. 
' 'Then try to run again.'.format(env_file)) ================================================ FILE: lib/test/evaluation/got10kdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text import os class GOT10KDataset(BaseDataset): """ GOT-10k dataset. Publication: GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild Lianghua Huang, Xin Zhao, and Kaiqi Huang arXiv:1810.11981, 2018 https://arxiv.org/pdf/1810.11981.pdf Download dataset from http://got-10k.aitestunion.com/downloads """ def __init__(self, split): super().__init__() # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set) if split == 'test' or split == 'val': self.base_path = os.path.join(self.env_settings.got10k_path, split) else: self.base_path = os.path.join(self.env_settings.got10k_path, 'train') self.sequence_list = self._get_sequence_list(split) self.split = split def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64) frames_path = '{}/{}'.format(self.base_path, sequence_name) frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")] frame_list.sort(key=lambda f: int(f[:-4])) frames_list = [os.path.join(frames_path, frame) for frame in frame_list] return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4)) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self, split): with open('{}/list.txt'.format(self.base_path)) as f: sequence_list = f.read().splitlines() if split == 'ltrval': with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f: 
seq_ids = f.read().splitlines() sequence_list = [sequence_list[int(x)] for x in seq_ids] return sequence_list ================================================ FILE: lib/test/evaluation/itbdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text import os class ITBDataset(BaseDataset): """ NUS-PRO dataset """ def __init__(self): super().__init__() self.base_path = self.env_settings.itb_path self.sequence_info_list = self._get_sequence_info_list(self.base_path) def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame + init_omit, end_frame + 1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) # NOTE: NUS has some weird annos which panda cannot handle ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:, :], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def get_fileNames(self, rootdir): fs = [] fs_all = [] for root, dirs, files in os.walk(rootdir, topdown=True): files.sort() files.sort(key=len) if files is not None: for name in files: _, ending = os.path.splitext(name) if ending == ".jpg": _, root_ = os.path.split(root) fs.append(os.path.join(root_, name)) 
fs_all.append(os.path.join(root, name)) return fs_all, fs def _get_sequence_info_list(self, base_path): sequence_info_list = [] for scene in os.listdir(base_path): if '.' in scene: continue videos = os.listdir(os.path.join(base_path, scene)) for video in videos: _, fs = self.get_fileNames(os.path.join(base_path, scene, video)) video_tmp = {"name": video, "path": scene + '/' + video, "startFrame": 1, "endFrame": len(fs), "nz": len(fs[0].split('/')[-1].split('.')[0]), "ext": "jpg", "anno_path": scene + '/' + video + "/groundtruth.txt", "object_class": "unknown"} sequence_info_list.append(video_tmp) return sequence_info_list # sequence_info_list_50 # ================================================ FILE: lib/test/evaluation/lasot_lmdbdataset.py ================================================ from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.utils.lmdb_utils import * '''2021.1.27 LaSOT dataset using lmdb data''' class LaSOTlmdbDataset(BaseDataset): """ LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper) Publication: LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling CVPR, 2019 https://arxiv.org/pdf/1809.07845.pdf Download the dataset from https://cis.temple.edu/lasot/download.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.lasot_lmdb_path self.sequence_list = self._get_sequence_list() self.clean_list = self.clean_seq_list() def clean_seq_list(self): clean_lst = [] for i in range(len(self.sequence_list)): cls, _ = self.sequence_list[i].split('-') clean_lst.append(cls) return clean_lst def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): class_name = sequence_name.split('-')[0] anno_path = str('{}/{}/groundtruth.txt'.format(class_name, sequence_name)) 
# decode the groundtruth gt_str_list = decode_str(self.base_path, anno_path).split('\n')[:-1] # the last line is empty gt_list = [list(map(float, line.split(','))) for line in gt_str_list] ground_truth_rect = np.array(gt_list).astype(np.float64) # decode occlusion file occlusion_label_path = str('{}/{}/full_occlusion.txt'.format(class_name, sequence_name)) occ_list = list(map(int, decode_str(self.base_path, occlusion_label_path).split(','))) full_occlusion = np.array(occ_list).astype(np.float64) # decode out of view file out_of_view_label_path = str('{}/{}/out_of_view.txt'.format(class_name, sequence_name)) out_of_view_list = list(map(int, decode_str(self.base_path, out_of_view_label_path).split(','))) out_of_view = np.array(out_of_view_list).astype(np.float64) target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0) frames_path = '{}/{}/img'.format(class_name, sequence_name) frames_list = [[self.base_path, '{}/{:08d}.jpg'.format(frames_path, frame_number)] for frame_number in range(1, ground_truth_rect.shape[0] + 1)] target_class = class_name return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4), object_class=target_class, target_visible=target_visible) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self): sequence_list = ['airplane-1', 'airplane-9', 'airplane-13', 'airplane-15', 'basketball-1', 'basketball-6', 'basketball-7', 'basketball-11', 'bear-2', 'bear-4', 'bear-6', 'bear-17', 'bicycle-2', 'bicycle-7', 'bicycle-9', 'bicycle-18', 'bird-2', 'bird-3', 'bird-15', 'bird-17', 'boat-3', 'boat-4', 'boat-12', 'boat-17', 'book-3', 'book-10', 'book-11', 'book-19', 'bottle-1', 'bottle-12', 'bottle-14', 'bottle-18', 'bus-2', 'bus-5', 'bus-17', 'bus-19', 'car-2', 'car-6', 'car-9', 'car-17', 'cat-1', 'cat-3', 'cat-18', 'cat-20', 'cattle-2', 'cattle-7', 'cattle-12', 'cattle-13', 'spider-14', 'spider-16', 'spider-18', 'spider-20', 'coin-3', 'coin-6', 'coin-7', 'coin-18', 'crab-3', 'crab-6', 'crab-12', 
'crab-18', 'surfboard-12', 'surfboard-4', 'surfboard-5', 'surfboard-8', 'cup-1', 'cup-4', 'cup-7', 'cup-17', 'deer-4', 'deer-8', 'deer-10', 'deer-14', 'dog-1', 'dog-7', 'dog-15', 'dog-19', 'guitar-3', 'guitar-8', 'guitar-10', 'guitar-16', 'person-1', 'person-5', 'person-10', 'person-12', 'pig-2', 'pig-10', 'pig-13', 'pig-18', 'rubicCube-1', 'rubicCube-6', 'rubicCube-14', 'rubicCube-19', 'swing-10', 'swing-14', 'swing-17', 'swing-20', 'drone-13', 'drone-15', 'drone-2', 'drone-7', 'pool-12', 'pool-15', 'pool-3', 'pool-7', 'rabbit-10', 'rabbit-13', 'rabbit-17', 'rabbit-19', 'racing-10', 'racing-15', 'racing-16', 'racing-20', 'robot-1', 'robot-19', 'robot-5', 'robot-8', 'sepia-13', 'sepia-16', 'sepia-6', 'sepia-8', 'sheep-3', 'sheep-5', 'sheep-7', 'sheep-9', 'skateboard-16', 'skateboard-19', 'skateboard-3', 'skateboard-8', 'tank-14', 'tank-16', 'tank-6', 'tank-9', 'tiger-12', 'tiger-18', 'tiger-4', 'tiger-6', 'train-1', 'train-11', 'train-20', 'train-7', 'truck-16', 'truck-3', 'truck-6', 'truck-7', 'turtle-16', 'turtle-5', 'turtle-8', 'turtle-9', 'umbrella-17', 'umbrella-19', 'umbrella-2', 'umbrella-9', 'yoyo-15', 'yoyo-17', 'yoyo-19', 'yoyo-7', 'zebra-10', 'zebra-14', 'zebra-16', 'zebra-17', 'elephant-1', 'elephant-12', 'elephant-16', 'elephant-18', 'goldfish-3', 'goldfish-7', 'goldfish-8', 'goldfish-10', 'hat-1', 'hat-2', 'hat-5', 'hat-18', 'kite-4', 'kite-6', 'kite-10', 'kite-15', 'motorcycle-1', 'motorcycle-3', 'motorcycle-9', 'motorcycle-18', 'mouse-1', 'mouse-8', 'mouse-9', 'mouse-17', 'flag-3', 'flag-9', 'flag-5', 'flag-2', 'frog-3', 'frog-4', 'frog-20', 'frog-9', 'gametarget-1', 'gametarget-2', 'gametarget-7', 'gametarget-13', 'hand-2', 'hand-3', 'hand-9', 'hand-16', 'helmet-5', 'helmet-11', 'helmet-19', 'helmet-13', 'licenseplate-6', 'licenseplate-12', 'licenseplate-13', 'licenseplate-15', 'electricfan-1', 'electricfan-10', 'electricfan-18', 'electricfan-20', 'chameleon-3', 'chameleon-6', 'chameleon-11', 'chameleon-20', 'crocodile-3', 'crocodile-4', 
'crocodile-10', 'crocodile-14', 'gecko-1', 'gecko-5', 'gecko-16', 'gecko-19', 'fox-2', 'fox-3', 'fox-5', 'fox-20', 'giraffe-2', 'giraffe-10', 'giraffe-13', 'giraffe-15', 'gorilla-4', 'gorilla-6', 'gorilla-9', 'gorilla-13', 'hippo-1', 'hippo-7', 'hippo-9', 'hippo-20', 'horse-1', 'horse-4', 'horse-12', 'horse-15', 'kangaroo-2', 'kangaroo-5', 'kangaroo-11', 'kangaroo-14', 'leopard-1', 'leopard-7', 'leopard-16', 'leopard-20', 'lion-1', 'lion-5', 'lion-12', 'lion-20', 'lizard-1', 'lizard-3', 'lizard-6', 'lizard-13', 'microphone-2', 'microphone-6', 'microphone-14', 'microphone-16', 'monkey-3', 'monkey-4', 'monkey-9', 'monkey-17', 'shark-2', 'shark-3', 'shark-5', 'shark-6', 'squirrel-8', 'squirrel-11', 'squirrel-13', 'squirrel-19', 'volleyball-1', 'volleyball-13', 'volleyball-18', 'volleyball-19'] return sequence_list ================================================ FILE: lib/test/evaluation/lasotdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text class LaSOTDataset(BaseDataset): """ LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper) Publication: LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling CVPR, 2019 https://arxiv.org/pdf/1809.07845.pdf Download the dataset from https://cis.temple.edu/lasot/download.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.lasot_path self.sequence_list = self._get_sequence_list() self.clean_list = self.clean_seq_list() def clean_seq_list(self): clean_lst = [] for i in range(len(self.sequence_list)): cls, _ = self.sequence_list[i].split('-') clean_lst.append(cls) return clean_lst def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def 
_construct_sequence(self, sequence_name): class_name = sequence_name.split('-')[0] anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64) occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name) # NOTE: pandas backed seems super super slow for loading occlusion/oov masks full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy') out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name) out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy') target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0) frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name) frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)] target_class = class_name return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4), object_class=target_class, target_visible=target_visible) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self): sequence_list = ['airplane-1', 'airplane-9', 'airplane-13', 'airplane-15', 'basketball-1', 'basketball-6', 'basketball-7', 'basketball-11', 'bear-2', 'bear-4', 'bear-6', 'bear-17', 'bicycle-2', 'bicycle-7', 'bicycle-9', 'bicycle-18', 'bird-2', 'bird-3', 'bird-15', 'bird-17', 'boat-3', 'boat-4', 'boat-12', 'boat-17', 'book-3', 'book-10', 'book-11', 'book-19', 'bottle-1', 'bottle-12', 'bottle-14', 'bottle-18', 'bus-2', 'bus-5', 'bus-17', 'bus-19', 'car-2', 'car-6', 'car-9', 'car-17', 'cat-1', 'cat-3', 'cat-18', 'cat-20', 'cattle-2', 'cattle-7', 'cattle-12', 'cattle-13', 'spider-14', 'spider-16', 'spider-18', 'spider-20', 'coin-3', 'coin-6', 'coin-7', 'coin-18', 'crab-3', 'crab-6', 'crab-12', 'crab-18', 'surfboard-12', 
'surfboard-4', 'surfboard-5', 'surfboard-8', 'cup-1', 'cup-4', 'cup-7', 'cup-17', 'deer-4', 'deer-8', 'deer-10', 'deer-14', 'dog-1', 'dog-7', 'dog-15', 'dog-19', 'guitar-3', 'guitar-8', 'guitar-10', 'guitar-16', 'person-1', 'person-5', 'person-10', 'person-12', 'pig-2', 'pig-10', 'pig-13', 'pig-18', 'rubicCube-1', 'rubicCube-6', 'rubicCube-14', 'rubicCube-19', 'swing-10', 'swing-14', 'swing-17', 'swing-20', 'drone-13', 'drone-15', 'drone-2', 'drone-7', 'pool-12', 'pool-15', 'pool-3', 'pool-7', 'rabbit-10', 'rabbit-13', 'rabbit-17', 'rabbit-19', 'racing-10', 'racing-15', 'racing-16', 'racing-20', 'robot-1', 'robot-19', 'robot-5', 'robot-8', 'sepia-13', 'sepia-16', 'sepia-6', 'sepia-8', 'sheep-3', 'sheep-5', 'sheep-7', 'sheep-9', 'skateboard-16', 'skateboard-19', 'skateboard-3', 'skateboard-8', 'tank-14', 'tank-16', 'tank-6', 'tank-9', 'tiger-12', 'tiger-18', 'tiger-4', 'tiger-6', 'train-1', 'train-11', 'train-20', 'train-7', 'truck-16', 'truck-3', 'truck-6', 'truck-7', 'turtle-16', 'turtle-5', 'turtle-8', 'turtle-9', 'umbrella-17', 'umbrella-19', 'umbrella-2', 'umbrella-9', 'yoyo-15', 'yoyo-17', 'yoyo-19', 'yoyo-7', 'zebra-10', 'zebra-14', 'zebra-16', 'zebra-17', 'elephant-1', 'elephant-12', 'elephant-16', 'elephant-18', 'goldfish-3', 'goldfish-7', 'goldfish-8', 'goldfish-10', 'hat-1', 'hat-2', 'hat-5', 'hat-18', 'kite-4', 'kite-6', 'kite-10', 'kite-15', 'motorcycle-1', 'motorcycle-3', 'motorcycle-9', 'motorcycle-18', 'mouse-1', 'mouse-8', 'mouse-9', 'mouse-17', 'flag-3', 'flag-9', 'flag-5', 'flag-2', 'frog-3', 'frog-4', 'frog-20', 'frog-9', 'gametarget-1', 'gametarget-2', 'gametarget-7', 'gametarget-13', 'hand-2', 'hand-3', 'hand-9', 'hand-16', 'helmet-5', 'helmet-11', 'helmet-19', 'helmet-13', 'licenseplate-6', 'licenseplate-12', 'licenseplate-13', 'licenseplate-15', 'electricfan-1', 'electricfan-10', 'electricfan-18', 'electricfan-20', 'chameleon-3', 'chameleon-6', 'chameleon-11', 'chameleon-20', 'crocodile-3', 'crocodile-4', 'crocodile-10', 'crocodile-14', 
'gecko-1', 'gecko-5', 'gecko-16', 'gecko-19', 'fox-2', 'fox-3', 'fox-5', 'fox-20', 'giraffe-2', 'giraffe-10', 'giraffe-13', 'giraffe-15', 'gorilla-4', 'gorilla-6', 'gorilla-9', 'gorilla-13', 'hippo-1', 'hippo-7', 'hippo-9', 'hippo-20', 'horse-1', 'horse-4', 'horse-12', 'horse-15', 'kangaroo-2', 'kangaroo-5', 'kangaroo-11', 'kangaroo-14', 'leopard-1', 'leopard-7', 'leopard-16', 'leopard-20', 'lion-1', 'lion-5', 'lion-12', 'lion-20', 'lizard-1', 'lizard-3', 'lizard-6', 'lizard-13', 'microphone-2', 'microphone-6', 'microphone-14', 'microphone-16', 'monkey-3', 'monkey-4', 'monkey-9', 'monkey-17', 'shark-2', 'shark-3', 'shark-5', 'shark-6', 'squirrel-8', 'squirrel-11', 'squirrel-13', 'squirrel-19', 'volleyball-1', 'volleyball-13', 'volleyball-18', 'volleyball-19'] return sequence_list ================================================ FILE: lib/test/evaluation/lasotextensionsubsetdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text class LaSOTExtensionSubsetDataset(BaseDataset): """ LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper) Publication: LaSOT: A High-quality Large-scale Single Object Tracking Benchmark Heng Fan, Hexin Bai, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Harshit, Mingzhen Huang, Juehuan Liu, Yong Xu, Chunyuan Liao, Lin Yuan, Haibin Ling IJCV, 2020 https://arxiv.org/pdf/2009.03465.pdf Download the dataset from http://vision.cs.stonybrook.edu/~lasot/download.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.lasot_extension_subset_path self.sequence_list = self._get_sequence_list() self.clean_list = self.clean_seq_list() def clean_seq_list(self): clean_lst = [] for i in range(len(self.sequence_list)): cls, _ = self.sequence_list[i].split('-') clean_lst.append(cls) return clean_lst def get_sequence_list(self): return 
SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): class_name = sequence_name.split('-')[0] anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64) occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name) # NOTE: pandas backed seems super super slow for loading occlusion/oov masks full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy') out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name) out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy') target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0) frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name) frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)] target_class = class_name return Sequence(sequence_name, frames_list, 'lasot_extension_subset', ground_truth_rect.reshape(-1, 4), object_class=target_class, target_visible=target_visible) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self): sequence_list = ['atv-1', 'atv-2', 'atv-3', 'atv-4', 'atv-5', 'atv-6', 'atv-7', 'atv-8', 'atv-9', 'atv-10', 'badminton-1', 'badminton-2', 'badminton-3', 'badminton-4', 'badminton-5', 'badminton-6', 'badminton-7', 'badminton-8', 'badminton-9', 'badminton-10', 'cosplay-1', 'cosplay-10', 'cosplay-2', 'cosplay-3', 'cosplay-4', 'cosplay-5', 'cosplay-6', 'cosplay-7', 'cosplay-8', 'cosplay-9', 'dancingshoe-1', 'dancingshoe-2', 'dancingshoe-3', 'dancingshoe-4', 'dancingshoe-5', 'dancingshoe-6', 'dancingshoe-7', 'dancingshoe-8', 'dancingshoe-9', 'dancingshoe-10', 'footbag-1', 'footbag-2', 'footbag-3', 'footbag-4', 'footbag-5', 'footbag-6', 
'footbag-7', 'footbag-8', 'footbag-9', 'footbag-10', 'frisbee-1', 'frisbee-2', 'frisbee-3', 'frisbee-4', 'frisbee-5', 'frisbee-6', 'frisbee-7', 'frisbee-8', 'frisbee-9', 'frisbee-10', 'jianzi-1', 'jianzi-2', 'jianzi-3', 'jianzi-4', 'jianzi-5', 'jianzi-6', 'jianzi-7', 'jianzi-8', 'jianzi-9', 'jianzi-10', 'lantern-1', 'lantern-2', 'lantern-3', 'lantern-4', 'lantern-5', 'lantern-6', 'lantern-7', 'lantern-8', 'lantern-9', 'lantern-10', 'misc-1', 'misc-2', 'misc-3', 'misc-4', 'misc-5', 'misc-6', 'misc-7', 'misc-8', 'misc-9', 'misc-10', 'opossum-1', 'opossum-2', 'opossum-3', 'opossum-4', 'opossum-5', 'opossum-6', 'opossum-7', 'opossum-8', 'opossum-9', 'opossum-10', 'paddle-1', 'paddle-2', 'paddle-3', 'paddle-4', 'paddle-5', 'paddle-6', 'paddle-7', 'paddle-8', 'paddle-9', 'paddle-10', 'raccoon-1', 'raccoon-2', 'raccoon-3', 'raccoon-4', 'raccoon-5', 'raccoon-6', 'raccoon-7', 'raccoon-8', 'raccoon-9', 'raccoon-10', 'rhino-1', 'rhino-2', 'rhino-3', 'rhino-4', 'rhino-5', 'rhino-6', 'rhino-7', 'rhino-8', 'rhino-9', 'rhino-10', 'skatingshoe-1', 'skatingshoe-2', 'skatingshoe-3', 'skatingshoe-4', 'skatingshoe-5', 'skatingshoe-6', 'skatingshoe-7', 'skatingshoe-8', 'skatingshoe-9', 'skatingshoe-10', 'wingsuit-1', 'wingsuit-2', 'wingsuit-3', 'wingsuit-4', 'wingsuit-5', 'wingsuit-6', 'wingsuit-7', 'wingsuit-8', 'wingsuit-9', 'wingsuit-10'] return sequence_list ================================================ FILE: lib/test/evaluation/local.py ================================================ from lib.test.evaluation.environment import EnvSettings def local_env_settings(): settings = EnvSettings() # Set your local paths here. 
settings.davis_dir = '' settings.got10k_lmdb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/got10k_lmdb' settings.got10k_path = '/home/baiyifan/GOT-10k/' settings.got_packed_results_path = '' settings.got_reports_path = '' settings.itb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/itb' settings.lasot_extension_subset_path_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot_extension_subset' settings.lasot_lmdb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot_lmdb' settings.lasot_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot' settings.network_path = '/data1/baiyifan/artrackv2_256_got/' # Where tracking networks are stored. settings.nfs_path = '/home/wangguijie/code/Siamese-ResNet-track/data/nfs' settings.otb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/otb' settings.prj_dir = '/home/baiyifan/code/AR2_github/ARTrack-main/' settings.result_plot_path = '/data1/baiyifan/artrackv2_256_got/' settings.results_path = '/data1/baiyifan/artrackv2_256_got/' # Where to store tracking results settings.save_dir = '/data1/baiyifan/artrackv2_256_got/' settings.segmentation_path = '/home/wangguijie/code/Siamese-ResNet-track/output/test/segmentation_results' settings.tc128_path = '/home/wangguijie/code/Siamese-ResNet-track/data/TC128' settings.tn_packed_results_path = '' settings.tnl2k_path = '/home/wangguijie/code/Siamese-ResNet-track/data/tnl2k' settings.tpl_path = '' settings.trackingnet_path = '/home/wangguijie/code/Siamese-ResNet-track/data/trackingnet' settings.uav_path = '/home/wangguijie/code/Siamese-ResNet-track/data/uav' settings.vot18_path = '/home/wangguijie/code/Siamese-ResNet-track/data/vot2018' settings.vot22_path = '/home/wangguijie/code/Siamese-ResNet-track/data/vot2022' settings.vot_path = '/home/wangguijie/code/Siamese-ResNet-track/data/VOT2019' settings.youtubevos_dir = '' return settings ================================================ FILE: lib/test/evaluation/nfsdataset.py 
================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text class NFSDataset(BaseDataset): """ NFS dataset. Publication: Need for Speed: A Benchmark for Higher Frame Rate Object Tracking H. Kiani Galoogahi, A. Fagg, C. Huang, D. Ramanan, and S.Lucey ICCV, 2017 http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf Download the dataset from http://ci2cv.net/nfs/index.html """ def __init__(self): super().__init__() self.base_path = self.env_settings.nfs_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter='\t', dtype=np.float64) return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "nfs_Gymnastics", "path": "sequences/Gymnastics", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Gymnastics.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_MachLoop_jet", "path": "sequences/MachLoop_jet", "startFrame": 1, 
"endFrame": 99, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_MachLoop_jet.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_Skiing_red", "path": "sequences/Skiing_red", "startFrame": 1, "endFrame": 69, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skiing_red.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_Skydiving", "path": "sequences/Skydiving", "startFrame": 1, "endFrame": 196, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_Skydiving.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_airboard_1", "path": "sequences/airboard_1", "startFrame": 1, "endFrame": 425, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airboard_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_airplane_landing", "path": "sequences/airplane_landing", "startFrame": 1, "endFrame": 81, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airplane_landing.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_airtable_3", "path": "sequences/airtable_3", "startFrame": 1, "endFrame": 482, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_airtable_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_1", "path": "sequences/basketball_1", "startFrame": 1, "endFrame": 282, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_1.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_2", "path": "sequences/basketball_2", "startFrame": 1, "endFrame": 102, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_3", "path": "sequences/basketball_3", "startFrame": 1, "endFrame": 421, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_basketball_6", "path": "sequences/basketball_6", "startFrame": 1, "endFrame": 224, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_6.txt", "object_class": "ball", 'occlusion': False}, {"name": 
"nfs_basketball_7", "path": "sequences/basketball_7", "startFrame": 1, "endFrame": 240, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_7.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player", "path": "sequences/basketball_player", "startFrame": 1, "endFrame": 369, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_player.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_basketball_player_2", "path": "sequences/basketball_player_2", "startFrame": 1, "endFrame": 437, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_basketball_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_beach_flipback_person", "path": "sequences/beach_flipback_person", "startFrame": 1, "endFrame": 61, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_beach_flipback_person.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_bee", "path": "sequences/bee", "startFrame": 1, "endFrame": 45, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bee.txt", "object_class": "insect", 'occlusion': False}, {"name": "nfs_biker_acrobat", "path": "sequences/biker_acrobat", "startFrame": 1, "endFrame": 128, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_acrobat.txt", "object_class": "bicycle", 'occlusion': False}, {"name": "nfs_biker_all_1", "path": "sequences/biker_all_1", "startFrame": 1, "endFrame": 113, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_all_1.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_head_2", "path": "sequences/biker_head_2", "startFrame": 1, "endFrame": 132, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_2.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_head_3", "path": "sequences/biker_head_3", "startFrame": 1, "endFrame": 254, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_head_3.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_biker_upper_body", "path": "sequences/biker_upper_body", 
"startFrame": 1, "endFrame": 194, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_upper_body.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_biker_whole_body", "path": "sequences/biker_whole_body", "startFrame": 1, "endFrame": 572, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_biker_whole_body.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_billiard_2", "path": "sequences/billiard_2", "startFrame": 1, "endFrame": 604, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_3", "path": "sequences/billiard_3", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_3.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_6", "path": "sequences/billiard_6", "startFrame": 1, "endFrame": 771, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_7", "path": "sequences/billiard_7", "startFrame": 1, "endFrame": 724, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_billiard_8", "path": "sequences/billiard_8", "startFrame": 1, "endFrame": 778, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_billiard_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bird_2", "path": "sequences/bird_2", "startFrame": 1, "endFrame": 476, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bird_2.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_book", "path": "sequences/book", "startFrame": 1, "endFrame": 288, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_book.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bottle", "path": "sequences/bottle", "startFrame": 1, "endFrame": 2103, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bottle.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_bowling_1", "path": "sequences/bowling_1", 
"startFrame": 1, "endFrame": 303, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_1.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_2", "path": "sequences/bowling_2", "startFrame": 1, "endFrame": 710, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_2.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_3", "path": "sequences/bowling_3", "startFrame": 1, "endFrame": 271, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_3.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bowling_6", "path": "sequences/bowling_6", "startFrame": 1, "endFrame": 260, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_6.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_bowling_ball", "path": "sequences/bowling_ball", "startFrame": 1, "endFrame": 275, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bowling_ball.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_bunny", "path": "sequences/bunny", "startFrame": 1, "endFrame": 705, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_bunny.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_car", "path": "sequences/car", "startFrame": 1, "endFrame": 2020, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car.txt", "object_class": "car", 'occlusion': True}, {"name": "nfs_car_camaro", "path": "sequences/car_camaro", "startFrame": 1, "endFrame": 36, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_camaro.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_drifting", "path": "sequences/car_drifting", "startFrame": 1, "endFrame": 173, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_drifting.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_jumping", "path": "sequences/car_jumping", "startFrame": 1, "endFrame": 22, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_jumping.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rolling", "path": "sequences/car_rc_rolling", "startFrame": 1, 
"endFrame": 62, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rolling.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_rc_rotating", "path": "sequences/car_rc_rotating", "startFrame": 1, "endFrame": 80, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_rc_rotating.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_side", "path": "sequences/car_side", "startFrame": 1, "endFrame": 108, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_side.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_car_white", "path": "sequences/car_white", "startFrame": 1, "endFrame": 2063, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_car_white.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_cheetah", "path": "sequences/cheetah", "startFrame": 1, "endFrame": 167, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cheetah.txt", "object_class": "mammal", 'occlusion': True}, {"name": "nfs_cup", "path": "sequences/cup", "startFrame": 1, "endFrame": 1281, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_cup_2", "path": "sequences/cup_2", "startFrame": 1, "endFrame": 182, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_cup_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_dog", "path": "sequences/dog", "startFrame": 1, "endFrame": 1030, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_1", "path": "sequences/dog_1", "startFrame": 1, "endFrame": 168, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_1.txt", "object_class": "dog", 'occlusion': False}, {"name": "nfs_dog_2", "path": "sequences/dog_2", "startFrame": 1, "endFrame": 594, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_2.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dog_3", "path": "sequences/dog_3", "startFrame": 1, "endFrame": 200, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dog_3.txt", "object_class": 
"dog", 'occlusion': False}, {"name": "nfs_dogs", "path": "sequences/dogs", "startFrame": 1, "endFrame": 198, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dogs.txt", "object_class": "dog", 'occlusion': True}, {"name": "nfs_dollar", "path": "sequences/dollar", "startFrame": 1, "endFrame": 1426, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_dollar.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_drone", "path": "sequences/drone", "startFrame": 1, "endFrame": 70, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_drone.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_ducks_lake", "path": "sequences/ducks_lake", "startFrame": 1, "endFrame": 107, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ducks_lake.txt", "object_class": "bird", 'occlusion': False}, {"name": "nfs_exit", "path": "sequences/exit", "startFrame": 1, "endFrame": 359, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_exit.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_first", "path": "sequences/first", "startFrame": 1, "endFrame": 435, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_first.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_flower", "path": "sequences/flower", "startFrame": 1, "endFrame": 448, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_flower.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_footbal_skill", "path": "sequences/footbal_skill", "startFrame": 1, "endFrame": 131, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_footbal_skill.txt", "object_class": "ball", 'occlusion': True}, {"name": "nfs_helicopter", "path": "sequences/helicopter", "startFrame": 1, "endFrame": 310, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_helicopter.txt", "object_class": "aircraft", 'occlusion': False}, {"name": "nfs_horse_jumping", "path": "sequences/horse_jumping", "startFrame": 1, "endFrame": 117, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_jumping.txt", "object_class": "horse", 'occlusion': True}, {"name": 
"nfs_horse_running", "path": "sequences/horse_running", "startFrame": 1, "endFrame": 139, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_horse_running.txt", "object_class": "horse", 'occlusion': False}, {"name": "nfs_iceskating_6", "path": "sequences/iceskating_6", "startFrame": 1, "endFrame": 603, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_iceskating_6.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_jellyfish_5", "path": "sequences/jellyfish_5", "startFrame": 1, "endFrame": 746, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_jellyfish_5.txt", "object_class": "invertebrate", 'occlusion': False}, {"name": "nfs_kid_swing", "path": "sequences/kid_swing", "startFrame": 1, "endFrame": 169, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_kid_swing.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_motorcross", "path": "sequences/motorcross", "startFrame": 1, "endFrame": 39, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross.txt", "object_class": "vehicle", 'occlusion': True}, {"name": "nfs_motorcross_kawasaki", "path": "sequences/motorcross_kawasaki", "startFrame": 1, "endFrame": 65, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_motorcross_kawasaki.txt", "object_class": "vehicle", 'occlusion': False}, {"name": "nfs_parkour", "path": "sequences/parkour", "startFrame": 1, "endFrame": 58, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_parkour.txt", "object_class": "person head", 'occlusion': False}, {"name": "nfs_person_scooter", "path": "sequences/person_scooter", "startFrame": 1, "endFrame": 413, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_person_scooter.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_pingpong_2", "path": "sequences/pingpong_2", "startFrame": 1, "endFrame": 1277, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_7", "path": "sequences/pingpong_7", "startFrame": 1, "endFrame": 1290, "nz": 5, "ext": "jpg", "anno_path": 
"anno/nfs_pingpong_7.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_pingpong_8", "path": "sequences/pingpong_8", "startFrame": 1, "endFrame": 296, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_pingpong_8.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_purse", "path": "sequences/purse", "startFrame": 1, "endFrame": 968, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_purse.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_rubber", "path": "sequences/rubber", "startFrame": 1, "endFrame": 1328, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_rubber.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_running", "path": "sequences/running", "startFrame": 1, "endFrame": 677, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_running_100_m", "path": "sequences/running_100_m", "startFrame": 1, "endFrame": 313, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_100_m_2", "path": "sequences/running_100_m_2", "startFrame": 1, "endFrame": 337, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_100_m_2.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_running_2", "path": "sequences/running_2", "startFrame": 1, "endFrame": 363, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_running_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_shuffleboard_1", "path": "sequences/shuffleboard_1", "startFrame": 1, "endFrame": 42, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_1.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_2", "path": "sequences/shuffleboard_2", "startFrame": 1, "endFrame": 41, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_4", "path": "sequences/shuffleboard_4", "startFrame": 1, "endFrame": 62, 
"nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_5", "path": "sequences/shuffleboard_5", "startFrame": 1, "endFrame": 32, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_5.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffleboard_6", "path": "sequences/shuffleboard_6", "startFrame": 1, "endFrame": 52, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffleboard_6.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_2", "path": "sequences/shuffletable_2", "startFrame": 1, "endFrame": 372, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_2.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_3", "path": "sequences/shuffletable_3", "startFrame": 1, "endFrame": 368, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_3.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_shuffletable_4", "path": "sequences/shuffletable_4", "startFrame": 1, "endFrame": 101, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_shuffletable_4.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_ski_long", "path": "sequences/ski_long", "startFrame": 1, "endFrame": 274, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_ski_long.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_ball", "path": "sequences/soccer_ball", "startFrame": 1, "endFrame": 163, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_2", "path": "sequences/soccer_ball_2", "startFrame": 1, "endFrame": 1934, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_2.txt", "object_class": "ball", 'occlusion': False}, {"name": "nfs_soccer_ball_3", "path": "sequences/soccer_ball_3", "startFrame": 1, "endFrame": 1381, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_ball_3.txt", "object_class": "ball", 'occlusion': 
False}, {"name": "nfs_soccer_player_2", "path": "sequences/soccer_player_2", "startFrame": 1, "endFrame": 475, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_player_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_soccer_player_3", "path": "sequences/soccer_player_3", "startFrame": 1, "endFrame": 319, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_soccer_player_3.txt", "object_class": "person", 'occlusion': True}, {"name": "nfs_stop_sign", "path": "sequences/stop_sign", "startFrame": 1, "endFrame": 302, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_stop_sign.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_suv", "path": "sequences/suv", "startFrame": 1, "endFrame": 2584, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_suv.txt", "object_class": "car", 'occlusion': False}, {"name": "nfs_tiger", "path": "sequences/tiger", "startFrame": 1, "endFrame": 1556, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_tiger.txt", "object_class": "mammal", 'occlusion': False}, {"name": "nfs_walking", "path": "sequences/walking", "startFrame": 1, "endFrame": 555, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_walking_3", "path": "sequences/walking_3", "startFrame": 1, "endFrame": 1427, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_walking_3.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_water_ski_2", "path": "sequences/water_ski_2", "startFrame": 1, "endFrame": 47, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_water_ski_2.txt", "object_class": "person", 'occlusion': False}, {"name": "nfs_yoyo", "path": "sequences/yoyo", "startFrame": 1, "endFrame": 67, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_yoyo.txt", "object_class": "other", 'occlusion': False}, {"name": "nfs_zebra_fish", "path": "sequences/zebra_fish", "startFrame": 1, "endFrame": 671, "nz": 5, "ext": "jpg", "anno_path": "anno/nfs_zebra_fish.txt", "object_class": "fish", 'occlusion': False}, ] 
return sequence_info_list

================================================
FILE: lib/test/evaluation/otbdataset.py
================================================
import numpy as np
from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList
from lib.test.utils.load_text import load_text


class OTBDataset(BaseDataset):
    """ OTB-2015 dataset

    Publication:
        Object Tracking Benchmark
        Wu, Yi, Jongwoo Lim, and Ming-Hsuan Yang
        TPAMI, 2015
        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf

    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html
    """
    def __init__(self):
        super().__init__()
        # Root directory of the locally stored OTB data, resolved from the
        # environment settings (lib.test.evaluation environment config).
        self.base_path = self.env_settings.otb_path
        # Static per-sequence metadata (frame range, annotation file, class).
        self.sequence_info_list = self._get_sequence_info_list()

    def get_sequence_list(self):
        """Return a SequenceList with one Sequence per entry of the info list."""
        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])

    def _construct_sequence(self, sequence_info):
        """Build a Sequence (frame paths + ground-truth boxes) from one info dict.

        The frame file names are zero-padded to ``nz`` digits; frames before
        ``startFrame + initOmit`` are excluded, and the annotation rows are
        sliced by the same offset so frames and boxes stay aligned.
        """
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']                      # zero-padding width of frame numbers
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        # A few sequences (e.g. Tiger1) omit their first frames from evaluation.
        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        # NOTE: OTB has some weird annos which panda cannot handle
        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')

        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],
                        object_class=sequence_info['object_class'])

    def __len__(self):
        """Number of sequences in the dataset."""
        return len(self.sequence_info_list)

    def _get_sequence_info_list(self):
        # Hard-coded OTB-2015 sequence table. Each entry gives the image
        # sub-directory, inclusive frame range, zero-padding width ("nz"),
        # image extension, annotation file, and semantic object class.
        # Sequences sharing a video (Jogging, Skating2, Human4) appear once
        # per annotated target with distinct groundtruth files.
        sequence_info_list = [
            {"name": "Basketball", "path": "Basketball/img", "startFrame": 1, "endFrame": 725, "nz": 4, "ext": "jpg", "anno_path": "Basketball/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Biker", "path": "Biker/img", "startFrame": 1, "endFrame": 142, "nz": 4, "ext": "jpg", "anno_path": "Biker/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Bird1", "path": "Bird1/img", "startFrame": 1, "endFrame": 408, "nz": 4, "ext": "jpg", "anno_path": "Bird1/groundtruth_rect.txt", "object_class": "bird"},
            {"name": "Bird2", "path": "Bird2/img", "startFrame": 1, "endFrame": 99, "nz": 4, "ext": "jpg", "anno_path": "Bird2/groundtruth_rect.txt", "object_class": "bird"},
            {"name": "BlurBody", "path": "BlurBody/img", "startFrame": 1, "endFrame": 334, "nz": 4, "ext": "jpg", "anno_path": "BlurBody/groundtruth_rect.txt", "object_class": "person"},
            {"name": "BlurCar1", "path": "BlurCar1/img", "startFrame": 247, "endFrame": 988, "nz": 4, "ext": "jpg", "anno_path": "BlurCar1/groundtruth_rect.txt", "object_class": "car"},
            {"name": "BlurCar2", "path": "BlurCar2/img", "startFrame": 1, "endFrame": 585, "nz": 4, "ext": "jpg", "anno_path": "BlurCar2/groundtruth_rect.txt", "object_class": "car"},
            {"name": "BlurCar3", "path": "BlurCar3/img", "startFrame": 3, "endFrame": 359, "nz": 4, "ext": "jpg", "anno_path": "BlurCar3/groundtruth_rect.txt", "object_class": "car"},
            {"name": "BlurCar4", "path": "BlurCar4/img", "startFrame": 18, "endFrame": 397, "nz": 4, "ext": "jpg", "anno_path": "BlurCar4/groundtruth_rect.txt", "object_class": "car"},
            {"name": "BlurFace", "path": "BlurFace/img", "startFrame": 1, "endFrame": 493, "nz": 4, "ext": "jpg", "anno_path": "BlurFace/groundtruth_rect.txt", "object_class": "face"},
            {"name": "BlurOwl", "path": "BlurOwl/img", "startFrame": 1, "endFrame": 631, "nz": 4, "ext": "jpg", "anno_path": "BlurOwl/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Board", "path": "Board/img", "startFrame": 1, "endFrame": 698, "nz": 5, "ext": "jpg", "anno_path": "Board/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Bolt", "path": "Bolt/img", "startFrame": 1, "endFrame": 350, "nz": 4, "ext": "jpg", "anno_path": "Bolt/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Bolt2", "path": "Bolt2/img", "startFrame": 1, "endFrame": 293, "nz": 4, "ext": "jpg", "anno_path": "Bolt2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Box", "path": "Box/img", "startFrame": 1, "endFrame": 1161, "nz": 4, "ext": "jpg", "anno_path": "Box/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Boy", "path": "Boy/img", "startFrame": 1, "endFrame": 602, "nz": 4, "ext": "jpg", "anno_path": "Boy/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Car1", "path": "Car1/img", "startFrame": 1, "endFrame": 1020, "nz": 4, "ext": "jpg", "anno_path": "Car1/groundtruth_rect.txt", "object_class": "car"},
            {"name": "Car2", "path": "Car2/img", "startFrame": 1, "endFrame": 913, "nz": 4, "ext": "jpg", "anno_path": "Car2/groundtruth_rect.txt", "object_class": "car"},
            {"name": "Car24", "path": "Car24/img", "startFrame": 1, "endFrame": 3059, "nz": 4, "ext": "jpg", "anno_path": "Car24/groundtruth_rect.txt", "object_class": "car"},
            {"name": "Car4", "path": "Car4/img", "startFrame": 1, "endFrame": 659, "nz": 4, "ext": "jpg", "anno_path": "Car4/groundtruth_rect.txt", "object_class": "car"},
            {"name": "CarDark", "path": "CarDark/img", "startFrame": 1, "endFrame": 393, "nz": 4, "ext": "jpg", "anno_path": "CarDark/groundtruth_rect.txt", "object_class": "car"},
            {"name": "CarScale", "path": "CarScale/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "CarScale/groundtruth_rect.txt", "object_class": "car"},
            {"name": "ClifBar", "path": "ClifBar/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "ClifBar/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Coke", "path": "Coke/img", "startFrame": 1, "endFrame": 291, "nz": 4, "ext": "jpg", "anno_path": "Coke/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Couple", "path": "Couple/img", "startFrame": 1, "endFrame": 140, "nz": 4, "ext": "jpg", "anno_path": "Couple/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Coupon", "path": "Coupon/img", "startFrame": 1, "endFrame": 327, "nz": 4, "ext": "jpg", "anno_path": "Coupon/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Crossing", "path": "Crossing/img", "startFrame": 1, "endFrame": 120, "nz": 4, "ext": "jpg", "anno_path": "Crossing/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Crowds", "path": "Crowds/img", "startFrame": 1, "endFrame": 347, "nz": 4, "ext": "jpg", "anno_path": "Crowds/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Dancer", "path": "Dancer/img", "startFrame": 1, "endFrame": 225, "nz": 4, "ext": "jpg", "anno_path": "Dancer/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Dancer2", "path": "Dancer2/img", "startFrame": 1, "endFrame": 150, "nz": 4, "ext": "jpg", "anno_path": "Dancer2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "David", "path": "David/img", "startFrame": 300, "endFrame": 770, "nz": 4, "ext": "jpg", "anno_path": "David/groundtruth_rect.txt", "object_class": "face"},
            {"name": "David2", "path": "David2/img", "startFrame": 1, "endFrame": 537, "nz": 4, "ext": "jpg", "anno_path": "David2/groundtruth_rect.txt", "object_class": "face"},
            {"name": "David3", "path": "David3/img", "startFrame": 1, "endFrame": 252, "nz": 4, "ext": "jpg", "anno_path": "David3/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Deer", "path": "Deer/img", "startFrame": 1, "endFrame": 71, "nz": 4, "ext": "jpg", "anno_path": "Deer/groundtruth_rect.txt", "object_class": "mammal"},
            {"name": "Diving", "path": "Diving/img", "startFrame": 1, "endFrame": 215, "nz": 4, "ext": "jpg", "anno_path": "Diving/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Dog", "path": "Dog/img", "startFrame": 1, "endFrame": 127, "nz": 4, "ext": "jpg", "anno_path": "Dog/groundtruth_rect.txt", "object_class": "dog"},
            {"name": "Dog1", "path": "Dog1/img", "startFrame": 1, "endFrame": 1350, "nz": 4, "ext": "jpg", "anno_path": "Dog1/groundtruth_rect.txt", "object_class": "dog"},
            {"name": "Doll", "path": "Doll/img", "startFrame": 1, "endFrame": 3872, "nz": 4, "ext": "jpg", "anno_path": "Doll/groundtruth_rect.txt", "object_class": "other"},
            {"name": "DragonBaby", "path": "DragonBaby/img", "startFrame": 1, "endFrame": 113, "nz": 4, "ext": "jpg", "anno_path": "DragonBaby/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Dudek", "path": "Dudek/img", "startFrame": 1, "endFrame": 1145, "nz": 4, "ext": "jpg", "anno_path": "Dudek/groundtruth_rect.txt", "object_class": "face"},
            {"name": "FaceOcc1", "path": "FaceOcc1/img", "startFrame": 1, "endFrame": 892, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc1/groundtruth_rect.txt", "object_class": "face"},
            {"name": "FaceOcc2", "path": "FaceOcc2/img", "startFrame": 1, "endFrame": 812, "nz": 4, "ext": "jpg", "anno_path": "FaceOcc2/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Fish", "path": "Fish/img", "startFrame": 1, "endFrame": 476, "nz": 4, "ext": "jpg", "anno_path": "Fish/groundtruth_rect.txt", "object_class": "other"},
            {"name": "FleetFace", "path": "FleetFace/img", "startFrame": 1, "endFrame": 707, "nz": 4, "ext": "jpg", "anno_path": "FleetFace/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Football", "path": "Football/img", "startFrame": 1, "endFrame": 362, "nz": 4, "ext": "jpg", "anno_path": "Football/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Football1", "path": "Football1/img", "startFrame": 1, "endFrame": 74, "nz": 4, "ext": "jpg", "anno_path": "Football1/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Freeman1", "path": "Freeman1/img", "startFrame": 1, "endFrame": 326, "nz": 4, "ext": "jpg", "anno_path": "Freeman1/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Freeman3", "path": "Freeman3/img", "startFrame": 1, "endFrame": 460, "nz": 4, "ext": "jpg", "anno_path": "Freeman3/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Freeman4", "path": "Freeman4/img", "startFrame": 1, "endFrame": 283, "nz": 4, "ext": "jpg", "anno_path": "Freeman4/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Girl", "path": "Girl/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Girl/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Girl2", "path": "Girl2/img", "startFrame": 1, "endFrame": 1500, "nz": 4, "ext": "jpg", "anno_path": "Girl2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Gym", "path": "Gym/img", "startFrame": 1, "endFrame": 767, "nz": 4, "ext": "jpg", "anno_path": "Gym/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human2", "path": "Human2/img", "startFrame": 1, "endFrame": 1128, "nz": 4, "ext": "jpg", "anno_path": "Human2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human3", "path": "Human3/img", "startFrame": 1, "endFrame": 1698, "nz": 4, "ext": "jpg", "anno_path": "Human3/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human4_2", "path": "Human4/img", "startFrame": 1, "endFrame": 667, "nz": 4, "ext": "jpg", "anno_path": "Human4/groundtruth_rect.2.txt", "object_class": "person"},
            {"name": "Human5", "path": "Human5/img", "startFrame": 1, "endFrame": 713, "nz": 4, "ext": "jpg", "anno_path": "Human5/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human6", "path": "Human6/img", "startFrame": 1, "endFrame": 792, "nz": 4, "ext": "jpg", "anno_path": "Human6/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human7", "path": "Human7/img", "startFrame": 1, "endFrame": 250, "nz": 4, "ext": "jpg", "anno_path": "Human7/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human8", "path": "Human8/img", "startFrame": 1, "endFrame": 128, "nz": 4, "ext": "jpg", "anno_path": "Human8/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Human9", "path": "Human9/img", "startFrame": 1, "endFrame": 305, "nz": 4, "ext": "jpg", "anno_path": "Human9/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Ironman", "path": "Ironman/img", "startFrame": 1, "endFrame": 166, "nz": 4, "ext": "jpg", "anno_path": "Ironman/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Jogging_1", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.1.txt", "object_class": "person"},
            {"name": "Jogging_2", "path": "Jogging/img", "startFrame": 1, "endFrame": 307, "nz": 4, "ext": "jpg", "anno_path": "Jogging/groundtruth_rect.2.txt", "object_class": "person"},
            {"name": "Jump", "path": "Jump/img", "startFrame": 1, "endFrame": 122, "nz": 4, "ext": "jpg", "anno_path": "Jump/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Jumping", "path": "Jumping/img", "startFrame": 1, "endFrame": 313, "nz": 4, "ext": "jpg", "anno_path": "Jumping/groundtruth_rect.txt", "object_class": "face"},
            {"name": "KiteSurf", "path": "KiteSurf/img", "startFrame": 1, "endFrame": 84, "nz": 4, "ext": "jpg", "anno_path": "KiteSurf/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Lemming", "path": "Lemming/img", "startFrame": 1, "endFrame": 1336, "nz": 4, "ext": "jpg", "anno_path": "Lemming/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Liquor", "path": "Liquor/img", "startFrame": 1, "endFrame": 1741, "nz": 4, "ext": "jpg", "anno_path": "Liquor/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Man", "path": "Man/img", "startFrame": 1, "endFrame": 134, "nz": 4, "ext": "jpg", "anno_path": "Man/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Matrix", "path": "Matrix/img", "startFrame": 1, "endFrame": 100, "nz": 4, "ext": "jpg", "anno_path": "Matrix/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Mhyang", "path": "Mhyang/img", "startFrame": 1, "endFrame": 1490, "nz": 4, "ext": "jpg", "anno_path": "Mhyang/groundtruth_rect.txt", "object_class": "face"},
            {"name": "MotorRolling", "path": "MotorRolling/img", "startFrame": 1, "endFrame": 164, "nz": 4, "ext": "jpg", "anno_path": "MotorRolling/groundtruth_rect.txt", "object_class": "vehicle"},
            {"name": "MountainBike", "path": "MountainBike/img", "startFrame": 1, "endFrame": 228, "nz": 4, "ext": "jpg", "anno_path": "MountainBike/groundtruth_rect.txt", "object_class": "bicycle"},
            {"name": "Panda", "path": "Panda/img", "startFrame": 1, "endFrame": 1000, "nz": 4, "ext": "jpg", "anno_path": "Panda/groundtruth_rect.txt", "object_class": "mammal"},
            {"name": "RedTeam", "path": "RedTeam/img", "startFrame": 1, "endFrame": 1918, "nz": 4, "ext": "jpg", "anno_path": "RedTeam/groundtruth_rect.txt", "object_class": "vehicle"},
            {"name": "Rubik", "path": "Rubik/img", "startFrame": 1, "endFrame": 1997, "nz": 4, "ext": "jpg", "anno_path": "Rubik/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Shaking", "path": "Shaking/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Shaking/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Singer1", "path": "Singer1/img", "startFrame": 1, "endFrame": 351, "nz": 4, "ext": "jpg", "anno_path": "Singer1/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Singer2", "path": "Singer2/img", "startFrame": 1, "endFrame": 366, "nz": 4, "ext": "jpg", "anno_path": "Singer2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Skater", "path": "Skater/img", "startFrame": 1, "endFrame": 160, "nz": 4, "ext": "jpg", "anno_path": "Skater/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Skater2", "path": "Skater2/img", "startFrame": 1, "endFrame": 435, "nz": 4, "ext": "jpg", "anno_path": "Skater2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Skating1", "path": "Skating1/img", "startFrame": 1, "endFrame": 400, "nz": 4, "ext": "jpg", "anno_path": "Skating1/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Skating2_1", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.1.txt", "object_class": "person"},
            {"name": "Skating2_2", "path": "Skating2/img", "startFrame": 1, "endFrame": 473, "nz": 4, "ext": "jpg", "anno_path": "Skating2/groundtruth_rect.2.txt", "object_class": "person"},
            {"name": "Skiing", "path": "Skiing/img", "startFrame": 1, "endFrame": 81, "nz": 4, "ext": "jpg", "anno_path": "Skiing/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Soccer", "path": "Soccer/img", "startFrame": 1, "endFrame": 392, "nz": 4, "ext": "jpg", "anno_path": "Soccer/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Subway", "path": "Subway/img", "startFrame": 1, "endFrame": 175, "nz": 4, "ext": "jpg", "anno_path": "Subway/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Surfer", "path": "Surfer/img", "startFrame": 1, "endFrame": 376, "nz": 4, "ext": "jpg", "anno_path": "Surfer/groundtruth_rect.txt", "object_class": "person head"},
            {"name": "Suv", "path": "Suv/img", "startFrame": 1, "endFrame": 945, "nz": 4, "ext": "jpg", "anno_path": "Suv/groundtruth_rect.txt", "object_class": "car"},
            {"name": "Sylvester", "path": "Sylvester/img", "startFrame": 1, "endFrame": 1345, "nz": 4, "ext": "jpg", "anno_path": "Sylvester/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Tiger1", "path": "Tiger1/img", "startFrame": 1, "endFrame": 354, "nz": 4, "ext": "jpg", "anno_path": "Tiger1/groundtruth_rect.txt", "initOmit": 5, "object_class": "other"},
            {"name": "Tiger2", "path": "Tiger2/img", "startFrame": 1, "endFrame": 365, "nz": 4, "ext": "jpg", "anno_path": "Tiger2/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Toy", "path": "Toy/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Toy/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Trans", "path": "Trans/img", "startFrame": 1, "endFrame": 124, "nz": 4, "ext": "jpg", "anno_path": "Trans/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Trellis", "path": "Trellis/img", "startFrame": 1, "endFrame": 569, "nz": 4, "ext": "jpg", "anno_path": "Trellis/groundtruth_rect.txt", "object_class": "face"},
            {"name": "Twinnings", "path": "Twinnings/img", "startFrame": 1, "endFrame": 472, "nz": 4, "ext": "jpg", "anno_path": "Twinnings/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Vase", "path": "Vase/img", "startFrame": 1, "endFrame": 271, "nz": 4, "ext": "jpg", "anno_path": "Vase/groundtruth_rect.txt", "object_class": "other"},
            {"name": "Walking", "path": "Walking/img", "startFrame": 1, "endFrame": 412, "nz": 4, "ext": "jpg", "anno_path": "Walking/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Walking2", "path": "Walking2/img", "startFrame": 1, "endFrame": 500, "nz": 4, "ext": "jpg", "anno_path": "Walking2/groundtruth_rect.txt", "object_class": "person"},
            {"name": "Woman", "path": "Woman/img", "startFrame": 1, "endFrame": 597, "nz": 4, "ext": "jpg", "anno_path": "Woman/groundtruth_rect.txt", "object_class": "person"}
        ]
        return sequence_info_list

================================================
FILE: lib/test/evaluation/running.py
================================================
import numpy as np
import multiprocessing
import os
import sys
from itertools import product
from collections import OrderedDict
from lib.test.evaluation import Sequence, Tracker
import torch


def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):
    """Saves the output of the tracker."""
    if not os.path.exists(tracker.results_dir):
        print("create tracking result dir:", tracker.results_dir)
        os.makedirs(tracker.results_dir)
    if seq.dataset in ['trackingnet', 'got10k']:
        if not os.path.exists(os.path.join(tracker.results_dir, seq.dataset)):
            os.makedirs(os.path.join(tracker.results_dir, seq.dataset))
    '''2021.1.5 create new folder for these two datasets'''
    if seq.dataset in ['trackingnet', 'got10k']:
        base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)
    else:
        base_results_path = os.path.join(tracker.results_dir, seq.name)

    def save_bb(file, data):
        tracked_bb = np.array(data).astype(int)
np.savetxt(file, tracked_bb, delimiter='\t', fmt='%d') def save_time(file, data): exec_times = np.array(data).astype(float) np.savetxt(file, exec_times, delimiter='\t', fmt='%f') def save_score(file, data): scores = np.array(data).astype(float) np.savetxt(file, scores, delimiter='\t', fmt='%.2f') def _convert_dict(input_dict): data_dict = {} for elem in input_dict: for k, v in elem.items(): if k in data_dict.keys(): data_dict[k].append(v) else: data_dict[k] = [v, ] return data_dict for key, data in output.items(): # If data is empty if not data: continue if key == 'target_bbox': if isinstance(data[0], (dict, OrderedDict)): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): bbox_file = '{}_{}.txt'.format(base_results_path, obj_id) save_bb(bbox_file, d) else: # Single-object mode bbox_file = '{}.txt'.format(base_results_path) save_bb(bbox_file, data) if key == 'all_boxes': if isinstance(data[0], (dict, OrderedDict)): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): bbox_file = '{}_{}_all_boxes.txt'.format(base_results_path, obj_id) save_bb(bbox_file, d) else: # Single-object mode bbox_file = '{}_all_boxes.txt'.format(base_results_path) save_bb(bbox_file, data) if key == 'all_scores': if isinstance(data[0], (dict, OrderedDict)): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): bbox_file = '{}_{}_all_scores.txt'.format(base_results_path, obj_id) save_score(bbox_file, d) else: # Single-object mode print("saving scores...") bbox_file = '{}_all_scores.txt'.format(base_results_path) save_score(bbox_file, data) elif key == 'time': if isinstance(data[0], dict): data_dict = _convert_dict(data) for obj_id, d in data_dict.items(): timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id) save_time(timings_file, d) else: timings_file = '{}_time.txt'.format(base_results_path) save_time(timings_file, data) def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8): """Runs a tracker on a sequence.""" 
'''2021.1.2 Add multiple gpu support''' try: worker_name = multiprocessing.current_process().name worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1 gpu_id = worker_id % num_gpu torch.cuda.set_device(gpu_id) except: pass def _results_exist(): if seq.object_ids is None: if seq.dataset in ['trackingnet', 'got10k']: base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name) bbox_file = '{}.txt'.format(base_results_path) else: bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name) return os.path.isfile(bbox_file) else: bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids] missing = [not os.path.isfile(f) for f in bbox_files] return sum(missing) == 0 if _results_exist() and not debug: print('FPS: {}'.format(-1)) return print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name)) if debug: output = tracker.run_sequence(seq, debug=debug) else: try: output = tracker.run_sequence(seq, debug=debug) except Exception as e: print(e) return sys.stdout.flush() if isinstance(output['time'][0], (dict, OrderedDict)): exec_time = sum([sum(times.values()) for times in output['time']]) num_frames = len(output['time']) else: exec_time = sum(output['time']) num_frames = len(output['time']) print('FPS: {}'.format(num_frames / exec_time)) if not debug: _save_tracker_output(seq, tracker, output) def run_dataset(dataset, trackers, debug=False, threads=0, num_gpus=8): """Runs a list of trackers on a dataset. args: dataset: List of Sequence instances, forming a dataset. trackers: List of Tracker instances. debug: Debug level. threads: Number of threads to use (default 0). 
""" multiprocessing.set_start_method('spawn', force=True) print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset))) multiprocessing.set_start_method('spawn', force=True) if threads == 0: mode = 'sequential' else: mode = 'parallel' if mode == 'sequential': for seq in dataset: for tracker_info in trackers: run_sequence(seq, tracker_info, debug=debug) elif mode == 'parallel': param_list = [(seq, tracker_info, debug, num_gpus) for seq, tracker_info in product(dataset, trackers)] with multiprocessing.Pool(processes=threads) as pool: pool.starmap(run_sequence, param_list) print('Done') ================================================ FILE: lib/test/evaluation/tc128cedataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList import os import glob import six class TC128CEDataset(BaseDataset): """ TC-128 Dataset (78 newly added sequences) modified from the implementation in got10k-toolkit (https://github.com/got-10k/toolkit) """ def __init__(self): super().__init__() self.base_path = self.env_settings.tc128_path self.anno_files = sorted(glob.glob( os.path.join(self.base_path, '*/*_gt.txt'))) """filter the newly added sequences (_ce)""" self.anno_files = [s for s in self.anno_files if "_ce" in s] self.seq_dirs = [os.path.dirname(f) for f in self.anno_files] self.seq_names = [os.path.basename(d) for d in self.seq_dirs] # valid frame range for each sequence self.range_files = [glob.glob(os.path.join(d, '*_frames.txt'))[0] for d in self.seq_dirs] def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.seq_names]) def _construct_sequence(self, sequence_name): if isinstance(sequence_name, six.string_types): if not sequence_name in self.seq_names: raise Exception('Sequence {} not found.'.format(sequence_name)) index = self.seq_names.index(sequence_name) # load valid frame range frames = np.loadtxt(self.range_files[index], 
dtype=int, delimiter=',') img_files = [os.path.join(self.seq_dirs[index], 'img/%04d.jpg' % f) for f in range(frames[0], frames[1] + 1)] # load annotations anno = np.loadtxt(self.anno_files[index], delimiter=',') assert len(img_files) == len(anno) assert anno.shape[1] == 4 # return img_files, anno return Sequence(sequence_name, img_files, 'tc128', anno.reshape(-1, 4)) def __len__(self): return len(self.seq_names) ================================================ FILE: lib/test/evaluation/tc128dataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList import os import glob import six class TC128Dataset(BaseDataset): """ TC-128 Dataset modified from the implementation in got10k-toolkit (https://github.com/got-10k/toolkit) """ def __init__(self): super().__init__() self.base_path = self.env_settings.tc128_path self.anno_files = sorted(glob.glob( os.path.join(self.base_path, '*/*_gt.txt'))) self.seq_dirs = [os.path.dirname(f) for f in self.anno_files] self.seq_names = [os.path.basename(d) for d in self.seq_dirs] # valid frame range for each sequence self.range_files = [glob.glob(os.path.join(d, '*_frames.txt'))[0] for d in self.seq_dirs] def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.seq_names]) def _construct_sequence(self, sequence_name): if isinstance(sequence_name, six.string_types): if not sequence_name in self.seq_names: raise Exception('Sequence {} not found.'.format(sequence_name)) index = self.seq_names.index(sequence_name) # load valid frame range frames = np.loadtxt(self.range_files[index], dtype=int, delimiter=',') img_files = [os.path.join(self.seq_dirs[index], 'img/%04d.jpg' % f) for f in range(frames[0], frames[1] + 1)] # load annotations anno = np.loadtxt(self.anno_files[index], delimiter=',') assert len(img_files) == len(anno) assert anno.shape[1] == 4 # return img_files, anno return Sequence(sequence_name, img_files, 
'tc128', anno.reshape(-1, 4)) def __len__(self): return len(self.seq_names) ================================================ FILE: lib/test/evaluation/tnl2kdataset.py ================================================ import os import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text, load_str ############ # current 00000492.png of test_015_Sord_video_Q01_done is damaged and replaced by a copy of 00000491.png ############ class TNL2kDataset(BaseDataset): """ TNL2k test set """ def __init__(self): super().__init__() self.base_path = self.env_settings.tnl2k_path self.sequence_list = self._get_sequence_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): # class_name = sequence_name.split('-')[0] anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64) text_dsp_path = '{}/{}/language.txt'.format(self.base_path, sequence_name) text_dsp = load_str(text_dsp_path) frames_path = '{}/{}/imgs'.format(self.base_path, sequence_name) frames_list = [f for f in os.listdir(frames_path)] frames_list = sorted(frames_list) frames_list = ['{}/{}'.format(frames_path, frame_i) for frame_i in frames_list] # target_class = class_name return Sequence(sequence_name, frames_list, 'tnl2k', ground_truth_rect.reshape(-1, 4), text_dsp=text_dsp) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self): sequence_list = [] for seq in os.listdir(self.base_path): if os.path.isdir(os.path.join(self.base_path, seq)): sequence_list.append(seq) return sequence_list ================================================ FILE: lib/test/evaluation/tracker.py ================================================ import importlib import os from collections import OrderedDict from lib.test.evaluation.environment import 
env_settings import time import cv2 as cv from lib.utils.lmdb_utils import decode_img from pathlib import Path import numpy as np def trackerlist(name: str, parameter_name: str, dataset_name: str, run_ids = None, display_name: str = None, result_only=False): """Generate list of trackers. args: name: Name of tracking method. parameter_name: Name of parameter file. run_ids: A single or list of run_ids. display_name: Name to be displayed in the result plots. """ if run_ids is None or isinstance(run_ids, int): run_ids = [run_ids] return [Tracker(name, parameter_name, dataset_name, run_id, display_name, result_only) for run_id in run_ids] class Tracker: """Wraps the tracker for evaluation and running purposes. args: name: Name of tracking method. parameter_name: Name of parameter file. run_id: The run id. display_name: Name to be displayed in the result plots. """ def __init__(self, name: str, parameter_name: str, dataset_name: str, run_id: int = None, display_name: str = None, result_only=False): assert run_id is None or isinstance(run_id, int) self.name = name self.parameter_name = parameter_name self.dataset_name = dataset_name self.run_id = run_id self.display_name = display_name env = env_settings() if self.run_id is None: self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name) else: self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id) if result_only: self.results_dir = '{}/{}'.format(env.results_path, self.name) tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', '%s.py' % self.name)) if os.path.isfile(tracker_module_abspath): tracker_module = importlib.import_module('lib.test.tracker.{}'.format(self.name)) self.tracker_class = tracker_module.get_tracker_class() else: self.tracker_class = None def create_tracker(self, params): tracker = self.tracker_class(params, self.dataset_name) return tracker def run_sequence(self, seq, 
debug=None): """Run tracker on sequence. args: seq: Sequence to run the tracker on. visualization: Set visualization flag (None means default value specified in the parameters). debug: Set debug level (None means default value specified in the parameters). multiobj_mode: Which mode to use for multiple objects. """ params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ # Get init information init_info = seq.init_info() tracker = self.create_tracker(params) output = self._track_sequence(tracker, seq, init_info) return output def _track_sequence(self, tracker, seq, init_info): # Define outputs # Each field in output is a list containing tracker prediction for each frame. # In case of single object tracking mode: # target_bbox[i] is the predicted bounding box for frame i # time[i] is the processing time for frame i # In case of multi object tracking mode: # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in # frame i # time[i] is either the processing time for frame i, or an OrderedDict containing processing times for each # object in frame i output = {'target_bbox': [], 'time': []} if tracker.params.save_all_boxes: output['all_boxes'] = [] output['all_scores'] = [] def _store_outputs(tracker_out: dict, defaults=None): defaults = {} if defaults is None else defaults for key in output.keys(): val = tracker_out.get(key, defaults.get(key, None)) if key in tracker_out or val is not None: output[key].append(val) # Initialize image = self._read_image(seq.frames[0]) start_time = time.time() out = tracker.initialize(image, init_info) if out is None: out = {} prev_output = OrderedDict(out) init_default = {'target_bbox': init_info.get('init_bbox'), 'time': time.time() - start_time} if tracker.params.save_all_boxes: init_default['all_boxes'] = out['all_boxes'] init_default['all_scores'] = out['all_scores'] _store_outputs(out, init_default) for frame_num, 
frame_path in enumerate(seq.frames[1:], start=1): image = self._read_image(frame_path) start_time = time.time() info = seq.frame_info(frame_num) info['previous_output'] = prev_output if len(seq.ground_truth_rect) > 1: info['gt_bbox'] = seq.ground_truth_rect[frame_num] out = tracker.track(image, info) prev_output = OrderedDict(out) _store_outputs(out, {'time': time.time() - start_time}) for key in ['target_bbox', 'all_boxes', 'all_scores']: if key in output and len(output[key]) <= 1: output.pop(key) return output def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None, save_results=False): """Run the tracker with the vieofile. args: debug: Debug level. """ params = self.get_parameters() debug_ = debug if debug is None: debug_ = getattr(params, 'debug', 0) params.debug = debug_ params.tracker_name = self.name params.param_name = self.parameter_name # self._init_visdom(visdom_info, debug_) multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default')) if multiobj_mode == 'default': tracker = self.create_tracker(params) elif multiobj_mode == 'parallel': tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True) else: raise ValueError('Unknown multi object mode {}'.format(multiobj_mode)) assert os.path.isfile(videofilepath), "Invalid param {}".format(videofilepath) ", videofilepath must be a valid videofile" output_boxes = [] cap = cv.VideoCapture(videofilepath) display_name = 'Display: ' + tracker.params.tracker_name cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) cv.resizeWindow(display_name, 960, 720) success, frame = cap.read() cv.imshow(display_name, frame) def _build_init_info(box): return {'init_bbox': box} if success is not True: print("Read frame from {} failed.".format(videofilepath)) exit(-1) if optional_box is not None: assert isinstance(optional_box, (list, tuple)) assert len(optional_box) == 4, "valid box's foramt is [x,y,w,h]" 
tracker.initialize(frame, _build_init_info(optional_box)) output_boxes.append(optional_box) else: while True: # cv.waitKey() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) output_boxes.append(init_state) break while True: ret, frame = cap.read() if frame is None: break frame_disp = frame.copy() # Draw box out = tracker.track(frame) state = [int(s) for s in out['target_bbox']] output_boxes.append(state) cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]), (0, 255, 0), 5) font_color = (0, 0, 0) cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1) # Display the resulting frame cv.imshow(display_name, frame_disp) key = cv.waitKey(1) if key == ord('q'): break elif key == ord('r'): ret, frame = cap.read() frame_disp = frame.copy() cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 1) cv.imshow(display_name, frame_disp) x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False) init_state = [x, y, w, h] tracker.initialize(frame, _build_init_info(init_state)) output_boxes.append(init_state) # When everything done, release the capture cap.release() cv.destroyAllWindows() if save_results: if not os.path.exists(self.results_dir): os.makedirs(self.results_dir) video_name = Path(videofilepath).stem base_results_path = os.path.join(self.results_dir, 'video_{}'.format(video_name)) tracked_bb = np.array(output_boxes).astype(int) bbox_file = '{}.txt'.format(base_results_path) 
np.savetxt(bbox_file, tracked_bb, delimiter='\t', fmt='%d') def get_parameters(self): """Get parameters.""" param_module = importlib.import_module('lib.test.parameter.{}'.format(self.name)) params = param_module.parameters(self.parameter_name) return params def _read_image(self, image_file: str): if isinstance(image_file, str): im = cv.imread(image_file) return cv.cvtColor(im, cv.COLOR_BGR2RGB) elif isinstance(image_file, list) and len(image_file) == 2: return decode_img(image_file[0], image_file[1]) else: raise ValueError("type of image_file should be str or list") ================================================ FILE: lib/test/evaluation/trackingnetdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList import os from lib.test.utils.load_text import load_text class TrackingNetDataset(BaseDataset): """ TrackingNet test set. Publication: TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild. Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem ECCV, 2018 https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit. 
""" def __init__(self): super().__init__() self.base_path = self.env_settings.trackingnet_path sets = 'TEST' if not isinstance(sets, (list, tuple)): if sets == 'TEST': sets = ['TEST'] elif sets == 'TRAIN': sets = ['TRAIN_{}'.format(i) for i in range(5)] self.sequence_list = self._list_sequences(self.base_path, sets) def get_sequence_list(self): return SequenceList([self._construct_sequence(set, seq_name) for set, seq_name in self.sequence_list]) def _construct_sequence(self, set, sequence_name): anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy') frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name) frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")] frame_list.sort(key=lambda f: int(f[:-4])) frames_list = [os.path.join(frames_path, frame) for frame in frame_list] return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4)) def __len__(self): return len(self.sequence_list) def _list_sequences(self, root, set_ids): sequence_list = [] for s in set_ids: anno_dir = os.path.join(root, s, "anno") sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')] sequence_list += sequences_cur_set return sequence_list ================================================ FILE: lib/test/evaluation/uavdataset.py ================================================ import numpy as np from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList from lib.test.utils.load_text import load_text class UAVDataset(BaseDataset): """ UAV123 dataset. Publication: A Benchmark and Simulator for UAV Tracking. 
Matthias Mueller, Neil Smith and Bernard Ghanem ECCV, 2016 https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx """ def __init__(self): super().__init__() self.base_path = self.env_settings.uav_path self.sequence_info_list = self._get_sequence_info_list() def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list]) def _construct_sequence(self, sequence_info): sequence_path = sequence_info['path'] nz = sequence_info['nz'] ext = sequence_info['ext'] start_frame = sequence_info['startFrame'] end_frame = sequence_info['endFrame'] init_omit = 0 if 'initOmit' in sequence_info: init_omit = sequence_info['initOmit'] frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)] anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path']) ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy') return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:], object_class=sequence_info['object_class']) def __len__(self): return len(self.sequence_info_list) def _get_sequence_info_list(self): sequence_info_list = [ {"name": "uav_bike1", "path": "data_seq/UAV123/bike1", "startFrame": 1, "endFrame": 3085, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike1.txt", "object_class": "vehicle"}, {"name": "uav_bike2", "path": "data_seq/UAV123/bike2", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike2.txt", "object_class": "vehicle"}, {"name": "uav_bike3", "path": "data_seq/UAV123/bike3", "startFrame": 1, "endFrame": 433, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bike3.txt", "object_class": "vehicle"}, {"name": 
"uav_bird1_1", "path": "data_seq/UAV123/bird1", "startFrame": 1, "endFrame": 253, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_1.txt", "object_class": "bird"}, {"name": "uav_bird1_2", "path": "data_seq/UAV123/bird1", "startFrame": 775, "endFrame": 1477, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_2.txt", "object_class": "bird"}, {"name": "uav_bird1_3", "path": "data_seq/UAV123/bird1", "startFrame": 1573, "endFrame": 2437, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/bird1_3.txt", "object_class": "bird"}, {"name": "uav_boat1", "path": "data_seq/UAV123/boat1", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat1.txt", "object_class": "vessel"}, {"name": "uav_boat2", "path": "data_seq/UAV123/boat2", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat2.txt", "object_class": "vessel"}, {"name": "uav_boat3", "path": "data_seq/UAV123/boat3", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat3.txt", "object_class": "vessel"}, {"name": "uav_boat4", "path": "data_seq/UAV123/boat4", "startFrame": 1, "endFrame": 553, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat4.txt", "object_class": "vessel"}, {"name": "uav_boat5", "path": "data_seq/UAV123/boat5", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat5.txt", "object_class": "vessel"}, {"name": "uav_boat6", "path": "data_seq/UAV123/boat6", "startFrame": 1, "endFrame": 805, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat6.txt", "object_class": "vessel"}, {"name": "uav_boat7", "path": "data_seq/UAV123/boat7", "startFrame": 1, "endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat7.txt", "object_class": "vessel"}, {"name": "uav_boat8", "path": "data_seq/UAV123/boat8", "startFrame": 1, "endFrame": 685, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat8.txt", "object_class": "vessel"}, {"name": "uav_boat9", "path": 
"data_seq/UAV123/boat9", "startFrame": 1, "endFrame": 1399, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/boat9.txt", "object_class": "vessel"}, {"name": "uav_building1", "path": "data_seq/UAV123/building1", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building1.txt", "object_class": "other"}, {"name": "uav_building2", "path": "data_seq/UAV123/building2", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building2.txt", "object_class": "other"}, {"name": "uav_building3", "path": "data_seq/UAV123/building3", "startFrame": 1, "endFrame": 829, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building3.txt", "object_class": "other"}, {"name": "uav_building4", "path": "data_seq/UAV123/building4", "startFrame": 1, "endFrame": 787, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building4.txt", "object_class": "other"}, {"name": "uav_building5", "path": "data_seq/UAV123/building5", "startFrame": 1, "endFrame": 481, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/building5.txt", "object_class": "other"}, {"name": "uav_car1_1", "path": "data_seq/UAV123/car1", "startFrame": 1, "endFrame": 751, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_1.txt", "object_class": "car"}, {"name": "uav_car1_2", "path": "data_seq/UAV123/car1", "startFrame": 751, "endFrame": 1627, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_2.txt", "object_class": "car"}, {"name": "uav_car1_3", "path": "data_seq/UAV123/car1", "startFrame": 1627, "endFrame": 2629, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_3.txt", "object_class": "car"}, {"name": "uav_car10", "path": "data_seq/UAV123/car10", "startFrame": 1, "endFrame": 1405, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car10.txt", "object_class": "car"}, {"name": "uav_car11", "path": "data_seq/UAV123/car11", "startFrame": 1, "endFrame": 337, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car11.txt", "object_class": "car"}, {"name": "uav_car12", "path": 
"data_seq/UAV123/car12", "startFrame": 1, "endFrame": 499, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car12.txt", "object_class": "car"}, {"name": "uav_car13", "path": "data_seq/UAV123/car13", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car13.txt", "object_class": "car"}, {"name": "uav_car14", "path": "data_seq/UAV123/car14", "startFrame": 1, "endFrame": 1327, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car14.txt", "object_class": "car"}, {"name": "uav_car15", "path": "data_seq/UAV123/car15", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car15.txt", "object_class": "car"}, {"name": "uav_car16_1", "path": "data_seq/UAV123/car16", "startFrame": 1, "endFrame": 415, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_1.txt", "object_class": "car"}, {"name": "uav_car16_2", "path": "data_seq/UAV123/car16", "startFrame": 415, "endFrame": 1993, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car16_2.txt", "object_class": "car"}, {"name": "uav_car17", "path": "data_seq/UAV123/car17", "startFrame": 1, "endFrame": 1057, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car17.txt", "object_class": "car"}, {"name": "uav_car18", "path": "data_seq/UAV123/car18", "startFrame": 1, "endFrame": 1207, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car18.txt", "object_class": "car"}, {"name": "uav_car1_s", "path": "data_seq/UAV123/car1_s", "startFrame": 1, "endFrame": 1475, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car1_s.txt", "object_class": "car"}, {"name": "uav_car2", "path": "data_seq/UAV123/car2", "startFrame": 1, "endFrame": 1321, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car2.txt", "object_class": "car"}, {"name": "uav_car2_s", "path": "data_seq/UAV123/car2_s", "startFrame": 1, "endFrame": 320, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car2_s.txt", "object_class": "car"}, {"name": "uav_car3", "path": "data_seq/UAV123/car3", "startFrame": 1, "endFrame": 1717, "nz": 6, 
"ext": "jpg", "anno_path": "anno/UAV123/car3.txt", "object_class": "car"}, {"name": "uav_car3_s", "path": "data_seq/UAV123/car3_s", "startFrame": 1, "endFrame": 1300, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car3_s.txt", "object_class": "car"}, {"name": "uav_car4", "path": "data_seq/UAV123/car4", "startFrame": 1, "endFrame": 1345, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4.txt", "object_class": "car"}, {"name": "uav_car4_s", "path": "data_seq/UAV123/car4_s", "startFrame": 1, "endFrame": 830, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car4_s.txt", "object_class": "car"}, {"name": "uav_car5", "path": "data_seq/UAV123/car5", "startFrame": 1, "endFrame": 745, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car5.txt", "object_class": "car"}, {"name": "uav_car6_1", "path": "data_seq/UAV123/car6", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_1.txt", "object_class": "car"}, {"name": "uav_car6_2", "path": "data_seq/UAV123/car6", "startFrame": 487, "endFrame": 1807, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_2.txt", "object_class": "car"}, {"name": "uav_car6_3", "path": "data_seq/UAV123/car6", "startFrame": 1807, "endFrame": 2953, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_3.txt", "object_class": "car"}, {"name": "uav_car6_4", "path": "data_seq/UAV123/car6", "startFrame": 2953, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_4.txt", "object_class": "car"}, {"name": "uav_car6_5", "path": "data_seq/UAV123/car6", "startFrame": 3925, "endFrame": 4861, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car6_5.txt", "object_class": "car"}, {"name": "uav_car7", "path": "data_seq/UAV123/car7", "startFrame": 1, "endFrame": 1033, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car7.txt", "object_class": "car"}, {"name": "uav_car8_1", "path": "data_seq/UAV123/car8", "startFrame": 1, "endFrame": 1357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_1.txt", 
"object_class": "car"}, {"name": "uav_car8_2", "path": "data_seq/UAV123/car8", "startFrame": 1357, "endFrame": 2575, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car8_2.txt", "object_class": "car"}, {"name": "uav_car9", "path": "data_seq/UAV123/car9", "startFrame": 1, "endFrame": 1879, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/car9.txt", "object_class": "car"}, {"name": "uav_group1_1", "path": "data_seq/UAV123/group1", "startFrame": 1, "endFrame": 1333, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_1.txt", "object_class": "person"}, {"name": "uav_group1_2", "path": "data_seq/UAV123/group1", "startFrame": 1333, "endFrame": 2515, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_2.txt", "object_class": "person"}, {"name": "uav_group1_3", "path": "data_seq/UAV123/group1", "startFrame": 2515, "endFrame": 3925, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_3.txt", "object_class": "person"}, {"name": "uav_group1_4", "path": "data_seq/UAV123/group1", "startFrame": 3925, "endFrame": 4873, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group1_4.txt", "object_class": "person"}, {"name": "uav_group2_1", "path": "data_seq/UAV123/group2", "startFrame": 1, "endFrame": 907, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_1.txt", "object_class": "person"}, {"name": "uav_group2_2", "path": "data_seq/UAV123/group2", "startFrame": 907, "endFrame": 1771, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_2.txt", "object_class": "person"}, {"name": "uav_group2_3", "path": "data_seq/UAV123/group2", "startFrame": 1771, "endFrame": 2683, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group2_3.txt", "object_class": "person"}, {"name": "uav_group3_1", "path": "data_seq/UAV123/group3", "startFrame": 1, "endFrame": 1567, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_1.txt", "object_class": "person"}, {"name": "uav_group3_2", "path": "data_seq/UAV123/group3", "startFrame": 1567, "endFrame": 2827, "nz": 6, "ext": "jpg", 
"anno_path": "anno/UAV123/group3_2.txt", "object_class": "person"}, {"name": "uav_group3_3", "path": "data_seq/UAV123/group3", "startFrame": 2827, "endFrame": 4369, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_3.txt", "object_class": "person"}, {"name": "uav_group3_4", "path": "data_seq/UAV123/group3", "startFrame": 4369, "endFrame": 5527, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/group3_4.txt", "object_class": "person"}, {"name": "uav_person1", "path": "data_seq/UAV123/person1", "startFrame": 1, "endFrame": 799, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1.txt", "object_class": "person"}, {"name": "uav_person10", "path": "data_seq/UAV123/person10", "startFrame": 1, "endFrame": 1021, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person10.txt", "object_class": "person"}, {"name": "uav_person11", "path": "data_seq/UAV123/person11", "startFrame": 1, "endFrame": 721, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person11.txt", "object_class": "person"}, {"name": "uav_person12_1", "path": "data_seq/UAV123/person12", "startFrame": 1, "endFrame": 601, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_1.txt", "object_class": "person"}, {"name": "uav_person12_2", "path": "data_seq/UAV123/person12", "startFrame": 601, "endFrame": 1621, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person12_2.txt", "object_class": "person"}, {"name": "uav_person13", "path": "data_seq/UAV123/person13", "startFrame": 1, "endFrame": 883, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person13.txt", "object_class": "person"}, {"name": "uav_person14_1", "path": "data_seq/UAV123/person14", "startFrame": 1, "endFrame": 847, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_1.txt", "object_class": "person"}, {"name": "uav_person14_2", "path": "data_seq/UAV123/person14", "startFrame": 847, "endFrame": 1813, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_2.txt", "object_class": "person"}, {"name": "uav_person14_3", "path": 
"data_seq/UAV123/person14", "startFrame": 1813, "endFrame": 2923, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_3.txt", "object_class": "person"}, {"name": "uav_person15", "path": "data_seq/UAV123/person15", "startFrame": 1, "endFrame": 1339, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person15.txt", "object_class": "person"}, {"name": "uav_person16", "path": "data_seq/UAV123/person16", "startFrame": 1, "endFrame": 1147, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person16.txt", "object_class": "person"}, {"name": "uav_person17_1", "path": "data_seq/UAV123/person17", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_1.txt", "object_class": "person"}, {"name": "uav_person17_2", "path": "data_seq/UAV123/person17", "startFrame": 1501, "endFrame": 2347, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_2.txt", "object_class": "person"}, {"name": "uav_person18", "path": "data_seq/UAV123/person18", "startFrame": 1, "endFrame": 1393, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person18.txt", "object_class": "person"}, {"name": "uav_person19_1", "path": "data_seq/UAV123/person19", "startFrame": 1, "endFrame": 1243, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_1.txt", "object_class": "person"}, {"name": "uav_person19_2", "path": "data_seq/UAV123/person19", "startFrame": 1243, "endFrame": 2791, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_2.txt", "object_class": "person"}, {"name": "uav_person19_3", "path": "data_seq/UAV123/person19", "startFrame": 2791, "endFrame": 4357, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_3.txt", "object_class": "person"}, {"name": "uav_person1_s", "path": "data_seq/UAV123/person1_s", "startFrame": 1, "endFrame": 1600, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person1_s.txt", "object_class": "person"}, {"name": "uav_person2_1", "path": "data_seq/UAV123/person2", "startFrame": 1, "endFrame": 1189, "nz": 6, "ext": "jpg", 
"anno_path": "anno/UAV123/person2_1.txt", "object_class": "person"}, {"name": "uav_person2_2", "path": "data_seq/UAV123/person2", "startFrame": 1189, "endFrame": 2623, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_2.txt", "object_class": "person"}, {"name": "uav_person20", "path": "data_seq/UAV123/person20", "startFrame": 1, "endFrame": 1783, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person20.txt", "object_class": "person"}, {"name": "uav_person21", "path": "data_seq/UAV123/person21", "startFrame": 1, "endFrame": 487, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person21.txt", "object_class": "person"}, {"name": "uav_person22", "path": "data_seq/UAV123/person22", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person22.txt", "object_class": "person"}, {"name": "uav_person23", "path": "data_seq/UAV123/person23", "startFrame": 1, "endFrame": 397, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person23.txt", "object_class": "person"}, {"name": "uav_person2_s", "path": "data_seq/UAV123/person2_s", "startFrame": 1, "endFrame": 250, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person2_s.txt", "object_class": "person"}, {"name": "uav_person3", "path": "data_seq/UAV123/person3", "startFrame": 1, "endFrame": 643, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3.txt", "object_class": "person"}, {"name": "uav_person3_s", "path": "data_seq/UAV123/person3_s", "startFrame": 1, "endFrame": 505, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person3_s.txt", "object_class": "person"}, {"name": "uav_person4_1", "path": "data_seq/UAV123/person4", "startFrame": 1, "endFrame": 1501, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person4_1.txt", "object_class": "person"}, {"name": "uav_person4_2", "path": "data_seq/UAV123/person4", "startFrame": 1501, "endFrame": 2743, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person4_2.txt", "object_class": "person"}, {"name": "uav_person5_1", "path": 
"data_seq/UAV123/person5", "startFrame": 1, "endFrame": 877, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_1.txt", "object_class": "person"}, {"name": "uav_person5_2", "path": "data_seq/UAV123/person5", "startFrame": 877, "endFrame": 2101, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person5_2.txt", "object_class": "person"}, {"name": "uav_person6", "path": "data_seq/UAV123/person6", "startFrame": 1, "endFrame": 901, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person6.txt", "object_class": "person"}, {"name": "uav_person7_1", "path": "data_seq/UAV123/person7", "startFrame": 1, "endFrame": 1249, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_1.txt", "object_class": "person"}, {"name": "uav_person7_2", "path": "data_seq/UAV123/person7", "startFrame": 1249, "endFrame": 2065, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person7_2.txt", "object_class": "person"}, {"name": "uav_person8_1", "path": "data_seq/UAV123/person8", "startFrame": 1, "endFrame": 1075, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_1.txt", "object_class": "person"}, {"name": "uav_person8_2", "path": "data_seq/UAV123/person8", "startFrame": 1075, "endFrame": 1525, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person8_2.txt", "object_class": "person"}, {"name": "uav_person9", "path": "data_seq/UAV123/person9", "startFrame": 1, "endFrame": 661, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person9.txt", "object_class": "person"}, {"name": "uav_truck1", "path": "data_seq/UAV123/truck1", "startFrame": 1, "endFrame": 463, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck1.txt", "object_class": "truck"}, {"name": "uav_truck2", "path": "data_seq/UAV123/truck2", "startFrame": 1, "endFrame": 385, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck2.txt", "object_class": "truck"}, {"name": "uav_truck3", "path": "data_seq/UAV123/truck3", "startFrame": 1, "endFrame": 535, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck3.txt", "object_class": 
"truck"}, {"name": "uav_truck4_1", "path": "data_seq/UAV123/truck4", "startFrame": 1, "endFrame": 577, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_1.txt", "object_class": "truck"}, {"name": "uav_truck4_2", "path": "data_seq/UAV123/truck4", "startFrame": 577, "endFrame": 1261, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/truck4_2.txt", "object_class": "truck"}, {"name": "uav_uav1_1", "path": "data_seq/UAV123/uav1", "startFrame": 1, "endFrame": 1555, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_1.txt", "object_class": "aircraft"}, {"name": "uav_uav1_2", "path": "data_seq/UAV123/uav1", "startFrame": 1555, "endFrame": 2377, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_2.txt", "object_class": "aircraft"}, {"name": "uav_uav1_3", "path": "data_seq/UAV123/uav1", "startFrame": 2473, "endFrame": 3469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav1_3.txt", "object_class": "aircraft"}, {"name": "uav_uav2", "path": "data_seq/UAV123/uav2", "startFrame": 1, "endFrame": 133, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav2.txt", "object_class": "aircraft"}, {"name": "uav_uav3", "path": "data_seq/UAV123/uav3", "startFrame": 1, "endFrame": 265, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav3.txt", "object_class": "aircraft"}, {"name": "uav_uav4", "path": "data_seq/UAV123/uav4", "startFrame": 1, "endFrame": 157, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav4.txt", "object_class": "aircraft"}, {"name": "uav_uav5", "path": "data_seq/UAV123/uav5", "startFrame": 1, "endFrame": 139, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav5.txt", "object_class": "aircraft"}, {"name": "uav_uav6", "path": "data_seq/UAV123/uav6", "startFrame": 1, "endFrame": 109, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav6.txt", "object_class": "aircraft"}, {"name": "uav_uav7", "path": "data_seq/UAV123/uav7", "startFrame": 1, "endFrame": 373, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav7.txt", "object_class": "aircraft"}, {"name": 
"uav_uav8", "path": "data_seq/UAV123/uav8", "startFrame": 1, "endFrame": 301, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/uav8.txt", "object_class": "aircraft"}, {"name": "uav_wakeboard1", "path": "data_seq/UAV123/wakeboard1", "startFrame": 1, "endFrame": 421, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard1.txt", "object_class": "person"}, {"name": "uav_wakeboard10", "path": "data_seq/UAV123/wakeboard10", "startFrame": 1, "endFrame": 469, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard10.txt", "object_class": "person"}, {"name": "uav_wakeboard2", "path": "data_seq/UAV123/wakeboard2", "startFrame": 1, "endFrame": 733, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard2.txt", "object_class": "person"}, {"name": "uav_wakeboard3", "path": "data_seq/UAV123/wakeboard3", "startFrame": 1, "endFrame": 823, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard3.txt", "object_class": "person"}, {"name": "uav_wakeboard4", "path": "data_seq/UAV123/wakeboard4", "startFrame": 1, "endFrame": 697, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard4.txt", "object_class": "person"}, {"name": "uav_wakeboard5", "path": "data_seq/UAV123/wakeboard5", "startFrame": 1, "endFrame": 1675, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard5.txt", "object_class": "person"}, {"name": "uav_wakeboard6", "path": "data_seq/UAV123/wakeboard6", "startFrame": 1, "endFrame": 1165, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard6.txt", "object_class": "person"}, {"name": "uav_wakeboard7", "path": "data_seq/UAV123/wakeboard7", "startFrame": 1, "endFrame": 199, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard7.txt", "object_class": "person"}, {"name": "uav_wakeboard8", "path": "data_seq/UAV123/wakeboard8", "startFrame": 1, "endFrame": 1543, "nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard8.txt", "object_class": "person"}, {"name": "uav_wakeboard9", "path": "data_seq/UAV123/wakeboard9", "startFrame": 1, "endFrame": 355, 
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard9.txt", "object_class": "person"} ] return sequence_info_list ================================================ FILE: lib/test/evaluation/votdataset.py ================================================ from typing import Union, TextIO import numpy as np from numba import jit from lib.test.evaluation.data import SequenceList, BaseDataset, Sequence class VOTDataset(BaseDataset): """ VOT2018 dataset Publication: The sixth Visual Object Tracking VOT2018 challenge results. Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir, Goutam Bhat, Alan Lukezic et al. ECCV, 2018 https://prints.vicos.si/publications/365 Download the dataset from http://www.votchallenge.net/vot2018/dataset.html """ def __init__(self, year=18): super().__init__() self.year = year if year == 18: self.base_path = self.env_settings.vot18_path elif year == 20: self.base_path = self.env_settings.vot20_path elif year == 22: self.base_path = self.env_settings.vot22_path self.sequence_list = self._get_sequence_list(year) def get_sequence_list(self): return SequenceList([self._construct_sequence(s) for s in self.sequence_list]) def _construct_sequence(self, sequence_name): sequence_path = sequence_name nz = 8 ext = 'jpg' start_frame = 1 anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name) if self.year == 18 or self.year == 22: try: ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64) except: ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64) end_frame = ground_truth_rect.shape[0] frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame, end_frame+1)] # Convert gt if ground_truth_rect.shape[1] > 4: gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]] gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]] x1 = 
np.amin(gt_x_all, 1).reshape(-1,1) y1 = np.amin(gt_y_all, 1).reshape(-1,1) x2 = np.amax(gt_x_all, 1).reshape(-1,1) y2 = np.amax(gt_y_all, 1).reshape(-1,1) ground_truth_rect = np.concatenate((x1, y1, x2-x1, y2-y1), 1) elif self.year == 20: ground_truth_rect = read_file(str(anno_path)) ground_truth_rect = np.array(ground_truth_rect, dtype=np.float64) end_frame = ground_truth_rect.shape[0] frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame, end_frame + 1)] else: raise NotImplementedError return Sequence(sequence_name, frames, 'vot', ground_truth_rect) def __len__(self): return len(self.sequence_list) def _get_sequence_list(self, year): if year == 18: sequence_list= ['ants1', 'ants3', 'bag', 'ball1', 'ball2', 'basketball', 'birds1', 'blanket', 'bmx', 'bolt1', 'bolt2', 'book', 'butterfly', 'car1', 'conduction1', 'crabs1', 'crossing', 'dinosaur', 'drone_across', 'drone_flip', 'drone1', 'fernando', 'fish1', 'fish2', 'fish3', 'flamingo1', 'frisbee', 'girl', 'glove', 'godfather', 'graduate', 'gymnastics1', 'gymnastics2', 'gymnastics3', 'hand', 'handball1', 'handball2', 'helicopter', 'iceskater1', 'iceskater2', 'leaves', 'matrix', 'motocross1', 'motocross2', 'nature', 'pedestrian1', 'rabbit', 'racing', 'road', 'shaking', 'sheep', 'singer2', 'singer3', 'soccer1', 'soccer2', 'soldier', 'tiger', 'traffic', 'wiper', 'zebrafish1'] elif year == 20: sequence_list= ['agility', 'ants1', 'ball2', 'ball3', 'basketball', 'birds1', 'bolt1', 'book', 'butterfly', 'car1', 'conduction1', 'crabs1', 'dinosaur', 'dribble', 'drone1', 'drone_across', 'drone_flip', 'fernando', 'fish1', 'fish2', 'flamingo1', 'frisbee', 'girl', 'glove', 'godfather', 'graduate', 'gymnastics1', 'gymnastics2', 'gymnastics3', 'hand', 'hand02', 'hand2', 'handball1', 'handball2', 'helicopter', 'iceskater1', 'iceskater2', 'lamb', 'leaves', 'marathon', 'matrix', 'monkey', 
'motocross1', 'nature', 'polo', 'rabbit', 'rabbit2', 'road', 'rowing', 'shaking', 'singer2', 'singer3', 'soccer1', 'soccer2', 'soldier', 'surfing', 'tiger', 'wheel', 'wiper', 'zebrafish1'] elif year == 22: sequence_list= ['agility', 'animal', 'ants1', 'bag', 'ball2', 'ball3', 'basketball', 'birds1', 'birds2', 'bolt1', 'book', 'bubble', 'butterfly', 'car1', 'conduction1', 'crabs1', 'dinosaur', 'diver', 'drone1', 'drone_across', 'fernando', 'fish1', 'fish2', 'flamingo1', 'frisbee', 'girl', 'graduate', 'gymnastics1', 'gymnastics2', 'gymnastics3', 'hand', 'hand2', 'handball1', 'handball2', 'helicopter', 'iceskater1', 'iceskater2', 'kangaroo', 'lamb', 'leaves', 'marathon', 'matrix', 'monkey', 'motocross1', 'nature', 'polo', 'rabbit', 'rabbit2', 'rowing', 'shaking', 'singer2', 'singer3', 'snake', 'soccer1', 'soccer2', 'soldier', 'surfing', 'tennis', 'tiger', 'wheel', 'wiper', 'zebrafish1'] else: raise NotImplementedError return sequence_list def parse(string): """ parse string to the appropriate region format and return region object """ from vot.region.shapes import Rectangle, Polygon, Mask if string[0] == 'm': # input is a mask - decode it m_, offset_, region = create_mask_from_string(string[1:].split(',')) # return Mask(m_, offset=offset_) return region else: # input is not a mask - check if special, rectangle or polygon raise NotImplementedError print('Unknown region format.') return None def read_file(fp: Union[str, TextIO]): if isinstance(fp, str): with open(fp) as file: lines = file.readlines() else: lines = fp.readlines() regions = [] # iterate over all lines in the file for i, line in enumerate(lines): regions.append(parse(line.strip())) return regions def create_mask_from_string(mask_encoding): """ mask_encoding: a string in the following format: x0, y0, w, h, RLE output: mask, offset mask: 2-D binary mask, size defined in the mask encoding offset: (x, y) offset of the mask in the image coordinates """ elements = [int(el) for el in mask_encoding] tl_x, tl_y, 
region_w, region_h = elements[:4] rle = np.array([el for el in elements[4:]], dtype=np.int32) # create mask from RLE within target region mask = rle_to_mask(rle, region_w, region_h) region = [tl_x, tl_y, region_w, region_h] return mask, (tl_x, tl_y), region @jit(nopython=True) def rle_to_mask(rle, width, height): """ rle: input rle mask encoding each evenly-indexed element represents number of consecutive 0s each oddly indexed element represents number of consecutive 1s width and height are dimensions of the mask output: 2-D binary mask """ # allocate list of zeros v = [0] * (width * height) # set id of the last different element to the beginning of the vector idx_ = 0 for i in range(len(rle)): if i % 2 != 0: # write as many 1s as RLE says (zeros are already in the vector) for j in range(rle[i]): v[idx_+j] = 1 idx_ += rle[i] ================================================ FILE: lib/test/parameter/__init__.py ================================================ ================================================ FILE: lib/test/parameter/artrack.py ================================================ from lib.test.utils import TrackerParams import os from lib.test.evaluation.environment import env_settings from lib.config.artrack.config import cfg, update_config_from_file def parameters(yaml_name: str): params = TrackerParams() prj_dir = env_settings().prj_dir save_dir = env_settings().save_dir # update default config from yaml file yaml_file = os.path.join(prj_dir, 'experiments/artrack/%s.yaml' % yaml_name) update_config_from_file(yaml_file) params.cfg = cfg print("test config: ", cfg) # template and search region params.template_factor = cfg.TEST.TEMPLATE_FACTOR params.template_size = cfg.TEST.TEMPLATE_SIZE params.search_factor = cfg.TEST.SEARCH_FACTOR params.search_size = cfg.TEST.SEARCH_SIZE # Network checkpoint path params.checkpoint = os.path.join(save_dir, "checkpoints/train/artrack/%s/ARTrack_ep%04d.pth.tar" % (yaml_name, cfg.TEST.EPOCH)) # whether to save boxes from 
all queries params.save_all_boxes = False return params ================================================ FILE: lib/test/parameter/artrack_seq.py ================================================ from lib.test.utils import TrackerParams import os from lib.test.evaluation.environment import env_settings from lib.config.artrack_seq.config import cfg, update_config_from_file def parameters(yaml_name: str): params = TrackerParams() prj_dir = env_settings().prj_dir save_dir = env_settings().save_dir # update default config from yaml file yaml_file = os.path.join(prj_dir, 'experiments/artrack_seq/%s.yaml' % yaml_name) update_config_from_file(yaml_file) params.cfg = cfg print("test config: ", cfg) # template and search region params.template_factor = cfg.TEST.TEMPLATE_FACTOR params.template_size = cfg.TEST.TEMPLATE_SIZE params.search_factor = cfg.TEST.SEARCH_FACTOR params.search_size = cfg.TEST.SEARCH_SIZE # Network checkpoint path params.checkpoint = os.path.join(save_dir, "checkpoints/train/artrack_seq/%s/ARTrackSeq_ep%04d.pth.tar" % (yaml_name, cfg.TEST.EPOCH)) # whether to save boxes from all queries params.save_all_boxes = False return params ================================================ FILE: lib/test/parameter/artrackv2.py ================================================ from lib.test.utils import TrackerParams import os from lib.test.evaluation.environment import env_settings from lib.config.artrackv2.config import cfg, update_config_from_file def parameters(yaml_name: str): params = TrackerParams() prj_dir = env_settings().prj_dir save_dir = env_settings().save_dir # update default config from yaml file yaml_file = os.path.join(prj_dir, 'experiments/artrackv2/%s.yaml' % yaml_name) update_config_from_file(yaml_file) params.cfg = cfg print("test config: ", cfg) # template and search region params.template_factor = cfg.TEST.TEMPLATE_FACTOR params.template_size = cfg.TEST.TEMPLATE_SIZE params.search_factor = cfg.TEST.SEARCH_FACTOR params.search_size = 
cfg.TEST.SEARCH_SIZE # Network checkpoint path params.checkpoint = os.path.join(save_dir, "checkpoints/train/artrackv2/%s/ARTrackV2_ep%04d.pth.tar" % (yaml_name, cfg.TEST.EPOCH)) # whether to save boxes from all queries params.save_all_boxes = False return params ================================================ FILE: lib/test/parameter/artrackv2_seq.py ================================================ from lib.test.utils import TrackerParams import os from lib.test.evaluation.environment import env_settings from lib.config.artrackv2_seq.config import cfg, update_config_from_file def parameters(yaml_name: str): params = TrackerParams() prj_dir = env_settings().prj_dir save_dir = env_settings().save_dir # update default config from yaml file yaml_file = os.path.join(prj_dir, 'experiments/artrackv2_seq/%s.yaml' % yaml_name) update_config_from_file(yaml_file) params.cfg = cfg print("test config: ", cfg) # template and search region params.template_factor = cfg.TEST.TEMPLATE_FACTOR params.template_size = cfg.TEST.TEMPLATE_SIZE params.search_factor = cfg.TEST.SEARCH_FACTOR params.search_size = cfg.TEST.SEARCH_SIZE # Network checkpoint path params.checkpoint = os.path.join(save_dir, "checkpoints/train/artrackv2_seq/%s/ARTrackV2Seq_ep%04d.pth.tar" % (yaml_name, cfg.TEST.EPOCH)) # whether to save boxes from all queries params.save_all_boxes = False return params ================================================ FILE: lib/test/tracker/__init__.py ================================================ ================================================ FILE: lib/test/tracker/artrack.py ================================================ import math from lib.models.artrack import build_artrack from lib.test.tracker.basetracker import BaseTracker import torch from lib.test.tracker.vis_utils import gen_visualization from lib.test.utils.hann import hann2d from lib.train.data.processing_utils import sample_target # for debug import cv2 import os from lib.test.tracker.data_utils import 
Preprocessor from lib.utils.box_ops import clip_box from lib.utils.ce_utils import generate_mask_cond import random class RandomErasing(object): def __init__(self, EPSILON=0.5, sl=0.02, sh=0.33, r1=0.3, mean=[0.4914, 0.4822, 0.4465]): self.EPSILON = EPSILON self.mean = mean self.sl = sl self.sh = sh self.r1 = r1 def __call__(self, img): if random.uniform(0, 1) > self.EPSILON: return img for attempt in range(100): print(img.size()) area = img.size()[1] * img.size()[2] target_area = random.uniform(self.sl, self.sh) * area aspect_ratio = random.uniform(self.r1, 1 / self.r1) h = int(round(math.sqrt(target_area * aspect_ratio))) w = int(round(math.sqrt(target_area / aspect_ratio))) if w < img.size()[2] and h < img.size()[1]: x1 = random.randint(0, img.size()[1] - h) y1 = random.randint(0, img.size()[2] - w) if img.size()[0] == 3: # img[0, x1:x1+h, y1:y1+w] = random.uniform(0, 1) # img[1, x1:x1+h, y1:y1+w] = random.uniform(0, 1) # img[2, x1:x1+h, y1:y1+w] = random.uniform(0, 1) img[0, x1:x1 + h, y1:y1 + w] = self.mean[0] img[1, x1:x1 + h, y1:y1 + w] = self.mean[1] img[2, x1:x1 + h, y1:y1 + w] = self.mean[2] # img[:, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(3, h, w)) else: img[0, x1:x1 + h, y1:y1 + w] = self.mean[1] # img[0, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(1, h, w)) return img return img class ARTrack(BaseTracker): def __init__(self, params, dataset_name): super(ARTrack, self).__init__(params) network = build_artrack(params.cfg, training=False) print(self.params.checkpoint) network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True) self.cfg = params.cfg self.bins = self.cfg.MODEL.BINS self.network = network.cuda() self.network.eval() self.preprocessor = Preprocessor() self.state = None self.range = self.cfg.MODEL.RANGE self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE # motion constrain self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), 
centered=True).cuda() # for debug self.debug = params.debug self.use_visdom = params.debug self.frame_id = 0 self.erase = RandomErasing() if self.debug: if not self.use_visdom: self.save_dir = "debug" if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) else: # self.add_hook() self._init_visdom(None, 1) # for save boxes from all queries self.save_all_boxes = params.save_all_boxes self.z_dict1 = {} def initialize(self, image, info: dict): # forward the template once z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor, output_sz=self.params.template_size)#output_sz=self.params.template_size self.z_patch_arr = z_patch_arr template = self.preprocessor.process(z_patch_arr, z_amask_arr) with torch.no_grad(): self.z_dict1 = template self.box_mask_z = None #if self.cfg.MODEL.BACKBONE.CE_LOC: # template_bbox = self.transform_bbox_to_crop(info['init_bbox'], resize_factor, # template.tensors.device).squeeze(1) # self.box_mask_z = generate_mask_cond(self.cfg, 1, template.tensors.device, template_bbox) # save states self.state = info['init_bbox'] self.frame_id = 0 if self.save_all_boxes: '''save all predicted boxes''' all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES return {"all_boxes": all_boxes_save} def track(self, image, info: dict = None): magic_num = (self.range - 1) * 0.5 H, W, _ = image.shape self.frame_id += 1 x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor, output_sz=self.params.search_size) # (x1, y1, w, h) search = self.preprocessor.process(x_patch_arr, x_amask_arr) with torch.no_grad(): x_dict = search # merge the template and the search # run the transformer out_dict = self.network.forward( template=self.z_dict1.tensors, search=x_dict.tensors) # add hann windows # pred_score_map = out_dict['score_map'] # response = self.output_window * pred_score_map # pred_boxes = self.network.box_head.cal_bbox(response, 
out_dict['size_map'], out_dict['offset_map']) # pred_boxes = pred_boxes.view(-1, 4) pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - magic_num pred_boxes = pred_boxes.view(-1, 4).mean(dim=0) pred_new = pred_boxes pred_new[2] = pred_boxes[2] - pred_boxes[0] pred_new[3] = pred_boxes[3] - pred_boxes[1] pred_new[0] = pred_boxes[0] + pred_boxes[2]/2 pred_new[1] = pred_boxes[1] + pred_boxes[3]/2 pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist() # Baseline: Take the mean of all pred boxes as the final result #pred_box = (pred_boxes.mean( # dim=0) * self.params.search_size / resize_factor).tolist() # (cx, cy, w, h) [0,1] # get the final box result self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10) # for debug if self.debug: if not self.use_visdom: x1, y1, w, h = self.state image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) cv2.rectangle(image_BGR, (int(x1),int(y1)), (int(x1+w),int(y1+h)), color=(0,0,255), thickness=2) save_path = os.path.join(self.save_dir, "%04d.jpg" % self.frame_id) cv2.imwrite(save_path, image_BGR) else: self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking') self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region') self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template') self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map') self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map_hann') if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']: removed_indexes_s = out_dict['removed_indexes_s'] removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s] masked_search = gen_visualization(x_patch_arr, removed_indexes_s) self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search') 
while self.pause_mode: if self.step: self.step = False break if self.save_all_boxes: '''save all predictions''' all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor) all_boxes_save = all_boxes.view(-1).tolist() # (4N, ) return {"target_bbox": self.state, "all_boxes": all_boxes_save} else: return {"target_bbox": self.state} def map_box_back(self, pred_box: list, resize_factor: float): cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3] cx, cy, w, h = pred_box half_side = 0.5 * self.params.search_size / resize_factor cx_real = cx + (cx_prev - half_side) cy_real = cy + (cy_prev - half_side) #cx_real = cx + cx_prev #cy_real = cy + cy_prev return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h] def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float): cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3] cx, cy, w, h = pred_box.unbind(-1) # (N,4) --> (N,) half_side = 0.5 * self.params.search_size / resize_factor cx_real = cx + (cx_prev - half_side) cy_real = cy + (cy_prev - half_side) return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1) def add_hook(self): conv_features, enc_attn_weights, dec_attn_weights = [], [], [] for i in range(12): self.network.backbone.blocks[i].attn.register_forward_hook( # lambda self, input, output: enc_attn_weights.append(output[1]) lambda self, input, output: enc_attn_weights.append(output[1]) ) self.enc_attn_weights = enc_attn_weights def get_tracker_class(): return ARTrack ================================================ FILE: lib/test/tracker/artrack_seq.py ================================================ import math from lib.models.artrack_seq import build_artrack_seq from lib.test.tracker.basetracker import BaseTracker import torch from lib.test.tracker.vis_utils import gen_visualization from lib.test.utils.hann import hann2d from lib.train.data.processing_utils import 
from lib.train.data.processing_utils import sample_target, transform_image_to_crop
# for debug
import cv2
import os

from lib.test.tracker.data_utils import Preprocessor
from lib.utils.box_ops import clip_box
from lib.utils.ce_utils import generate_mask_cond


class ARTrackSeq(BaseTracker):
    # Sequence-level ARTrack tracker: each frame, the boxes of the previous
    # `save_all` frames are quantized into coordinate tokens and fed to the
    # network ("sequence" stage) together with the cached search feature.

    def __init__(self, params, dataset_name):
        super(ARTrackSeq, self).__init__(params)
        network = build_artrack_seq(params.cfg, training=False)
        print(self.params.checkpoint)
        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)
        self.cfg = params.cfg
        self.bins = self.cfg.MODEL.BINS  # number of coordinate quantization bins
        self.network = network.cuda()
        self.network.eval()
        self.preprocessor = Preprocessor()
        self.state = None  # current box as [x1, y1, w, h] in image coordinates

        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE
        # motion constrain
        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()

        # for debug
        self.debug = params.debug
        self.use_visdom = params.debug
        self.frame_id = 0
        if self.debug:
            if not self.use_visdom:
                self.save_dir = "debug"
                if not os.path.exists(self.save_dir):
                    os.makedirs(self.save_dir)
            else:
                # self.add_hook()
                self._init_visdom(None, 1)
        # for save boxes from all queries
        self.save_all_boxes = params.save_all_boxes
        self.z_dict1 = {}
        self.store_result = None  # rolling buffer of the last `save_all` predicted boxes
        self.save_all = 7
        self.x_feat = None  # cached search-region feature reused across frames
        self.update = None
        # NOTE(review): update/update_threshold/update_intervals are never read
        # in this file — presumably leftovers of a template-update scheme; confirm.
        self.update_threshold = 5.0
        self.update_intervals = 1

    def initialize(self, image, info: dict):
        # forward the template once
        self.x_feat = None
        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,
                                                                output_sz=self.params.template_size)  # output_sz=self.params.template_size
        self.z_patch_arr = z_patch_arr
        template = self.preprocessor.process(z_patch_arr, z_amask_arr)
        with torch.no_grad():
            self.z_dict1 = template

        self.box_mask_z = None
        # if self.cfg.MODEL.BACKBONE.CE_LOC:
        #     template_bbox = self.transform_bbox_to_crop(info['init_bbox'], resize_factor,
        #                                                 template.tensors.device).squeeze(1)
        #     self.box_mask_z = generate_mask_cond(self.cfg, 1, template.tensors.device, template_bbox)

        # save states
        self.state = info['init_bbox']
        # seed the history buffer with `save_all` copies of the initial box
        self.store_result = [info['init_bbox'].copy()]
        for i in range(self.save_all - 1):
            self.store_result.append(info['init_bbox'].copy())
        self.frame_id = 0
        self.update = None
        if self.save_all_boxes:
            '''save all predicted boxes'''
            all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES
            return {"all_boxes": all_boxes_save}

    def track(self, image, info: dict = None):
        H, W, _ = image.shape
        self.frame_id += 1
        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,
                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)
        # Encode the box history into the token sequence expected by the
        # "sequence" stage: image coords -> crop coords -> (x1, y1, x2, y2)
        # -> clamp to [-0.5, 1.5] -> map to [0, bins - 1].
        for i in range(len(self.store_result)):
            box_temp = self.store_result[i].copy()  # NOTE(review): unused local
            box_out_i = transform_image_to_crop(torch.Tensor(self.store_result[i]), torch.Tensor(self.state),
                                                resize_factor,
                                                torch.Tensor([self.cfg.TEST.SEARCH_SIZE, self.cfg.TEST.SEARCH_SIZE]),
                                                normalize=True)
            box_out_i[2] = box_out_i[2] + box_out_i[0]
            box_out_i[3] = box_out_i[3] + box_out_i[1]
            box_out_i = box_out_i.clamp(min=-0.5, max=1.5)
            box_out_i = (box_out_i + 0.5) * (self.bins - 1)
            if i == 0:
                seqs_out = box_out_i
            else:
                seqs_out = torch.cat((seqs_out, box_out_i), dim=-1)
        seqs_out = seqs_out.unsqueeze(0)

        search = self.preprocessor.process(x_patch_arr, x_amask_arr)
        with torch.no_grad():
            x_dict = search
            # merge the template and the search
            # run the transformer
            out_dict = self.network.forward(
                template=self.z_dict1.tensors, search=x_dict.tensors, seq_input=seqs_out, stage="sequence",
                search_feature=self.x_feat, update=None)
        self.x_feat = out_dict['x_feat']

        # De-quantize the 4 predicted tokens to normalized corners in [-0.5, 1.5].
        pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - 0.5
        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)
        # Corner -> center format.  pred_new ALIASES pred_boxes, so the [2]/[3]
        # writes (w, h) land before [0]/[1] read them — order matters here.
        pred_new = pred_boxes
        pred_new[2] = pred_boxes[2] - pred_boxes[0]
        pred_new[3] = pred_boxes[3] - pred_boxes[1]
        pred_new[0] = pred_boxes[0] + pred_new[2] / 2
        pred_new[1] = pred_boxes[1] + pred_new[3] / 2
        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()
        # Baseline: Take the mean of all pred boxes as the final result
        # pred_box = (pred_boxes.mean(
        #     dim=0) * self.params.search_size / resize_factor).tolist()  # (cx, cy, w, h) [0,1]
        # get the final box result
        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)
        # roll the history buffer: drop the oldest box, append the newest
        if len(self.store_result) < self.save_all:
            self.store_result.append(self.state.copy())
        else:
            for i in range(self.save_all):
                if i != self.save_all - 1:
                    self.store_result[i] = self.store_result[i + 1]
                else:
                    self.store_result[i] = self.state.copy()

        # for debug
        if self.debug:
            if not self.use_visdom:
                x1, y1, w, h = self.state
                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color=(0, 0, 255), thickness=2)
                save_path = os.path.join(self.save_dir, "%04d.jpg" % self.frame_id)
                cv2.imwrite(save_path, image_BGR)
            else:
                self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')
                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')
                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')
                # NOTE(review): pred_score_map is undefined in this method — these
                # two registrations would raise NameError if this path ran; confirm.
                self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')
                self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map_hann')

                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:
                    removed_indexes_s = out_dict['removed_indexes_s']
                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]
                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)
                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')

                while self.pause_mode:
                    if self.step:
                        self.step = False
                        break

        if self.save_all_boxes:
            '''save all predictions'''
            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)
            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )
            return {"target_bbox": self.state,
                    "all_boxes": all_boxes_save}
        else:
            return {"target_bbox": self.state}

    def map_box_back(self, pred_box: list, resize_factor: float):
        # Map (cx, cy, w, h) from search-crop coordinates back to image
        # coordinates, anchored on the previous state's center.
        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]
        cx, cy, w, h = pred_box
        half_side = 0.5 * self.params.search_size / resize_factor
        cx_real = cx + (cx_prev - half_side)
        cy_real = cy + (cy_prev - half_side)
        # cx_real = cx + cx_prev
        # cy_real = cy + cy_prev
        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]

    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):
        # Vectorized map_box_back for an (N, 4) tensor of boxes.
        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]
        cx, cy, w, h = pred_box.unbind(-1)  # (N,4) --> (N,)
        half_side = 0.5 * self.params.search_size / resize_factor
        cx_real = cx + (cx_prev - half_side)
        cy_real = cy + (cy_prev - half_side)
        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)

    def add_hook(self):
        # Attach forward hooks recording per-block attention outputs (debug aid).
        conv_features, enc_attn_weights, dec_attn_weights = [], [], []
        for i in range(12):
            self.network.backbone.blocks[i].attn.register_forward_hook(
                # lambda self, input, output: enc_attn_weights.append(output[1])
                lambda self, input, output: enc_attn_weights.append(output[1])
            )
        self.enc_attn_weights = enc_attn_weights


def get_tracker_class():
    return ARTrackSeq
import random


class RandomErasing(object):
    """Randomly erase a rectangular region of a CHW image tensor.

    With probability ``EPSILON`` the input is returned untouched; otherwise up
    to 100 attempts are made to sample an erase rectangle whose area is in
    ``[sl, sh]`` of the image area and whose aspect ratio is in
    ``[r1, 1/r1]``.  The region is filled with the per-channel ``mean``.
    Reference: Zhong et al., "Random Erasing Data Augmentation".
    """

    def __init__(self, EPSILON=0.5, sl=0.02, sh=0.33, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
        # NOTE: mutable list default kept for interface compatibility; it is
        # only read, never mutated.
        self.EPSILON = EPSILON
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        """Erase a random patch of ``img`` (C, H, W) in place; return ``img``."""
        if random.uniform(0, 1) > self.EPSILON:
            return img

        for attempt in range(100):
            # Fix: removed stray debug `print(img.size())` that spammed stdout
            # on every one of the (up to 100) sampling attempts per image.
            area = img.size()[1] * img.size()[2]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.size()[2] and h < img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                if img.size()[0] == 3:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                else:
                    # NOTE(review): single-channel path fills with mean[1];
                    # mean[0] may have been intended — left unchanged.
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[1]
                return img

        # no valid rectangle found within 100 attempts: return unmodified
        return img
class ARTrackV2(BaseTracker):
    # ARTrack-V2 tracker: decodes the target box as coordinate tokens using a
    # static template (z_dict1) and a dynamic template (z_dict2).

    def __init__(self, params, dataset_name):
        super(ARTrackV2, self).__init__(params)
        network = build_artrackv2(params.cfg, training=False)
        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)
        self.cfg = params.cfg
        self.bins = self.cfg.MODEL.BINS  # number of coordinate quantization bins
        self.network = network.cuda()
        self.network.eval()
        self.preprocessor = Preprocessor()
        self.state = None  # current box as [x1, y1, w, h] in image coordinates
        # NOTE(review): update_ is never set True in this file — the dynamic
        # template update path appears dormant; confirm.
        self.update_ = False
        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE
        # motion constrain
        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()

        # for debug
        self.debug = params.debug
        self.use_visdom = params.debug
        self.frame_id = 0
        self.erase = RandomErasing()
        if self.debug:
            if not self.use_visdom:
                self.save_dir = "debug"
                if not os.path.exists(self.save_dir):
                    os.makedirs(self.save_dir)
            else:
                # self.add_hook()
                self._init_visdom(None, 1)
        # for save boxes from all queries
        self.save_all_boxes = params.save_all_boxes
        self.z_dict1 = {}

    def initialize(self, image, info: dict):
        # forward the template once
        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,
                                                                output_sz=self.params.template_size)  # output_sz=self.params.template_size
        self.z_patch_arr = z_patch_arr
        template = self.preprocessor.process(z_patch_arr, z_amask_arr)
        with torch.no_grad():
            # initialize dynamic template as template in first frame
            self.z_dict1 = template
            self.z_dict2 = template

        self.box_mask_z = None
        self.state = info['init_bbox']
        self.frame_id = 0
        if self.save_all_boxes:
            '''save all predicted boxes'''
            all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES
            return {"all_boxes": all_boxes_save}

    def track(self, image, info: dict = None):
        H, W, _ = image.shape
        self.frame_id += 1
        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,
                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)
        search = self.preprocessor.process(x_patch_arr, x_amask_arr)

        with torch.no_grad():
            x_dict = search
            # merge the template and the search
            # run the transformer
            if self.update_:
                # NOTE(review): this branch assumes z_dict2 has been replaced by
                # a raw tensor (no .tensors) — confirm the update mechanism.
                template = torch.concat([self.z_dict1.tensors.unsqueeze(0), self.z_dict2.unsqueeze(0)], dim=0)
            else:
                template = torch.concat([self.z_dict1.tensors.unsqueeze(0), self.z_dict2.tensors.unsqueeze(0)], dim=0)
            # merge the template and the search
            # run the transformer
            out_dict = self.network.forward(
                template=template, search=x_dict.tensors, ce_template_mask=self.box_mask_z)

        # De-quantize the 4 predicted tokens to normalized corners in [-0.5, 1.5].
        pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - 0.5
        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)
        # Corner -> center format.  pred_new ALIASES pred_boxes: after the
        # [2]/[3] writes, pred_boxes[2]/pred_boxes[3] already hold w/h, so the
        # [0]/[1] lines effectively compute x1 + w/2 and y1 + h/2.
        pred_new = pred_boxes
        pred_new[2] = pred_boxes[2] - pred_boxes[0]
        pred_new[3] = pred_boxes[3] - pred_boxes[1]
        pred_new[0] = pred_boxes[0] + pred_boxes[2] / 2
        pred_new[1] = pred_boxes[1] + pred_boxes[3] / 2

        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()
        # Baseline: Take the mean of all pred boxes as the final result
        # pred_box = (pred_boxes.mean(
        #     dim=0) * self.params.search_size / resize_factor).tolist()  # (cx, cy, w, h) [0,1]
        # get the final box result
        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)

        # for debug
        if self.debug:
            if not self.use_visdom:
                x1, y1, w, h = self.state
                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color=(0, 0, 255), thickness=2)
                save_path = os.path.join(self.save_dir, "%04d.jpg" % self.frame_id)
                cv2.imwrite(save_path, image_BGR)
            else:
                self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')
                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')
                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')
                # NOTE(review): pred_score_map is undefined in this method — these
                # two registrations would raise NameError if this path ran; confirm.
                self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')
                self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map_hann')

                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:
                    removed_indexes_s = out_dict['removed_indexes_s']
                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]
                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)
                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')

                while self.pause_mode:
                    if self.step:
                        self.step = False
                        break

        if self.save_all_boxes:
            '''save all predictions'''
            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)
            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )
            return {"target_bbox": self.state,
                    "all_boxes": all_boxes_save}
        else:
            return {"target_bbox": self.state}

    def map_box_back(self, pred_box: list, resize_factor: float):
        # Map (cx, cy, w, h) from search-crop coordinates back to image
        # coordinates, anchored on the previous state's center.
        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]
        cx, cy, w, h = pred_box
        half_side = 0.5 * self.params.search_size / resize_factor
        cx_real = cx + (cx_prev - half_side)
        cy_real = cy + (cy_prev - half_side)
        # cx_real = cx + cx_prev
        # cy_real = cy + cy_prev
        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]

    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):
        # Vectorized map_box_back for an (N, 4) tensor of boxes.
        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]
        cx, cy, w, h = pred_box.unbind(-1)  # (N,4) --> (N,)
        half_side = 0.5 * self.params.search_size / resize_factor
        cx_real = cx + (cx_prev - half_side)
        cy_real = cy + (cy_prev - half_side)
        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)

    def add_hook(self):
        # Attach forward hooks recording per-block attention outputs (debug aid).
        conv_features, enc_attn_weights, dec_attn_weights = [], [], []
        for i in range(12):
            self.network.backbone.blocks[i].attn.register_forward_hook(
                # lambda self, input, output: enc_attn_weights.append(output[1])
                lambda self, input, output: enc_attn_weights.append(output[1])
            )
        self.enc_attn_weights = enc_attn_weights


def get_tracker_class():
    return ARTrackV2
self.preprocessor = Preprocessor() self.state = None self.dz_feat = None self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE # motion constrain self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda() # for debug self.debug = params.debug self.use_visdom = params.debug self.frame_id = 0 if self.debug: if not self.use_visdom: self.save_dir = "debug" if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) else: # self.add_hook() self._init_visdom(None, 1) # for save boxes from all queries self.save_all_boxes = params.save_all_boxes self.z_dict1 = {} self.store_result = None self.prenum = params.cfg.MODEL.PRENUM self.range = params.cfg.MODEL.RANGE self.x_feat = None def initialize(self, image, info: dict): # forward the template once self.x_feat = None self.update_ = False z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor, output_sz=self.params.template_size) # output_sz=self.params.template_size self.z_patch_arr = z_patch_arr template = self.preprocessor.process(z_patch_arr, z_amask_arr) with torch.no_grad(): self.z_dict1 = template self.z_dict2 = template self.dz_feat = None self.box_mask_z = None # save states self.state = info['init_bbox'] self.store_result = [info['init_bbox'].copy()] for i in range(self.prenum - 1): self.store_result.append(info['init_bbox'].copy()) self.frame_id = 0 if self.save_all_boxes: '''save all predicted boxes''' all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES return {"all_boxes": all_boxes_save} def track(self, image, info: dict = None): H, W, _ = image.shape self.frame_id += 1 x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor, output_sz=self.params.search_size) # (x1, y1, w, h) if self.dz_feat == None: self.dz_feat = self.network.backbone.patch_embed(self.z_dict2.tensors) for i in range(len(self.store_result)): box_temp = 
self.store_result[i].copy() box_out_i = transform_image_to_crop(torch.Tensor(self.store_result[i]), torch.Tensor(self.state), resize_factor, torch.Tensor([self.cfg.TEST.SEARCH_SIZE, self.cfg.TEST.SEARCH_SIZE]), normalize=True) box_out_i[2] = box_out_i[2] + box_out_i[0] box_out_i[3] = box_out_i[3] + box_out_i[1] box_out_i = box_out_i.clamp(min=-0.5, max=1.5) box_out_i = (box_out_i + 0.5) * (self.bins - 1) if i == 0: seqs_out = box_out_i else: seqs_out = torch.cat((seqs_out, box_out_i), dim=-1) seqs_out = seqs_out.unsqueeze(0) search = self.preprocessor.process(x_patch_arr, x_amask_arr) with torch.no_grad(): x_dict = search # merge the template and the search # run the transformer if self.update_: template = torch.concat([self.z_dict1.tensors.unsqueeze(1), self.z_dict2.unsqueeze(1)], dim=1) else: template = torch.concat([self.z_dict1.tensors.unsqueeze(1), self.z_dict2.tensors.unsqueeze(1)], dim=1) out_dict = self.network.forward( template=template, dz_feat=self.dz_feat, search=x_dict.tensors, ce_template_mask=self.box_mask_z, seq_input=seqs_out, stage="sequence", search_feature=self.x_feat) self.dz_feat = out_dict['dz_feat'] self.x_feat = out_dict['x_feat'] pred_boxes = (out_dict['seqs'][:, 0:4] + 0.5) / (self.bins - 1) - 0.5 pred_feat = out_dict['feat'] pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 6) pred = pred_feat[0:4, :, 0:self.bins * self.range] out = pred.softmax(-1).to(pred) mul = torch.range((-1 * self.range * 0.5 + 0.5) + 1 / (self.bins * self.range), (self.range * 0.5 + 0.5) - 1 / (self.bins * self.range), 2 / (self.bins * self.range)).to(pred) ans = out * mul ans = ans.sum(dim=-1) ans = ans.permute(1, 0).to(pred) pred_boxes = (ans + pred_boxes) / 2 pred_boxes = pred_boxes.view(-1, 4).mean(dim=0) pred_new = pred_boxes pred_new[2] = pred_boxes[2] - pred_boxes[0] pred_new[3] = pred_boxes[3] - pred_boxes[1] pred_new[0] = pred_boxes[0] + pred_new[2] / 2 pred_new[1] = pred_boxes[1] + pred_new[3] / 2 pred_boxes = (pred_new * 
self.params.search_size / resize_factor).tolist() self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10) if len(self.store_result) < self.prenum: self.store_result.append(self.state.copy()) else: for i in range(self.prenum): if i != self.prenum - 1: self.store_result[i] = self.store_result[i + 1] else: self.store_result[i] = self.state.copy() # for debug if self.debug: if not self.use_visdom: x1, y1, w, h = self.state image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color=(0, 0, 255), thickness=2) save_path = os.path.join(self.save_dir, "%04d.jpg" % self.frame_id) cv2.imwrite(save_path, image_BGR) else: self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking') self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region') self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template') self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map') self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map_hann') if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']: removed_indexes_s = out_dict['removed_indexes_s'] removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s] masked_search = gen_visualization(x_patch_arr, removed_indexes_s) self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search') while self.pause_mode: if self.step: self.step = False break if self.save_all_boxes: '''save all predictions''' all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor) all_boxes_save = all_boxes.view(-1).tolist() # (4N, ) return {"target_bbox": self.state, "all_boxes": all_boxes_save} else: return {"target_bbox": self.state} def 
map_box_back(self, pred_box: list, resize_factor: float): cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3] cx, cy, w, h = pred_box half_side = 0.5 * self.params.search_size / resize_factor cx_real = cx + (cx_prev - half_side) cy_real = cy + (cy_prev - half_side) # cx_real = cx + cx_prev # cy_real = cy + cy_prev return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h] def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float): cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3] cx, cy, w, h = pred_box.unbind(-1) # (N,4) --> (N,) half_side = 0.5 * self.params.search_size / resize_factor cx_real = cx + (cx_prev - half_side) cy_real = cy + (cy_prev - half_side) return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1) def add_hook(self): conv_features, enc_attn_weights, dec_attn_weights = [], [], [] for i in range(12): self.network.backbone.blocks[i].attn.register_forward_hook( # lambda self, input, output: enc_attn_weights.append(output[1]) lambda self, input, output: enc_attn_weights.append(output[1]) ) self.enc_attn_weights = enc_attn_weights def get_tracker_class(): return ARTrackV2Seq ================================================ FILE: lib/test/tracker/basetracker.py ================================================ import time import torch from _collections import OrderedDict from lib.train.data.processing_utils import transform_image_to_crop from lib.vis.visdom_cus import Visdom class BaseTracker: """Base class for all trackers.""" def __init__(self, params): self.params = params self.visdom = None def predicts_segmentation_mask(self): return False def initialize(self, image, info: dict) -> dict: """Overload this function in your tracker. This should initialize the model.""" raise NotImplementedError def track(self, image, info: dict = None) -> dict: """Overload this function in your tracker. 
This should track in the frame and update the model.""" raise NotImplementedError def visdom_draw_tracking(self, image, box, segmentation=None): if isinstance(box, OrderedDict): box = [v for k, v in box.items()] else: box = (box,) if segmentation is None: self.visdom.register((image, *box), 'Tracking', 1, 'Tracking') else: self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking') def transform_bbox_to_crop(self, box_in, resize_factor, device, box_extract=None, crop_type='template'): # box_in: list [x1, y1, w, h], not normalized # box_extract: same as box_in # out bbox: Torch.tensor [1, 1, 4], x1y1wh, normalized if crop_type == 'template': crop_sz = torch.Tensor([self.params.template_size, self.params.template_size]) elif crop_type == 'search': crop_sz = torch.Tensor([self.params.search_size, self.params.search_size]) else: raise NotImplementedError box_in = torch.tensor(box_in) if box_extract is None: box_extract = box_in else: box_extract = torch.tensor(box_extract) template_bbox = transform_image_to_crop(box_in, box_extract, resize_factor, crop_sz, normalize=True) template_bbox = template_bbox.view(1, 1, 4).to(device) return template_bbox def _init_visdom(self, visdom_info, debug): visdom_info = {} if visdom_info is None else visdom_info self.pause_mode = False self.step = False self.next_seq = False if debug > 0 and visdom_info.get('use_visdom', True): try: self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'}, visdom_info=visdom_info) # # Show help # help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \ # 'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \ # 'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \ # 'block list.' # self.visdom.register(help_text, 'text', 1, 'Help') except: time.sleep(0.5) print('!!! 
WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n' '!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!') def _visdom_ui_handler(self, data): if data['event_type'] == 'KeyPress': if data['key'] == ' ': self.pause_mode = not self.pause_mode elif data['key'] == 'ArrowRight' and self.pause_mode: self.step = True elif data['key'] == 'n': self.next_seq = True ================================================ FILE: lib/test/tracker/data_utils.py ================================================ import torch import numpy as np from lib.utils.misc import NestedTensor class Preprocessor(object): def __init__(self): self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda() self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda() def process(self, img_arr: np.ndarray, amask_arr: np.ndarray): # Deal with the image patch img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0) img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std # (1,3,H,W) # Deal with the attention mask amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0) # (1,H,W) return NestedTensor(img_tensor_norm, amask_tensor) class PreprocessorX(object): def __init__(self): self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda() self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda() def process(self, img_arr: np.ndarray, amask_arr: np.ndarray): # Deal with the image patch img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0) img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std # (1,3,H,W) # Deal with the attention mask amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0) # (1,H,W) return img_tensor_norm, amask_tensor class PreprocessorX_onnx(object): def __init__(self): self.mean = np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1)) self.std = np.array([0.229, 0.224, 
class PreprocessorX_onnx(object):
    """NumPy-only preprocessing for ONNX inference (no torch, no GPU)."""

    def __init__(self):
        self.mean = np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))
        self.std = np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))

    def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):
        """img_arr: (H,W,3), amask_arr: (H,W)"""
        # Deal with the image patch
        img_arr_4d = img_arr[np.newaxis, :, :, :].transpose(0, 3, 1, 2)
        img_arr_4d = (img_arr_4d / 255.0 - self.mean) / self.std  # (1, 3, H, W)
        # Deal with the attention mask
        amask_arr_3d = amask_arr[np.newaxis, :, :]  # (1,H,W)
        # Fix: the `np.bool` alias was deprecated in NumPy 1.20 and removed in
        # 1.24 (AttributeError on current NumPy).  The builtin `bool` is the
        # documented replacement and yields the identical numpy.bool_ dtype.
        return img_arr_4d.astype(np.float32), amask_arr_3d.astype(bool)


import numpy as np

############## used for visulize eliminated tokens #################


def get_keep_indices(decisions):
    """Accumulate token-keep decisions across the three CE pruning stages."""
    keep_indices = []
    for i in range(3):
        if i == 0:
            keep_indices.append(decisions[i])
        else:
            keep_indices.append(keep_indices[-1][decisions[i]])
    return keep_indices


def gen_masked_tokens(tokens, indices, alpha=0.2):
    """Blend the tokens at `indices` towards white (visual 'removed' effect)."""
    # indices = [i for i in range(196) if i not in indices]
    indices = indices[0].astype(int)
    tokens = tokens.copy()
    tokens[indices] = alpha * tokens[indices] + (1 - alpha) * 255
    return tokens


def recover_image(tokens, H, W, Hp, Wp, patch_size):
    """Reassemble (Hp*Wp, patch, patch, 3) tokens into an (H, W, 3) image."""
    image = tokens.reshape(Hp, Wp, patch_size, patch_size, 3).swapaxes(1, 2).reshape(H, W, 3)
    return image


def pad_img(img):
    """Append an 8-pixel white separator column on the right of `img`."""
    height, width, channels = img.shape
    im_bg = np.ones((height, width + 8, channels)) * 255
    im_bg[0:height, 0:width, :] = img
    return im_bg


def gen_visualization(image, mask_indices, patch_size=16):
    """Render the original image followed by one masked image per CE stage.

    image: (H, W, 3) with H, W multiples of `patch_size`.
    mask_indices: list of masked-token index arrays, one per stage.
    NOTE(review): mask_indices is mutated in place (stage indices are
    accumulated into the caller's list) — confirm this is intended.
    """
    num_stages = len(mask_indices)
    for i in range(1, num_stages):
        mask_indices[i] = np.concatenate([mask_indices[i - 1], mask_indices[i]], axis=1)

    # keep_indices = get_keep_indices(decisions)
    image = np.asarray(image)
    H, W, C = image.shape
    Hp, Wp = H // patch_size, W // patch_size
    image_tokens = image.reshape(Hp, patch_size, Wp, patch_size, 3).swapaxes(1, 2).reshape(Hp * Wp, patch_size, patch_size, 3)

    stages = [
        recover_image(gen_masked_tokens(image_tokens, mask_indices[i]), H, W, Hp, Wp, patch_size)
        for i in range(num_stages)
    ]
    imgs = [image] + stages
    imgs = [pad_img(img) for img in imgs]
    viz = np.concatenate(imgs, axis=1)
    return viz


import torch
import math
import torch.nn.functional as F


def hann1d(sz: int, centered = True) -> torch.Tensor:
    """1D cosine window."""
    if centered:
        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))
    # uncentered: peak at index 0, wrap-around tail
    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz // 2 + 1).float()))
    return torch.cat([w, w[1:sz - sz // 2].flip((0,))])


def hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:
    """2D cosine window: outer product of two 1D windows, shape (1, 1, H, W)."""
    return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)
hann1d(max_distance[1].item() * 2, centered) hann1d_y = hann1d_y[max_distance[1] - distance[0, 1]: max_distance[1] + distance[1, 1]] return hann1d_y.reshape(1, 1, -1, 1) * hann1d_x.reshape(1, 1, 1, -1) def hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor: """1D clipped cosine window.""" # Ensure that the difference is even effective_sz += (effective_sz - sz) % 2 effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1) pad = (sz - effective_sz) // 2 window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate') if centered: return window else: mid = (sz / 2).int() window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3) return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2) def gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor: if half: k = torch.arange(0, int(sz/2+1)) else: k = torch.arange(-int((sz-1)/2), int(sz/2+1)) return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 * (math.pi * sigma * k.float() / sz)**2) def gauss_spatial(sz, sigma, center=0, end_pad=0): k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad) return torch.exp(-1.0/(2*sigma**2) * (k - center)**2) def label_function(sz: torch.Tensor, sigma: torch.Tensor): return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1) def label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)): """The origin is in the middle of the image.""" return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \ gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1) def cubic_spline_fourier(f, a): """The continuous Fourier 
def cubic_spline_fourier(f, a):
    """The continuous Fourier transform of a cubic spline kernel.

    (Reconstructed in full; the original definition was split across a chunk
    boundary of this dump.)
    """
    bf = (6 * (1 - torch.cos(2 * math.pi * f)) + 3 * a * (1 - torch.cos(4 * math.pi * f))
          - (6 + 8 * a) * math.pi * f * torch.sin(2 * math.pi * f)
          - 2 * a * math.pi * f * torch.sin(4 * math.pi * f)) \
         / (4 * math.pi ** 4 * f ** 4)
    # The formula is 0/0 at f == 0; the limit is 1.
    bf[f == 0] = 1
    return bf


def max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):
    """Computes maximum and argmax in the last two dimensions.

    Returns (max_val, argmax) where argmax stacks the (row, col) indices in the
    last dimension.
    """
    max_val_row, argmax_row = torch.max(a, dim=-2)
    max_val, argmax_col = torch.max(max_val_row, dim=-1)
    # Pick, for each leading index, the row index at the winning column.
    argmax_row = argmax_row.view(argmax_col.numel(), -1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]
    argmax_row = argmax_row.reshape(argmax_col.shape)
    argmax = torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)
    return max_val, argmax


# ================================================
# FILE: lib/test/utils/load_text.py
# ================================================
import numpy as np
import pandas as pd


def load_text_numpy(path, delimiter, dtype):
    """Load a numeric text file with numpy; a list of delimiters is tried in order."""
    if isinstance(delimiter, (tuple, list)):
        for d in delimiter:
            try:
                ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype)
                return ground_truth_rect
            # FIX: a bare "except:" also swallowed KeyboardInterrupt/SystemExit;
            # only parsing/IO failures should trigger the next delimiter.
            except Exception:
                pass
        raise Exception('Could not read file {}'.format(path))
    else:
        ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype)
        return ground_truth_rect


def load_text_pandas(path, delimiter, dtype):
    """Load a numeric text file with pandas; a list of delimiters is tried in order."""
    if isinstance(delimiter, (tuple, list)):
        for d in delimiter:
            try:
                ground_truth_rect = pd.read_csv(path, delimiter=d, header=None, dtype=dtype,
                                                na_filter=False, low_memory=False).values
                return ground_truth_rect
            except Exception:
                # Try the next candidate delimiter.
                pass
        raise Exception('Could not read file {}'.format(path))
    else:
        ground_truth_rect = pd.read_csv(path, delimiter=delimiter, header=None, dtype=dtype,
                                        na_filter=False, low_memory=False).values
        return ground_truth_rect


def load_text(path, delimiter=' ', dtype=np.float32, backend='numpy'):
    """Load a ground-truth rectangle file with the chosen backend ('numpy'/'pandas').

    FIX: an unknown backend used to fall through and silently return None;
    raise instead so the misconfiguration is visible at the call site.
    """
    if backend == 'numpy':
        return load_text_numpy(path, delimiter, dtype)
    elif backend == 'pandas':
        return load_text_pandas(path, delimiter, dtype)
    raise ValueError('Unknown backend {}'.format(backend))


def load_str(path):
    """Read the first line of a text file, stripped and lower-cased.

    (Reconstructed in full; the original definition was split across a chunk
    boundary of this dump.)
    """
    with open(path, "r") as f:
        text_str = f.readline().strip().lower()
    return text_str
load_str(path): with open(path, "r") as f: text_str = f.readline().strip().lower() return text_str ================================================ FILE: lib/test/utils/params.py ================================================ from lib.utils import TensorList import random class TrackerParams: """Class for tracker parameters.""" def set_default_values(self, default_vals: dict): for name, val in default_vals.items(): if not hasattr(self, name): setattr(self, name, val) def get(self, name: str, *default): """Get a parameter value with the given name. If it does not exists, it return the default value given as a second argument or returns an error if no default value is given.""" if len(default) > 1: raise ValueError('Can only give one default value.') if not default: return getattr(self, name) return getattr(self, name, default[0]) def has(self, name: str): """Check if there exist a parameter with the given name.""" return hasattr(self, name) class FeatureParams: """Class for feature specific parameters""" def __init__(self, *args, **kwargs): if len(args) > 0: raise ValueError for name, val in kwargs.items(): if isinstance(val, list): setattr(self, name, TensorList(val)) else: setattr(self, name, val) def Choice(*args): """Can be used to sample random parameter values.""" return random.choice(args) ================================================ FILE: lib/test/utils/transform_got10k.py ================================================ import numpy as np import os import shutil import argparse import _init_paths from lib.test.evaluation.environment import env_settings def transform_got10k(tracker_name, cfg_name): env = env_settings() result_dir = env.results_path src_dir = os.path.join(result_dir, "%s/%s/got10k/" % (tracker_name, cfg_name)) dest_dir = os.path.join(result_dir, "%s/%s/got10k_submit/" % (tracker_name, cfg_name)) if not os.path.exists(dest_dir): os.makedirs(dest_dir) items = os.listdir(src_dir) for item in items: if "all" in item: continue src_path = 
os.path.join(src_dir, item) if "time" not in item: seq_name = item.replace(".txt", '') seq_dir = os.path.join(dest_dir, seq_name) if not os.path.exists(seq_dir): os.makedirs(seq_dir) new_item = item.replace(".txt", '_001.txt') dest_path = os.path.join(seq_dir, new_item) bbox_arr = np.loadtxt(src_path, dtype=np.int, delimiter='\t') np.savetxt(dest_path, bbox_arr, fmt='%d', delimiter=',') else: seq_name = item.replace("_time.txt", '') seq_dir = os.path.join(dest_dir, seq_name) if not os.path.exists(seq_dir): os.makedirs(seq_dir) dest_path = os.path.join(seq_dir, item) os.system("cp %s %s" % (src_path, dest_path)) # make zip archive shutil.make_archive(src_dir, "zip", src_dir) shutil.make_archive(dest_dir, "zip", dest_dir) # Remove the original files shutil.rmtree(src_dir) shutil.rmtree(dest_dir) if __name__ == "__main__": parser = argparse.ArgumentParser(description='transform got10k results.') parser.add_argument('--tracker_name', type=str, help='Name of tracking method.') parser.add_argument('--cfg_name', type=str, help='Name of config file.') args = parser.parse_args() transform_got10k(args.tracker_name, args.cfg_name) ================================================ FILE: lib/test/utils/transform_trackingnet.py ================================================ import numpy as np import os import shutil import argparse import _init_paths from lib.test.evaluation.environment import env_settings def transform_trackingnet(tracker_name, cfg_name): env = env_settings() result_dir = env.results_path src_dir = os.path.join(result_dir, "%s/%s/trackingnet/" % (tracker_name, cfg_name)) dest_dir = os.path.join(result_dir, "%s/%s/trackingnet_submit/" % (tracker_name, cfg_name)) if not os.path.exists(dest_dir): os.makedirs(dest_dir) items = os.listdir(src_dir) for item in items: if "all" in item: continue if "time" not in item: src_path = os.path.join(src_dir, item) dest_path = os.path.join(dest_dir, item) bbox_arr = np.loadtxt(src_path, dtype=np.int, delimiter='\t') 
np.savetxt(dest_path, bbox_arr, fmt='%d', delimiter=',') # make zip archive shutil.make_archive(src_dir, "zip", src_dir) shutil.make_archive(dest_dir, "zip", dest_dir) # Remove the original files shutil.rmtree(src_dir) shutil.rmtree(dest_dir) if __name__ == "__main__": parser = argparse.ArgumentParser(description='transform trackingnet results.') parser.add_argument('--tracker_name', type=str, help='Name of tracking method.') parser.add_argument('--cfg_name', type=str, help='Name of config file.') args = parser.parse_args() transform_trackingnet(args.tracker_name, args.cfg_name) ================================================ FILE: lib/train/__init__.py ================================================ from .admin.multigpu import MultiGPU ================================================ FILE: lib/train/_init_paths.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path as osp import sys def add_path(path): if path not in sys.path: sys.path.insert(0, path) this_dir = osp.dirname(__file__) prj_path = osp.join(this_dir, '../..') add_path(prj_path) ================================================ FILE: lib/train/actors/__init__.py ================================================ from .base_actor import BaseActor from .artrack import ARTrackActor from .artrack_seq import ARTrackSeqActor from .artrackv2 import ARTrackV2Actor from .artrackv2_seq import ARTrackV2SeqActor ================================================ FILE: lib/train/actors/artrack.py ================================================ from . 
from . import BaseActor
from lib.utils.misc import NestedTensor
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy

import torch
import math
import numpy as np

from lib.utils.merge import merge_template_search
from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate


def fp16_clamp(x, min=None, max=None):
    """Clamp that also works for float16 tensors on CPU (no native fp16 clamp)."""
    if not x.is_cuda and x.dtype == torch.float16:
        # clamp for cpu float16, tensor fp16 has no clamp implementation
        return x.float().clamp(min, max).half()
    return x.clamp(min, max)


def generate_sa_simdr(joints):
    '''
    Build 1-D SimDR-style Gaussian target vectors for the four box coordinates.

    :param joints: [num_joints, 3]
    :param joints_vis: [num_joints, 3]
    :return: target, target_weight(1: visible, 0: invisible)
    '''
    num_joints = 48
    image_size = [256, 256]
    simdr_split_ratio = 1.5625
    sigma = 6

    width = int(image_size[0] * simdr_split_ratio)
    height = int(image_size[1] * simdr_split_ratio)
    target_x1 = np.zeros((num_joints, width), dtype=np.float32)
    target_y1 = np.zeros((num_joints, height), dtype=np.float32)
    target_x2 = np.zeros((num_joints, width), dtype=np.float32)
    target_y2 = np.zeros((num_joints, height), dtype=np.float32)

    # (unused zero_4_begin / tmp_size locals removed)
    norm = sigma * np.sqrt(np.pi * 2)
    for joint_id in range(num_joints):
        mu_x1 = joints[joint_id][0]
        mu_y1 = joints[joint_id][1]
        mu_x2 = joints[joint_id][2]
        mu_y2 = joints[joint_id][3]
        x1 = np.arange(0, width, 1, np.float32)
        y1 = np.arange(0, height, 1, np.float32)
        x2 = np.arange(0, width, 1, np.float32)
        y2 = np.arange(0, height, 1, np.float32)
        target_x1[joint_id] = np.exp(-((x1 - mu_x1) ** 2) / (2 * sigma ** 2)) / norm
        target_y1[joint_id] = np.exp(-((y1 - mu_y1) ** 2) / (2 * sigma ** 2)) / norm
        target_x2[joint_id] = np.exp(-((x2 - mu_x2) ** 2) / (2 * sigma ** 2)) / norm
        target_y2[joint_id] = np.exp(-((y2 - mu_y2) ** 2) / (2 * sigma ** 2)) / norm
    return target_x1, target_y1, target_x2, target_y2


def SIoU_loss(test1, test2, theta=4):
    """SCYLLA-IoU loss for corner-format boxes (x1, y1, x2, y2).

    Returns (siou, iou): siou = 1 - iou + (shape_cost + distance_cost) / 2.
    """
    eps = 1e-7
    # angle cost
    cx_pred = (test1[:, 0] + test1[:, 2]) / 2
    cy_pred = (test1[:, 1] + test1[:, 3]) / 2
    cx_gt = (test2[:, 0] + test2[:, 2]) / 2
    cy_gt = (test2[:, 1] + test2[:, 3]) / 2
    dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5
    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)
    x = ch / (dist + eps)
    angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2
    # distance cost (relative to the enclosing box)
    xmin = torch.min(test1[:, 0], test2[:, 0])
    xmax = torch.max(test1[:, 2], test2[:, 2])
    ymin = torch.min(test1[:, 1], test2[:, 1])
    ymax = torch.max(test1[:, 3], test2[:, 3])
    cw = xmax - xmin
    ch = ymax - ymin
    px = ((cx_gt - cx_pred) / (cw + eps)) ** 2
    py = ((cy_gt - cy_pred) / (ch + eps)) ** 2
    gama = 2 - angle
    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))
    # shape cost
    w_pred = test1[:, 2] - test1[:, 0]
    h_pred = test1[:, 3] - test1[:, 1]
    w_gt = test2[:, 2] - test2[:, 0]
    h_gt = test2[:, 3] - test2[:, 1]
    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)
    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)
    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta
    # IoU loss
    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]
    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]
    wh = fp16_clamp(rb - lt, min=0)
    overlap = wh[..., 0] * wh[..., 1]
    area1 = (test1[..., 2] - test1[..., 0]) * (test1[..., 3] - test1[..., 1])
    area2 = (test2[..., 2] - test2[..., 0]) * (test2[..., 3] - test2[..., 1])
    iou = overlap / (area1 + area2 - overlap)
    SIoU = 1 - iou + (omega + dis) / 2
    return SIoU, iou


def ciou(pred, target, eps=1e-7):
    """Complete-IoU for corner-format boxes; returns (cious, ious)."""
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose area
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]
    c2 = cw ** 2 + ch ** 2 + eps
    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    # center-distance term
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
    rho2 = left + right
    # aspect-ratio term
    factor = 4 / math.pi ** 2
    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
    # CIoU
    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
    return cious, ious


class ARTrackActor(BaseActor):
    """ Actor for training ARTrack models """

    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):
        super().__init__(net, objective)
        self.loss_weight = loss_weight
        self.settings = settings
        self.bs = self.settings.batchsize  # batch size
        self.cfg = cfg
        self.bins = bins
        self.range = self.cfg.MODEL.RANGE
        self.search_size = search_size
        self.logsoftmax = torch.nn.LogSoftmax(dim=1)
        self.focal = None
        # Loss weights for the KL and token-classification terms are fixed here.
        self.loss_weight['KL'] = 100
        self.loss_weight['focal'] = 2

    def __call__(self, data):
        """
        args:
            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
            template_images: (N_t, batch, 3, H, W)
            search_images: (N_s, batch, 3, H, W)
        returns:
            loss    - the training loss
            status  -  dict containing detailed losses
        """
        # forward pass
        out_dict = self.forward_pass(data)
        # compute losses
        loss, status = self.compute_losses(out_dict, data)
        return loss, status

    def forward_pass(self, data):
        # currently only support 1 template and 1 search region
        assert len(data['template_images']) == 1
        assert len(data['search_images']) == 1
        template_list = []
        for i in range(self.settings.num_template):
            template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:])  # (batch, 3, 128, 128)
            template_list.append(template_img_i)
        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)
        if len(template_list) == 1:
            template_list = template_list[0]

        gt_bbox = data['search_anno'][-1]
        begin = self.bins * self.range      # start-of-sequence token id
        end = self.bins * self.range + 1    # end-of-sequence token id
        magic_num = (self.range - 1) * 0.5
        # NOTE(review): in-place (x, y, w, h) -> (x1, y1, x2, y2) conversion;
        # this mutates data['search_anno'] as a side effect — presumably intended.
        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]
        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]
        gt_bbox = gt_bbox.clamp(min=(-1 * magic_num), max=(1 + magic_num))
        data['real_bbox'] = gt_bbox
        # Quantize normalized coordinates to token indices.
        seq_ori = (gt_bbox + magic_num) * (self.bins - 1)
        seq_ori = seq_ori.int().to(search_img)
        B = seq_ori.shape[0]
        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)
        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)
        data['seq_input'] = seq_input
        data['seq_output'] = seq_output
        out_dict = self.net(template=template_list, search=search_img, seq_input=seq_input)
        return out_dict

    def compute_losses(self, pred_dict, gt_dict, return_status=True):
        """Token cross-entropy + SIoU + L1 losses over the soft-argmax decoded box."""
        bins = self.bins
        magic_num = (self.range - 1) * 0.5
        seq_output = gt_dict['seq_output']
        pred_feat = pred_dict["feat"]
        if self.focal is None:  # FIX: identity comparison, not "== None"
            # Vocabulary = bins*range coordinate tokens + start + end tokens;
            # the two control tokens are down-weighted.
            weight = torch.ones(bins * self.range + 2)
            weight[bins * self.range + 1] = 0.1
            weight[bins * self.range] = 0.1
            # FIX: Tensor.to() is out-of-place; the original discarded the result,
            # leaving the weight on CPU (device mismatch on GPU training).
            weight = weight.to(pred_feat)
            self.klloss = torch.nn.KLDivLoss(reduction='none').to(pred_feat)
            # FIX: size_average is deprecated; reduction='mean' is the equivalent.
            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)
        # compute varifocal loss
        # FIX: logits dimension must match the weight length bins*range+2
        # (the original literal bins*2+2 silently assumed RANGE == 2).
        pred = pred_feat.permute(1, 0, 2).reshape(-1, bins * self.range + 2)
        target = seq_output.reshape(-1).to(torch.int64)
        varifocal_loss = self.focal(pred, target)
        # compute giou and L1 loss
        beta = 1
        pred = pred_feat[0:4, :, 0:bins * self.range] * beta
        target = seq_output[:, 0:4].to(pred_feat)
        out = pred.softmax(-1).to(pred)
        # Bin centers: element i is -magic_num + (2i+1)/(bins*range).
        # FIX: torch.range is deprecated; this integer arange reproduces the same
        # values (and the same length for the standard RANGE == 2 configuration).
        n_vals = self.bins * self.range
        mul = ((2 * torch.arange(n_vals, dtype=torch.float32) + 1) / n_vals - magic_num).to(pred)
        ans = out * mul
        ans = ans.sum(dim=-1)
        ans = ans.permute(1, 0).to(pred)
        target = target / (bins - 1) - magic_num
        extra_seq = ans.to(pred)
        sious, iou = SIoU_loss(extra_seq, target, 4)
        siou_loss = sious.mean()
        l1_loss = self.objective['l1'](extra_seq, target)
        loss = self.loss_weight['giou'] * siou_loss + self.loss_weight['l1'] * l1_loss \
            + self.loss_weight['focal'] * varifocal_loss
        if return_status:
            # status for log
            mean_iou = iou.detach().mean()
            status = {"Loss/total": loss.item(),
                      "Loss/giou": siou_loss.item(),
                      "Loss/l1": l1_loss.item(),
                      "Loss/location": varifocal_loss.item(),
                      "IoU": mean_iou.item()}
            return loss, status
        else:
            return loss


# ================================================
# FILE: lib/train/actors/artrack_seq.py
# ================================================
# (that file's imports begin on the following chunk line)
from . import BaseActor
from lib.utils.misc import NestedTensor
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy

import torch
import math
import numpy as np
import numpy
import cv2
import torch.nn.functional as F
import torchvision.transforms.functional as tvisf
import lib.train.data.bounding_box_utils as bbutils

from lib.utils.merge import merge_template_search
from torch.distributions.categorical import Categorical
from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate


def IoU(rect1, rect2):
    """Calculate intersection over union of two corner-format boxes.

    Args:
        rect1: (x1, y1, x2, y2)
        rect2: (x1, y1, x2, y2)
    Returns:
        iou
    """
    ax1, ay1, ax2, ay2 = rect1[0], rect1[1], rect1[2], rect1[3]
    bx1, by1, bx2, by2 = rect2[0], rect2[1], rect2[2], rect2[3]
    ix1 = np.maximum(bx1, ax1)
    iy1 = np.maximum(by1, ay1)
    ix2 = np.minimum(bx2, ax2)
    iy2 = np.minimum(by2, ay2)
    inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    return inter / (area_a + area_b - inter)


def fp16_clamp(x, min=None, max=None):
    """Clamp that also supports float16 tensors on CPU, which lack a native clamp."""
    if x.is_cuda or x.dtype != torch.float16:
        return x.clamp(min, max)
    return x.float().clamp(min, max).half()


def generate_sa_simdr(joints):
    '''
    Build 1-D SimDR-style Gaussian target vectors for the four box coordinates.

    :param joints: [num_joints, 3]
    :param joints_vis: [num_joints, 3]
    :return: target, target_weight(1: visible, 0: invisible)
    '''
    num_joints = 48
    image_size = [256, 256]
    simdr_split_ratio = 1.5625
    sigma = 6

    width = int(image_size[0] * simdr_split_ratio)
    height = int(image_size[1] * simdr_split_ratio)
    norm = sigma * np.sqrt(np.pi * 2)

    def _gauss_1d(length, mu):
        # Normalized Gaussian sampled on an integer grid of the given length.
        grid = np.arange(0, length, 1, np.float32)
        return np.exp(-((grid - mu) ** 2) / (2 * sigma ** 2)) / norm

    target_x1 = np.zeros((num_joints, width), dtype=np.float32)
    target_y1 = np.zeros((num_joints, height), dtype=np.float32)
    target_x2 = np.zeros((num_joints, width), dtype=np.float32)
    target_y2 = np.zeros((num_joints, height), dtype=np.float32)
    for joint_id in range(num_joints):
        target_x1[joint_id] = _gauss_1d(width, joints[joint_id][0])
        target_y1[joint_id] = _gauss_1d(height, joints[joint_id][1])
        target_x2[joint_id] = _gauss_1d(width, joints[joint_id][2])
        target_y2[joint_id] = _gauss_1d(height, joints[joint_id][3])
    return target_x1, target_y1, target_x2, target_y2


def SIoU_loss(test1, test2, theta=4):
    """SCYLLA-IoU loss for corner-format boxes; returns (siou, iou)."""
    eps = 1e-7
    # angle cost
    cx_p = (test1[:, 0] + test1[:, 2]) / 2
    cy_p = (test1[:, 1] + test1[:, 3]) / 2
    cx_g = (test2[:, 0] + test2[:, 2]) / 2
    cy_g = (test2[:, 1] + test2[:, 3]) / 2
    center_dist = ((cx_p - cx_g) ** 2 + (cy_p - cy_g) ** 2) ** 0.5
    dy = torch.max(cy_g, cy_p) - torch.min(cy_g, cy_p)
    sin_alpha = dy / (center_dist + eps)
    angle = 1 - 2 * torch.sin(torch.arcsin(sin_alpha) - torch.pi / 4) ** 2
    # distance cost, measured against the enclosing box
    enc_w = torch.max(test1[:, 2], test2[:, 2]) - torch.min(test1[:, 0], test2[:, 0])
    enc_h = torch.max(test1[:, 3], test2[:, 3]) - torch.min(test1[:, 1], test2[:, 1])
    px = ((cx_g - cx_p) / (enc_w + eps)) ** 2
    py = ((cy_g - cy_p) / (enc_h + eps)) ** 2
    gama = 2 - angle
    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))
    # shape cost
    w_p = test1[:, 2] - test1[:, 0]
    h_p = test1[:, 3] - test1[:, 1]
    w_g = test2[:, 2] - test2[:, 0]
    h_g = test2[:, 3] - test2[:, 1]
    dw = torch.abs(w_p - w_g) / (torch.max(w_p, w_g) + eps)
    dh = torch.abs(h_g - h_p) / (torch.max(h_g, h_p) + eps)
    omega = (1 - torch.exp(-1 * dh)) ** theta + (1 - torch.exp(-1 * dw)) ** theta
    # plain IoU
    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]
    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]
    wh = fp16_clamp(rb - lt, min=0)
    overlap = wh[..., 0] * wh[..., 1]
    area_p = (test1[..., 2] - test1[..., 0]) * (test1[..., 3] - test1[..., 1])
    area_g = (test2[..., 2] - test2[..., 0]) * (test2[..., 3] - test2[..., 1])
    iou = overlap / (area_p + area_g - overlap)
    siou = 1 - iou + (omega + dis) / 2
    return siou, iou


def ciou(pred, target, eps=1e-7):
    """Complete-IoU for corner-format boxes; returns (cious, ious)."""
    # overlap and union
    inter_lt = torch.max(pred[:, :2], target[:, :2])
    inter_rb = torch.min(pred[:, 2:], target[:, 2:])
    inter_wh = (inter_rb - inter_lt).clamp(min=0)
    overlap = inter_wh[:, 0] * inter_wh[:, 1]
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_g = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = area_p + area_g - overlap + eps
    ious = overlap / union
    # squared diagonal of the enclosing box
    enc_lt = torch.min(pred[:, :2], target[:, :2])
    enc_rb = torch.max(pred[:, 2:], target[:, 2:])
    enc_wh = (enc_rb - enc_lt).clamp(min=0)
    c2 = enc_wh[:, 0] ** 2 + enc_wh[:, 1] ** 2 + eps
    # squared center distance
    w1, h1 = pred[:, 2] - pred[:, 0], pred[:, 3] - pred[:, 1] + eps
    w2, h2 = target[:, 2] - target[:, 0], target[:, 3] - target[:, 1] + eps
    dx2 = ((target[:, 0] + target[:, 2]) - (pred[:, 0] + pred[:, 2])) ** 2 / 4
    dy2 = ((target[:, 1] + target[:, 3]) - (pred[:, 1] + pred[:, 3])) ** 2 / 4
    rho2 = dx2 + dy2
    # aspect-ratio consistency term
    v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
    return cious, ious


class ARTrackSeqActor(BaseActor):
    """ Actor for training OSTrack models """

    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):
        super().__init__(net, objective)
        self.loss_weight = loss_weight
        self.settings = settings
        # (the remainder of __init__ continues on the following chunk line:
        #  self.bs = self.settings.batchsize, cfg/bins/search_size bookkeeping, ...)
self.settings.batchsize # batch size self.cfg = cfg self.bins = bins self.search_size = search_size self.logsoftmax = torch.nn.LogSoftmax(dim=1) self.focal = None self.range = cfg.MODEL.RANGE self.pre_num = cfg.MODEL.PRENUM self.loss_weight['KL'] = 0 self.loss_weight['focal'] = 0 self.pre_bbox = None self.x_feat_rem = None self.update_rem = None def __call__(self, data): """ args: data - The input data, should contain the fields 'template', 'search', 'gt_bbox'. template_images: (N_t, batch, 3, H, W) search_images: (N_s, batch, 3, H, W) returns: loss - the training loss status - dict containing detailed losses """ # forward pass out_dict = self.forward_pass(data) # compute losses loss, status = self.compute_losses(out_dict, data) return loss, status def _bbox_clip(self, cx, cy, width, height, boundary): cx = max(0, min(cx, boundary[1])) cy = max(0, min(cy, boundary[0])) width = max(10, min(width, boundary[1])) height = max(10, min(height, boundary[0])) return cx, cy, width, height def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans): """ args: im: bgr based image pos: center position model_sz: exemplar size s_z: original size avg_chans: channel average """ if isinstance(pos, float): pos = [pos, pos] sz = original_sz im_sz = im.shape c = (original_sz + 1) / 2 # context_xmin = round(pos[0] - c) # py2 and py3 round context_xmin = np.floor(pos[0] - c + 0.5) context_xmax = context_xmin + sz - 1 # context_ymin = round(pos[1] - c) context_ymin = np.floor(pos[1] - c + 0.5) context_ymax = context_ymin + sz - 1 left_pad = int(max(0., -context_xmin)) top_pad = int(max(0., -context_ymin)) right_pad = int(max(0., context_xmax - im_sz[1] + 1)) bottom_pad = int(max(0., context_ymax - im_sz[0] + 1)) context_xmin = context_xmin + left_pad context_xmax = context_xmax + left_pad context_ymin = context_ymin + top_pad context_ymax = context_ymax + top_pad r, c, k = im.shape if any([top_pad, bottom_pad, left_pad, right_pad]): size = (r + top_pad + bottom_pad, c + left_pad + 
right_pad, k) te_im = np.zeros(size, np.uint8) te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im if top_pad: te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans if bottom_pad: te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans if left_pad: te_im[:, 0:left_pad, :] = avg_chans if right_pad: te_im[:, c + left_pad:, :] = avg_chans im_patch = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] else: im_patch = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] if not np.array_equal(model_sz, original_sz): try: im_patch = cv2.resize(im_patch, (model_sz, model_sz)) except: return None im_patch = im_patch.transpose(2, 0, 1) im_patch = im_patch[np.newaxis, :, :, :] im_patch = im_patch.astype(np.float32) im_patch = torch.from_numpy(im_patch) im_patch = im_patch.cuda() return im_patch def batch_init(self, images, template_bbox, initial_bbox) -> dict: self.frame_num = 1 self.device = 'cuda' # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h) template_bbox = bbutils.batch_xywh2center2(template_bbox) # ndarray:(2*num_seq,4) initial_bbox = bbutils.batch_xywh2center2(initial_bbox) # ndarray:(2*num_seq,4) self.center_pos = initial_bbox[:, :2] # ndarray:(2*num_seq,2) self.size = initial_bbox[:, 2:] # ndarray:(2*num_seq,2) self.pre_bbox = initial_bbox for i in range(self.pre_num - 1): self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1) # print(self.pre_bbox.shape) template_factor = self.cfg.DATA.TEMPLATE.FACTOR w_z = template_bbox[:, 2] * template_factor # ndarray:(2*num_seq) h_z = template_bbox[:, 3] * template_factor # ndarray:(2*num_seq) s_z = np.ceil(np.sqrt(w_z * h_z)) # ndarray:(2*num_seq) self.channel_average = [] for img in images: self.channel_average.append(np.mean(img, axis=(0, 1))) self.channel_average = np.array(self.channel_average) # ndarray:(2*num_seq,3) # get crop z_crop_list = [] for i in range(len(images)): here_crop = self.get_subwindow(images[i], 
template_bbox[i, :2], self.cfg.DATA.TEMPLATE.SIZE, s_z[i], self.channel_average[i]) z_crop = here_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0) self.mean = [0.485, 0.456, 0.406] self.std = [0.229, 0.224, 0.225] self.inplace = False z_crop[0] = tvisf.normalize(z_crop[0], self.mean, self.std, self.inplace) z_crop_list.append(z_crop.clone()) z_crop = torch.cat(z_crop_list, dim=0) # Tensor(2*num_seq,3,128,128) self.update_rem = None out = {'template_images': z_crop} return out def batch_track(self, img, gt_boxes, template, action_mode='max') -> dict: search_factor = self.cfg.DATA.SEARCH.FACTOR w_x = self.size[:, 0] * search_factor h_x = self.size[:, 1] * search_factor s_x = np.ceil(np.sqrt(w_x * h_x)) gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes) # ndarray:(2*num_seq,4) x_crop_list = [] gt_in_crop_list = [] pre_seq_list = [] pre_seq_in_list = [] x_feat_list = [] magic_num = (self.range - 1) * 0.5 for i in range(len(img)): channel_avg = np.mean(img[i], axis=(0, 1)) x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE, round(s_x[i]), channel_avg) if x_crop == None: return None for q in range(self.pre_num): pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0 + 4 * q:4 + 4 * q]) if q == 0: pre_seq = pre_seq_temp else: pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1) if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10: pre_in = np.zeros(4 * self.pre_num) for w in range(self.pre_num): pre_in[0 + w * 4:2 + w * 4] = pre_seq[i, 0 + w * 4:2 + w * 4] - self.center_pos[i] pre_in[2 + w * 4:4 + w * 4] = pre_seq[i, 2 + w * 4:4 + w * 4] - self.center_pos[i] pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] * ( self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] / self.cfg.DATA.SEARCH.SIZE pre_seq_list.append(pre_in) gt_in_crop = np.zeros(4) gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i] 
gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i] gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2] # (x1,y1,x2,y2) to (x1,y1,w,h) gt_in_crop_list.append(gt_in_crop) else: pre_in = np.zeros(4 * self.pre_num) pre_seq_list.append(pre_in) gt_in_crop_list.append(np.zeros(4)) pre_seq_input = torch.from_numpy(pre_in).clamp(-1 * magic_num, 1 + magic_num) pre_seq_input = (pre_seq_input + 0.5) * (self.bins - 1) pre_seq_in_list.append(pre_seq_input.clone()) x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0) x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace) x_crop_list.append(x_crop.clone()) x_crop = torch.cat(x_crop_list, dim=0) pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4 * self.pre_num) outputs = self.net(template, x_crop, seq_input=pre_seq_output, head_type=None, stage="batch_track", search_feature=self.x_feat_rem, update=None) selected_indices = outputs['seqs'].detach() x_feat = outputs['x_feat'].detach().cpu() self.x_feat_rem = x_feat.clone() x_feat_list.append(x_feat.clone()) pred_bbox = selected_indices[:, 0:4].data.cpu().numpy() bbox = (pred_bbox / (self.bins - 1) - magic_num) * s_x.reshape(-1, 1) cx = bbox[:, 0] + self.center_pos[:, 0] - s_x / 2 cy = bbox[:, 1] + self.center_pos[:, 1] - s_x / 2 width = bbox[:, 2] - bbox[:, 0] height = bbox[:, 3] - bbox[:, 1] cx = cx + width / 2 cy = cy + height / 2 for i in range(len(img)): cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i], height[i], img[i].shape[:2]) self.center_pos = np.stack([cx, cy], 1) self.size = np.stack([width, height], 1) for e in range(self.pre_num): if e != self.pre_num - 1: self.pre_bbox[:, 0 + e * 4:4 + e * 4] = self.pre_bbox[:, 4 + e * 4:8 + e * 4] else: self.pre_bbox[:, 0 + e * 4:4 + e * 4] = numpy.stack([cx, cy, width, height], 1) bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1) out = { 'search_images': 
x_crop, 'pred_bboxes': bbox, 'selected_indices': selected_indices.cpu(), 'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float), 'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float), 'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float), } return out def explore(self, data): results = {} search_images_list = [] search_anno_list = [] iou_list = [] pre_seq_list = [] x_feat_list = [] num_frames = data['num_frames'] images = data['search_images'] gt_bbox = data['search_annos'] template = data['template_images'] template_bbox = data['template_annos'] template = template template_bbox = template_bbox template_bbox = np.array(template_bbox) num_seq = len(num_frames) for idx in range(np.max(num_frames)): here_images = [img[idx] for img in images] # S, N here_gt_bbox = np.array([gt[idx] for gt in gt_bbox]) here_images = here_images here_gt_bbox = np.concatenate([here_gt_bbox], 0) if idx == 0: outputs_template = self.batch_init(template, template_bbox, here_gt_bbox) results['template_images'] = outputs_template['template_images'] else: outputs = self.batch_track(here_images, here_gt_bbox, outputs_template['template_images'], action_mode='half') if outputs == None: return None x_feat = outputs['x_feat'] pred_bbox = outputs['pred_bboxes'] search_images_list.append(outputs['search_images']) search_anno_list.append(outputs['gt_in_crop']) if len(outputs['pre_seq']) != 8: print(outputs['pre_seq']) print(len(outputs['pre_seq'])) print(idx) print(data['num_frames']) print(data['search_annos']) return None pre_seq_list.append(outputs['pre_seq']) pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox) gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox) here_iou = [] for i in range(num_seq): bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i]) here_iou.append(bbox_iou) iou_list.append(here_iou) x_feat_list.append(x_feat.clone()) results['x_feat'] = torch.cat([torch.stack(x_feat_list)], 
dim=2) results['search_images'] = torch.cat([torch.stack(search_images_list)], dim=1) results['search_anno'] = torch.cat([torch.stack(search_anno_list)], dim=1) results['pre_seq'] = torch.cat([torch.stack(pre_seq_list)], dim=1) iou_tensor = torch.tensor(iou_list, dtype=torch.float) results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq]], dim=1) return results def forward_pass(self, data): # currently only support 1 template and 1 search region assert len(data['template_images']) == 1 assert len(data['search_images']) == 1 template_list = [] for i in range(self.settings.num_template): template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128) template_list.append(template_img_i) search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320) box_mask_z = None ce_keep_rate = None if self.cfg.MODEL.BACKBONE.CE_LOC: box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device, data['template_anno'][0]) ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch, total_epochs=ce_start_epoch + ce_warm_epoch, ITERS_PER_EPOCH=1, base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0]) if len(template_list) == 1: template_list = template_list[0] gt_bbox = data['search_anno'][-1] begin = self.bins end = self.bins + 1 gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2] gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3] gt_bbox = gt_bbox.clamp(min=0.5, max=1.5) data['real_bbox'] = gt_bbox seq_ori = gt_bbox * (self.bins - 1) seq_ori = seq_ori.int().to(search_img) B = seq_ori.shape[0] seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1) seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1) data['seq_input'] = seq_input data['seq_output'] = seq_output out_dict = self.net(template=template_list, 
search=search_img, ce_template_mask=box_mask_z, ce_keep_rate=ce_keep_rate, return_last_attn=False, seq_input=seq_input) return out_dict def compute_sequence_losses(self, data): num_frames = data['search_images'].shape[0] template_images = data['template_images'].repeat(num_frames, 1, 1, 1, 1) template_images = template_images.view(-1, *template_images.size()[2:]) search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:]) search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:]) magic_num = (self.range - 1) * 0.5 self.loss_weight['focal'] = 0 pre_seq = data['pre_seq'].reshape(-1, 4 * self.pre_num) x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:]) pre_seq = pre_seq.clamp(-1 * magic_num, 1 + magic_num) pre_seq = (pre_seq + magic_num) * (self.bins - 1) outputs = self.net(template_images, search_images, seq_input=pre_seq, stage="forward_pass", search_feature=x_feat, update=None) pred_feat = outputs["feat"] # generate labels if self.focal == None: weight = torch.ones(self.bins * self.range + 2) * 1 weight[self.bins * self.range + 1] = 0.1 weight[self.bins * self.range] = 0.1 weight.to(pred_feat) self.focal = torch.nn.CrossEntropyLoss(weight=weight, size_average=True).to(pred_feat) search_anno[:, 2] = search_anno[:, 2] + search_anno[:, 0] search_anno[:, 3] = search_anno[:, 3] + search_anno[:, 1] target = (search_anno / self.cfg.DATA.SEARCH.SIZE + 0.5) * (self.bins - 1) target = target.clamp(min=0.0, max=(self.bins * self.range - 0.0001)) target_iou = target target = torch.cat([target], dim=1) target = target.reshape(-1).to(torch.int64) pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 2) varifocal_loss = self.focal(pred, target) pred = pred_feat[0:4, :, 0:self.bins * self.range] target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - magic_num out = pred.softmax(-1).to(pred) mul = torch.range(-1 * magic_num + 1 / (self.bins * self.range), 1 + magic_num - 1 / (self.bins * self.range), 2 
/ (self.bins * self.range)).to(pred) ans = out * mul ans = ans.sum(dim=-1) ans = ans.permute(1, 0).to(pred) extra_seq = ans extra_seq = extra_seq.to(pred) cious, iou = SIoU_loss(extra_seq, target, 4) cious = cious.mean() giou_loss = cious loss_bb = self.loss_weight['giou'] * giou_loss + self.loss_weight[ 'focal'] * varifocal_loss total_losses = loss_bb mean_iou = iou.detach().mean() status = {"Loss/total": total_losses.item(), "Loss/giou": giou_loss.item(), "Loss/location": varifocal_loss.item(), "IoU": mean_iou.item()} return total_losses, status ================================================ FILE: lib/train/actors/artrackv2.py ================================================ from . import BaseActor from copy import deepcopy from lib.utils.misc import NestedTensor from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy import torch import math import numpy as np from lib.utils.merge import merge_template_search from ...utils.heapmap_utils import generate_heatmap from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate def fp16_clamp(x, min=None, max=None): if not x.is_cuda and x.dtype == torch.float16: # clamp for cpu float16, tensor fp16 has no clamp implementation return x.float().clamp(min, max).half() return x.clamp(min, max) def generate_sa_simdr(joints): ''' :param joints: [num_joints, 3] :param joints_vis: [num_joints, 3] :return: target, target_weight(1: visible, 0: invisible) ''' num_joints = 48 image_size = [256, 256] simdr_split_ratio = 1.5625 sigma = 6 target_x1 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32) target_y1 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32) target_x2 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32) target_y2 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32) zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32) tmp_size = sigma * 3 for joint_id in range(num_joints): 
mu_x1 = joints[joint_id][0] mu_y1 = joints[joint_id][1] mu_x2 = joints[joint_id][2] mu_y2 = joints[joint_id][3] x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32) y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32) x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32) y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32) target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) return target_x1, target_y1, target_x2, target_y2 # angle cost def SIoU_loss(test1, test2, theta=4): eps = 1e-7 cx_pred = (test1[:, 0] + test1[:, 2]) / 2 cy_pred = (test1[:, 1] + test1[:, 3]) / 2 cx_gt = (test2[:, 0] + test2[:, 2]) / 2 cy_gt = (test2[:, 1] + test2[:, 3]) / 2 dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5 ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred) x = ch / (dist + eps) angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2 # distance cost xmin = torch.min(test1[:, 0], test2[:, 0]) xmax = torch.max(test1[:, 2], test2[:, 2]) ymin = torch.min(test1[:, 1], test2[:, 1]) ymax = torch.max(test1[:, 3], test2[:, 3]) cw = xmax - xmin ch = ymax - ymin px = ((cx_gt - cx_pred) / (cw + eps)) ** 2 py = ((cy_gt - cy_pred) / (ch + eps)) ** 2 gama = 2 - angle dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py)) # shape cost w_pred = test1[:, 2] - test1[:, 0] h_pred = test1[:, 3] - test1[:, 1] w_gt = test2[:, 2] - test2[:, 0] h_gt = test2[:, 3] - test2[:, 1] ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps) wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps) omega = (1 - 
torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta # IoU loss lt = torch.max(test1[..., :2], test2[..., :2]) # [B, rows, 2] rb = torch.min(test1[..., 2:], test2[..., 2:]) # [B, rows, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] area1 = (test1[..., 2] - test1[..., 0]) * ( test1[..., 3] - test1[..., 1]) area2 = (test2[..., 2] - test2[..., 0]) * ( test2[..., 3] - test2[..., 1]) iou = overlap / (area1 + area2 - overlap) SIoU = 1 - iou + (omega + dis) / 2 return SIoU, iou def ciou(pred, target, eps=1e-7): # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw ** 2 + ch ** 2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4 rho2 = left + right factor = 4 / math.pi ** 2 v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) # CIoU cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v)) return cious, ious class ARTrackV2Actor(BaseActor): """ Actor for training OSTrack models """ def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None): super().__init__(net, objective) self.loss_weight = loss_weight self.settings = settings self.bs = self.settings.batchsize # batch size self.cfg = 
cfg self.bins = bins self.search_size = search_size self.logsoftmax = torch.nn.LogSoftmax(dim=1) self.focal = None self.range = self.cfg.MODEL.RANGE self.loss_weight['KL'] = 100 self.loss_weight['focal'] = 2 self.loss_weight['renew'] = 0.3 def __call__(self, data): """ args: data - The input data, should contain the fields 'template', 'search', 'gt_bbox'. template_images: (N_t, batch, 3, H, W) search_images: (N_s, batch, 3, H, W) returns: loss - the training loss status - dict containing detailed losses """ # forward pass out_dict = self.forward_pass(data) # compute losses loss, status = self.compute_losses(out_dict, data) return loss, status def forward_pass(self, data): # currently only support 1 template and 1 search region assert len(data['template_images']) == 2 assert len(data['search_images']) == 1 # print(data['dataset']) template_list = [] for i in range(self.settings.num_template): template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128) # template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128) template_list.append(template_img_i) search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320) target_in_search_img = data['target_in_search_images'][0].view(-1, *data['target_in_search_images'].shape[ 2:]) # (batch, 3, 320, 320) gt_bboxes = deepcopy(data['search_anno']) # search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320) box_mask_z = None ce_keep_rate = None if self.cfg.MODEL.BACKBONE.CE_LOC: box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device, data['template_anno'][0]) ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch, total_epochs=ce_start_epoch + ce_warm_epoch, ITERS_PER_EPOCH=1, 
base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0]) if len(template_list) == 1: template_list = template_list[0] gt_bbox = data['search_anno'][-1] x0 = self.bins * self.range y0 = self.bins * self.range + 1 x1 = self.bins * self.range + 2 y1 = self.bins * self.range + 3 score = self.bins * self.range + 5 end = self.bins * self.range + 4 gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2] gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3] gt_bbox = gt_bbox.clamp(min=(-0.5 * self.range + 0.5), max=(0.5 + self.range * 0.5)) data['real_bbox'] = gt_bbox seq_ori = (gt_bbox + (self.range * 0.5 - 0.5)) * (self.bins - 1) seq_ori = seq_ori.int().to(search_img) B = seq_ori.shape[0] seq_ori_4_4 = seq_ori[:, 0:3] seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * x0, torch.ones((B, 1)).to(search_img) * y0, torch.ones((B, 1)).to(search_img) * x1, torch.ones((B, 1)).to(search_img) * y1, torch.ones((B, 1)).to(search_img) * score], dim=1) seq_output = torch.cat([seq_ori], dim=1) data['seq_input'] = seq_input data['seq_output'] = seq_output out_dict = self.net(template=template_list, search=search_img, ce_template_mask=box_mask_z, ce_keep_rate=ce_keep_rate, return_last_attn=False, seq_input=seq_input, target_in_search_img=target_in_search_img, gt_bboxes=gt_bboxes[-1]) return out_dict def compute_losses(self, pred_dict, gt_dict, return_status=True): # gt gaussian map bins = self.bins gt_bbox = gt_dict['search_anno'][-1] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4) real_bbox = gt_dict['real_bbox'] seq_output = gt_dict['seq_output'] pred_feat = pred_dict["feat"] if self.focal == None: weight = torch.ones(bins * self.range + 6) * 1 weight[bins * self.range + 4] = 0.1 weight[bins * self.range + 3] = 0.1 weight[bins * self.range + 2] = 0.1 weight[bins * self.range + 1] = 0.1 weight[bins * self.range] = 0.1 weight.to(pred_feat) self.focal = torch.nn.CrossEntropyLoss(weight=weight, size_average=True).to(pred_feat) pred = pred_feat.permute(1, 0, 2).reshape(-1, bins * self.range + 6) target = 
seq_output.reshape(-1).to(torch.int64) varifocal_loss = self.focal(pred, target) beta = 1 pred = pred_feat[0:4, :, 0:bins * self.range] * beta target = seq_output[:, 0:4].to(pred_feat) target_box = seq_output[:, 0:4].cpu().numpy() out = pred.softmax(-1).to(pred) mul = torch.range((self.range * 0.5 * -1 + 0.5) + 1 / (self.bins * self.range), (0.5 + self.range * 0.5) - 1 / (self.bins * self.range), 2 / (self.bins * self.range)).to(pred) ans = out * mul ans = ans.sum(dim=-1) ans = ans.permute(1, 0).to(pred) target = target / (bins - 1) - (self.range * 0.5 - 0.5) extra_seq = ans extra_seq = extra_seq.to(pred) cious, iou = SIoU_loss(extra_seq, target, 4) cious = cious.mean() giou_loss = cious l1_loss = self.objective['l1'](extra_seq, target) score = pred_dict["score"] score_loss = self.objective['l1'](score, iou) loss = self.loss_weight['giou'] * giou_loss + self.loss_weight[ 'focal'] * varifocal_loss + self.loss_weight['score'] * score_loss if return_status: # status for log mean_iou = iou.detach().mean() status = {"Loss/total": loss.item(), "Loss/score": score_loss.item(), "Loss/giou": giou_loss.item(), "Loss/l1": l1_loss.item(), "Loss/location": varifocal_loss.item(), "IoU": mean_iou.item()} return loss, status else: return loss ================================================ FILE: lib/train/actors/artrackv2_seq.py ================================================ from . 
import BaseActor
from lib.utils.misc import NestedTensor
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy
import torch
import math
import numpy as np
import numpy
import cv2
import torch.nn.functional as F
import torchvision.transforms.functional as tvisf
import lib.train.data.bounding_box_utils as bbutils
from lib.utils.merge import merge_template_search
from torch.distributions.categorical import Categorical

from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate


def IoU(rect1, rect2):
    """Intersection-over-union of two corner-format rectangles.

    Args:
        rect1: (x1, y1, x2, y2)
        rect2: (x1, y1, x2, y2)

    Returns:
        iou
    """
    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]
    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]

    # Overlap rectangle (clamped to non-negative extent).
    xx1 = np.maximum(tx1, x1)
    yy1 = np.maximum(ty1, y1)
    xx2 = np.minimum(tx2, x2)
    yy2 = np.minimum(ty2, y2)
    ww = np.maximum(0, xx2 - xx1)
    hh = np.maximum(0, yy2 - yy1)

    area = (x2 - x1) * (y2 - y1)
    target_a = (tx2 - tx1) * (ty2 - ty1)
    inter = ww * hh
    iou = inter / (area + target_a - inter)
    return iou


def fp16_clamp(x, min=None, max=None):
    """Clamp that also works for CPU float16 tensors (no native clamp kernel)."""
    if not x.is_cuda and x.dtype == torch.float16:
        # clamp for cpu float16, tensor fp16 has no clamp implementation
        return x.float().clamp(min, max).half()
    return x.clamp(min, max)


def generate_sa_simdr(joints):
    '''
    :param joints:  [num_joints, 3]
    :param joints_vis: [num_joints, 3]
    :return: target, target_weight(1: visible, 0: invisible)
    '''
    num_joints = 48
    image_size = [256, 256]
    simdr_split_ratio = 1.5625
    sigma = 6

    target_x1 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32)
    target_y1 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32)
    target_x2 = np.zeros((num_joints, int(image_size[0] * simdr_split_ratio)), dtype=np.float32)
    target_y2 = np.zeros((num_joints, int(image_size[1] * simdr_split_ratio)), dtype=np.float32)
    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)

    tmp_size
= sigma * 3 for joint_id in range(num_joints): mu_x1 = joints[joint_id][0] mu_y1 = joints[joint_id][1] mu_x2 = joints[joint_id][2] mu_y2 = joints[joint_id][3] x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32) y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32) x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32) y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32) target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / ( sigma * np.sqrt(np.pi * 2)) return target_x1, target_y1, target_x2, target_y2 # angle cost def SIoU_loss(test1, test2, theta=4): eps = 1e-7 cx_pred = (test1[:, 0] + test1[:, 2]) / 2 cy_pred = (test1[:, 1] + test1[:, 3]) / 2 cx_gt = (test2[:, 0] + test2[:, 2]) / 2 cy_gt = (test2[:, 1] + test2[:, 3]) / 2 dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5 ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred) x = ch / (dist + eps) angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2 # distance cost xmin = torch.min(test1[:, 0], test2[:, 0]) xmax = torch.max(test1[:, 2], test2[:, 2]) ymin = torch.min(test1[:, 1], test2[:, 1]) ymax = torch.max(test1[:, 3], test2[:, 3]) cw = xmax - xmin ch = ymax - ymin px = ((cx_gt - cx_pred) / (cw + eps)) ** 2 py = ((cy_gt - cy_pred) / (ch + eps)) ** 2 gama = 2 - angle dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py)) # shape cost w_pred = test1[:, 2] - test1[:, 0] h_pred = test1[:, 3] - test1[:, 1] w_gt = test2[:, 2] - test2[:, 0] h_gt = test2[:, 3] - test2[:, 1] ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps) wh = torch.abs(h_gt - h_pred) / 
(torch.max(h_gt, h_pred) + eps) omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta # IoU loss lt = torch.max(test1[..., :2], test2[..., :2]) # [B, rows, 2] rb = torch.min(test1[..., 2:], test2[..., 2:]) # [B, rows, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] area1 = (test1[..., 2] - test1[..., 0]) * ( test1[..., 3] - test1[..., 1]) area2 = (test2[..., 2] - test2[..., 0]) * ( test2[..., 3] - test2[..., 1]) iou = overlap / (area1 + area2 - overlap) SIoU = 1 - iou + (omega + dis) / 2 return SIoU, iou def ciou(pred, target, eps=1e-7): # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw ** 2 + ch ** 2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4 rho2 = left + right factor = 4 / math.pi ** 2 v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) # CIoU cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v)) return cious, ious class ARTrackV2SeqActor(BaseActor): """ Actor for training OSTrack models """ def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None): super().__init__(net, objective) self.loss_weight = loss_weight self.settings = settings self.bs = 
self.settings.batchsize # batch size self.cfg = cfg self.bins = bins self.search_size = search_size self.logsoftmax = torch.nn.LogSoftmax(dim=1) self.focal = None self.range = cfg.MODEL.RANGE self.loss_weight['KL'] = 0 self.loss_weight['focal'] = 0 self.pre_num = cfg.MODEL.PRENUM self.pre_bbox = None self.x_feat_rem = None def __call__(self, data): """ args: data - The input data, should contain the fields 'template', 'search', 'gt_bbox'. template_images: (N_t, batch, 3, H, W) search_images: (N_s, batch, 3, H, W) returns: loss - the training loss status - dict containing detailed losses """ # forward pass out_dict = self.forward_pass(data) # compute losses loss, status = self.compute_losses(out_dict, data) return loss, status def _bbox_clip(self, cx, cy, width, height, boundary): cx = max(0, min(cx, boundary[1])) cy = max(0, min(cy, boundary[0])) width = max(10, min(width, boundary[1])) height = max(10, min(height, boundary[0])) return cx, cy, width, height def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans): """ args: im: bgr based image pos: center position model_sz: exemplar size s_z: original size avg_chans: channel average """ if isinstance(pos, float): pos = [pos, pos] sz = original_sz im_sz = im.shape c = (original_sz + 1) / 2 context_xmin = np.floor(pos[0] - c + 0.5) context_xmax = context_xmin + sz - 1 context_ymin = np.floor(pos[1] - c + 0.5) context_ymax = context_ymin + sz - 1 left_pad = int(max(0., -context_xmin)) top_pad = int(max(0., -context_ymin)) right_pad = int(max(0., context_xmax - im_sz[1] + 1)) bottom_pad = int(max(0., context_ymax - im_sz[0] + 1)) context_xmin = context_xmin + left_pad context_xmax = context_xmax + left_pad context_ymin = context_ymin + top_pad context_ymax = context_ymax + top_pad r, c, k = im.shape if any([top_pad, bottom_pad, left_pad, right_pad]): size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k) te_im = np.zeros(size, np.uint8) te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im if 
top_pad: te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans if bottom_pad: te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans if left_pad: te_im[:, 0:left_pad, :] = avg_chans if right_pad: te_im[:, c + left_pad:, :] = avg_chans im_patch = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] else: im_patch = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :] if not np.array_equal(model_sz, original_sz): try: im_patch = cv2.resize(im_patch, (model_sz, model_sz)) except: return None im_patch = im_patch.transpose(2, 0, 1) im_patch = im_patch[np.newaxis, :, :, :] im_patch = im_patch.astype(np.float32) im_patch = torch.from_numpy(im_patch) im_patch = im_patch.cuda() return im_patch def batch_init(self, images, template_bbox, initial_bbox) -> dict: self.frame_num = 1 self.device = 'cuda' # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h) template_bbox_1 = template_bbox[:, 0] template_bbox_2 = template_bbox[:, 1] template_bbox_1 = bbutils.batch_xywh2center2(template_bbox_1) # ndarray:(2*num_seq,4) template_bbox_2 = bbutils.batch_xywh2center2(template_bbox_2) # ndarray:(2*num_seq,4) initial_bbox = bbutils.batch_xywh2center2(initial_bbox) # ndarray:(2*num_seq,4) self.center_pos = initial_bbox[:, :2] # ndarray:(2*num_seq,2) self.size = initial_bbox[:, 2:] # ndarray:(2*num_seq,2) self.pre_bbox = initial_bbox for i in range(self.pre_num - 1): self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1) template_factor = self.cfg.DATA.TEMPLATE.FACTOR w_z_1 = template_bbox_1[:, 2] * template_factor # ndarray:(2*num_seq) h_z_1 = template_bbox_1[:, 3] * template_factor # ndarray:(2*num_seq) s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1)) # ndarray:(2*num_seq) w_z_2 = template_bbox_2[:, 2] * template_factor # ndarray:(2*num_seq) h_z_2 = template_bbox_2[:, 3] * template_factor # ndarray:(2*num_seq) s_z_2 = np.ceil(np.sqrt(w_z_2 * h_z_2)) # ndarray:(2*num_seq) self.channel_average = [] for img in 
images: self.channel_average.append(np.mean(img[0], axis=(0, 1))) self.channel_average.append(np.mean(img[1], axis=(0, 1))) self.channel_average = np.array(self.channel_average) # ndarray:(2*num_seq,3) # get crop z_crop_list = [] z_1_list = [] z_2_list = [] for i in range(len(images)): here_crop_1 = self.get_subwindow(images[i][0], template_bbox_1[i, :2], self.cfg.DATA.TEMPLATE.SIZE, s_z_1[i], self.channel_average[2 * i]) here_crop_2 = self.get_subwindow(images[i][1], template_bbox_2[i, :2], self.cfg.DATA.TEMPLATE.SIZE, s_z_2[i], self.channel_average[2 * i + 1]) z_crop_1 = here_crop_1.float().mul(1.0 / 255.0).clamp(0.0, 1.0) z_crop_2 = here_crop_2.float().mul(1.0 / 255.0).clamp(0.0, 1.0) self.mean = [0.485, 0.456, 0.406] self.std = [0.229, 0.224, 0.225] self.inplace = False z_crop_1[0] = tvisf.normalize(z_crop_1[0], self.mean, self.std, self.inplace) z_crop_2[0] = tvisf.normalize(z_crop_2[0], self.mean, self.std, self.inplace) z_1_list.append(z_crop_1.unsqueeze(1).clone()) z_2_list.append(z_crop_2.unsqueeze(1).clone()) z_crop = torch.concat([z_crop_1.unsqueeze(1), z_crop_2.unsqueeze(1)], dim=1) z_crop_list.append(z_crop.clone()) z_crop = torch.cat(z_crop_list, dim=0) # Tensor(2*num_seq,3,128,128) z_1_crop = torch.cat(z_1_list, dim=0) z_2_crop = torch.cat(z_2_list, dim=0) model_to_access = getattr(self.net, 'module', self.net) z_2_crop = z_2_crop.squeeze(1).to(model_to_access.backbone.word_embeddings.weight) z_2_feat = model_to_access.backbone.patch_embed(z_2_crop) out = {'template_images': z_crop, "z_1": z_1_crop, "z_2": z_2_crop, "z_2_feat": z_2_feat} return out def batch_track(self, img, gt_boxes, template, dz_feat, action_mode='max') -> dict: search_factor = self.cfg.DATA.SEARCH.FACTOR w_x = self.size[:, 0] * search_factor h_x = self.size[:, 1] * search_factor s_x = np.ceil(np.sqrt(w_x * h_x)) gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes) # ndarray:(2*num_seq,4) initial_bbox = bbutils.batch_xywh2center2(gt_boxes) x_crop_list = [] gt_in_crop_list = [] 
pre_seq_list = [] pre_seq_in_list = [] x_feat_list = [] target_in_search_list = [] update_feat_list = [] for i in range(len(img)): template_factor = self.cfg.DATA.TEMPLATE.FACTOR w_z_1 = initial_bbox[:, 2] * template_factor # ndarray:(2*num_seq) h_z_1 = initial_bbox[:, 3] * template_factor # ndarray:(2*num_seq) s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1)) # ndarray:(2*num_seq) channel_avg = np.mean(img[i], axis=(0, 1)) target_in_search = self.get_subwindow(img[i], initial_bbox[i, :2], self.cfg.DATA.TEMPLATE.SIZE, round(s_z_1[i]), channel_avg) x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE, round(s_x[i]), channel_avg) if x_crop == None: return None if target_in_search == None: return None for q in range(self.pre_num): pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0 + 4 * q:4 + 4 * q]) if q == 0: pre_seq = pre_seq_temp else: pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1) if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10: pre_in = np.zeros(4 * self.pre_num) for w in range(self.pre_num): pre_in[0 + w * 4:2 + w * 4] = pre_seq[i, 0 + w * 4:2 + w * 4] - self.center_pos[i] pre_in[2 + w * 4:4 + w * 4] = pre_seq[i, 2 + w * 4:4 + w * 4] - self.center_pos[i] pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] * ( self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] / self.cfg.DATA.SEARCH.SIZE pre_seq_list.append(pre_in) gt_in_crop = np.zeros(4) gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i] gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i] gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2 gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2] # (x1,y1,x2,y2) to (x1,y1,w,h) gt_in_crop_list.append(gt_in_crop) else: pre_in = np.zeros(4 * self.pre_num) pre_seq_list.append(pre_in) gt_in_crop_list.append(np.zeros(4)) pre_seq_input = 
torch.from_numpy(pre_in).clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5) pre_seq_input = (pre_seq_input + (0.5 * self.range - 0.5)) * (self.bins - 1) pre_seq_in_list.append(pre_seq_input.clone()) x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0) target_in_search = target_in_search.float().mul(1.0 / 255.0).clamp(0.0, 1.0) rem_x = x_crop x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace) target_in_search[0] = tvisf.normalize(target_in_search[0], self.mean, self.std, self.inplace) x_crop_list.append(x_crop.clone()) target_in_search_list.append(target_in_search.clone()) x_crop = torch.cat(x_crop_list, dim=0) target_in_search = torch.cat(target_in_search_list, dim=0) pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4 * self.pre_num) pre = torch.zeros_like(pre_seq_output) outputs = self.net(template, dz_feat.cuda(), x_crop, seq_input=pre_seq_output, head_type=None, stage="batch_track", search_feature=self.x_feat_rem, target_in_search_img=target_in_search, gt_bboxes=gt_boxes[-1]) selected_indices = outputs['seqs'].detach() x_feat = outputs['x_feat'].detach().cpu() self.x_feat_rem = x_feat.clone() x_feat_list.append(x_feat.clone()) update_feat = outputs['dz_feat'].detach().cpu() update_feat_list.append(update_feat.clone()) pred_bbox = selected_indices[:, 0:4].data.cpu().numpy() bbox = (pred_bbox / (self.bins - 1) - (self.range * 0.5 - 0.5)) * s_x.reshape(-1, 1) cx = bbox[:, 0] + self.center_pos[:, 0] - s_x / 2 cy = bbox[:, 1] + self.center_pos[:, 1] - s_x / 2 width = bbox[:, 2] - bbox[:, 0] height = bbox[:, 3] - bbox[:, 1] cx = cx + width / 2 cy = cy + height / 2 for i in range(len(img)): cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i], height[i], img[i].shape[:2]) self.center_pos = np.stack([cx, cy], 1) self.size = np.stack([width, height], 1) for e in range(self.pre_num): if e != self.pre_num - 1: self.pre_bbox[:, 0 + e * 4:4 + e * 4] = self.pre_bbox[:, 4 + e * 4:8 + e * 4] else: 
self.pre_bbox[:, 0 + e * 4:4 + e * 4] = numpy.stack([cx, cy, width, height], 1) bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1) out = { 'dz_feat': update_feat, 'search_images': x_crop, 'target_in_search': target_in_search, 'pred_bboxes': bbox, 'selected_indices': selected_indices.cpu(), 'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float), 'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float), 'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float), } return out def explore(self, data): results = {} search_images_list = [] search_anno_list = [] action_tensor_list = [] iou_list = [] # cover_list = [] pre_seq_list = [] x_feat_list = [] target_in_search_list = [] template_all_list = [] dz_feat_udpate_list = [] num_frames = data['num_frames'] images = data['search_images'] gt_bbox = data['search_annos'] template = data['template_images'] template_bbox = data['template_annos'] template = template template_bbox = template_bbox template_bbox = np.array(template_bbox) num_seq = len(num_frames) for idx in range(np.max(num_frames)): here_images = [img[idx] for img in images] # S, N here_gt_bbox = np.array([gt[idx] for gt in gt_bbox]) here_images = here_images here_gt_bbox = np.concatenate([here_gt_bbox], 0) if idx == 0: outputs_template = self.batch_init(template, template_bbox, here_gt_bbox) results['template_images'] = outputs_template['z_1'] self.template_temp = outputs_template['z_1'].clone() z_all = [outputs_template['z_1'], outputs_template['z_2']] results['z_all'] = z_all self.dz_feat_update = outputs_template['z_2_feat'] else: outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half') if outputs == None: return None template_all_list.append(self.template_temp.clone()) dz_feat_udpate_list.append(self.dz_feat_update.clone().to(outputs['dz_feat'])) x_feat = outputs['x_feat'] self.dz_feat_update = 
outputs['dz_feat'] pred_bbox = outputs['pred_bboxes'] search_images_list.append(outputs['search_images']) target_in_search_list.append(outputs['target_in_search']) search_anno_list.append(outputs['gt_in_crop']) if len(outputs['pre_seq']) != 8: print(outputs['pre_seq']) print(len(outputs['pre_seq'])) print(idx) print(data['num_frames']) print(data['search_annos']) return None pre_seq_list.append(outputs['pre_seq']) pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox) gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox) here_iou = [] for i in range(num_seq): bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i]) here_iou.append(bbox_iou) iou_list.append(here_iou) x_feat_list.append(x_feat.clone()) search_images_reverse_list = [] search_anno_reverse_list = [] action_tensor_reverse_list = [] iou_reverse_list = [] pre_seq_reverse_list = [] x_feat_reverse_list = [] target_in_search_reverse_list = [] dz_feat_update_reverse_list = [] template_all_reverse_list = [] for idx in range(np.max(num_frames)): real_idx = np.max(num_frames) - 1 - idx here_images = [img[real_idx] for img in images] # S, N here_gt_bbox = np.array([gt[real_idx] for gt in gt_bbox]) here_images = here_images here_gt_bbox = np.concatenate([here_gt_bbox], 0) if idx == 0: outputs_template = self.batch_init(template, template_bbox, here_gt_bbox) results['template_images'] = outputs_template['z_1'] self.template_temp = outputs_template['z_1'].clone() z_all = [outputs_template['z_1'], outputs_template['z_2']] results['z_all'] = z_all self.dz_feat_update = outputs_template['z_2_feat'].clone() else: outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half') if outputs == None: return None template_all_reverse_list.append(self.template_temp.clone()) dz_feat_update_reverse_list.append(self.dz_feat_update.clone().to(outputs['dz_feat'])) x_feat = outputs['x_feat'] self.dz_feat_update = outputs['dz_feat'] pred_bbox = outputs['pred_bboxes'] 
search_images_reverse_list.append(outputs['search_images']) target_in_search_reverse_list.append(outputs['target_in_search']) search_anno_reverse_list.append(outputs['gt_in_crop']) if len(outputs['pre_seq']) != 8: print(outputs['pre_seq']) print(len(outputs['pre_seq'])) print(idx) print(data['num_frames']) print(data['search_annos']) return None pre_seq_reverse_list.append(outputs['pre_seq']) pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox) gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox) here_iou = [] for i in range(num_seq): bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i]) here_iou.append(bbox_iou) iou_reverse_list.append(here_iou) x_feat_reverse_list.append(x_feat.clone()) results['x_feat'] = torch.cat([torch.stack(x_feat_list), torch.stack(x_feat_reverse_list)], dim=2) results['search_images'] = torch.cat([torch.stack(search_images_list), torch.stack(search_images_reverse_list)], dim=1) results['template_images_z0'] = torch.cat( [torch.stack(template_all_list), torch.stack(template_all_reverse_list)], dim=1) results['dz_feat_update'] = torch.cat( [torch.stack(dz_feat_udpate_list), torch.stack(dz_feat_update_reverse_list)], dim=1) results['search_anno'] = torch.cat([torch.stack(search_anno_list), torch.stack(search_anno_reverse_list)], dim=1) results['pre_seq'] = torch.cat([torch.stack(pre_seq_list), torch.stack(pre_seq_reverse_list)], dim=1) results['target_in_search'] = torch.cat( [torch.stack(target_in_search_list), torch.stack(target_in_search_reverse_list)], dim=1) iou_tensor = torch.tensor(iou_list, dtype=torch.float) iou_tensor_reverse = torch.tensor(iou_reverse_list, dtype=torch.float) results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq], iou_tensor_reverse[:, :num_seq]], dim=1) # results['explore_iou'] = iou_tensor[:, num_seq:] # results['action_tensor'] = torch.stack(action_tensor_list) return results def forward_pass(self, data): # currently only support 1 template and 1 search region assert len(data['template_images']) == 
        # NOTE(review): fragment — the leading `1` completes
        # `assert len(data['template_images']) ==` from the previous chunk.
        1
        assert len(data['search_images']) == 1
        template_list = []
        for i in range(self.settings.num_template):
            template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:])
            # (batch, 3, 128, 128)
            template_list.append(template_img_i)
        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])
        # (batch, 3, 320, 320)
        box_mask_z = None
        ce_keep_rate = None
        if self.cfg.MODEL.BACKBONE.CE_LOC:
            # candidate-elimination: template mask + warmed-up token keep rate
            box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0],
                                            template_list[0].device, data['template_anno'][0])
            ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH
            ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH
            ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,
                                            total_epochs=ce_start_epoch + ce_warm_epoch,
                                            ITERS_PER_EPOCH=1,
                                            base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])
        if len(template_list) == 1:
            template_list = template_list[0]
        # build the teacher token sequence: [BEGIN, x1, y1, x2, y2] / targets end with END
        gt_bbox = data['search_anno'][-1]
        begin = self.bins
        end = self.bins + 1
        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]  # (x,y,w,h) -> (x1,y1,x2,y2), in place
        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]
        gt_bbox = gt_bbox.clamp(min=0.0, max=1.0)
        data['real_bbox'] = gt_bbox
        seq_ori = gt_bbox * (self.bins - 1)
        seq_ori = seq_ori.int().to(search_img)
        B = seq_ori.shape[0]
        # NOTE(review): seq_ori_4_4 is never used below — dead local, candidate removal
        seq_ori_4_4 = seq_ori[:, 0:3]
        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)
        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)
        data['seq_input'] = seq_input
        data['seq_output'] = seq_output
        out_dict = self.net(template=template_list, search=search_img,
                            ce_template_mask=box_mask_z, ce_keep_rate=ce_keep_rate,
                            return_last_attn=False, seq_input=seq_input)
        return out_dict

    def compute_sequence_losses(self, data):
        """Compute the sequence-level training losses from one explore() batch.

        NOTE(review): this method continues into the next chunks; only the head
        and middle are here.
        """
        num_frames = data['search_images'].shape[0]
        # flatten (time, batch, ...) -> (time*batch, ...)
        template_images_for = data['template_images_z0'].reshape(-1, *data['template_images_z0'].size()[2:])
        dz_feat = data['dz_feat_update'].reshape(-1, *data['dz_feat_update'].size()[2:])
        target_in_search = data['target_in_search'].reshape(-1,
                                                            *data['target_in_search'].size()[2:])
        search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:])
        search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:])
        pre_seq = data['pre_seq'].reshape(-1, 4 * self.pre_num)
        x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:])
        epoch = data['epoch']
        # hand-tuned loss-weight schedule over training epochs
        if epoch < 11:
            self.loss_weight['focal'] = 2
            self.loss_weight['score_update'] = 1
        elif epoch < 31:
            self.loss_weight['focal'] = 0
            self.loss_weight['score_update'] = 0.1
        else:
            self.loss_weight['focal'] = 0
            self.loss_weight['score_update'] = 0.0
        # quantize the previous trajectory to vocabulary indices
        pre_seq = pre_seq.clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5)
        pre_seq = (pre_seq + (self.range * 0.5 - 0.5)) * (self.bins - 1)
        outputs = self.net(template_images_for, dz_feat, search_images, seq_input=pre_seq,
                           stage="forward_pass", search_feature=x_feat,
                           target_in_search_img=target_in_search)
        score = outputs['score']
        renew_loss = outputs['renew_loss']
        pred_feat = outputs["feat"]
        # lazily build the token-classification loss; special tokens get weight 0.1
        if self.focal == None:
            weight = torch.ones(self.bins * self.range + 6) * 1
            weight[self.bins * self.range + 4] = 0.1
            weight[self.bins * self.range + 3] = 0.1
            weight[self.bins * self.range + 2] = 0.1
            weight[self.bins * self.range + 1] = 0.1
            weight[self.bins * self.range] = 0.1
            # NOTE(review): this `.to()` result is discarded; the module-level
            # `.to(pred_feat)` below moves the registered weight anyway.
            weight.to(pred_feat)
            # NOTE(review): `size_average` is deprecated in torch.nn.CrossEntropyLoss
            # in favour of `reduction` — confirm before changing.
            self.focal = torch.nn.CrossEntropyLoss(weight=weight, size_average=True).to(pred_feat)
        # targets: (x,y,w,h) -> (x1,y1,x2,y2), then quantized to bins
        search_anno[:, 2] = search_anno[:, 2] + search_anno[:, 0]
        search_anno[:, 3] = search_anno[:, 3] + search_anno[:, 1]
        target = (search_anno / self.cfg.DATA.SEARCH.SIZE + (self.range * 0.5 - 0.5)) * (self.bins - 1)
        target = target.clamp(min=0.0, max=(self.bins * self.range - 0.0001))
        target_iou = target
        # NOTE(review): end_flag is built but never concatenated below — dead local
        end_flag = torch.ones((target.shape[0], 1)) * (self.bins * self.range + 1)
        end_flag = end_flag.to(target)
        target = torch.cat([target], dim=1)
        target = target.reshape(-1).to(torch.int64)
        pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 6)
        varifocal_loss = self.focal(pred, target)
        # NOTE(review): fragment boundary — this statement continues in the next chunk.
        pred = pred_feat[0:4, :,
        # NOTE(review): fragment — completes `pred = pred_feat[0:4, :, ...]` from the
        # previous chunk; this is the tail of compute_sequence_losses().
        0:self.bins * self.range]
        # expected-value decoding of the 4 coordinate distributions
        target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - (self.range * 0.5 - 0.5)
        out = pred.softmax(-1).to(pred)
        # NOTE(review): torch.range is deprecated (inclusive endpoint) in favour of
        # torch.arange — bin centres depend on the endpoint, confirm before changing.
        mul = torch.range((-1 * self.range * 0.5 + 0.5) + 1 / (self.bins * self.range),
                          (self.range * 0.5 + 0.5) - 1 / (self.bins * self.range),
                          2 / (self.bins * self.range)).to(pred)
        ans = out * mul
        ans = ans.sum(dim=-1)
        ans = ans.permute(1, 0).to(pred)
        extra_seq = ans
        extra_seq = extra_seq.to(pred)
        cious, iou = SIoU_loss(extra_seq, target, 4)
        cious = cious.mean()
        score_real = score
        score_loss = self.objective['l1'](score_real, iou)
        giou_loss = cious
        l1_loss = self.objective['l1'](extra_seq, target)
        loss_bb = (self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight[
            'focal'] * varifocal_loss)
        total_losses = loss_bb + renew_loss * self.loss_weight['score_update'] + score_loss * self.loss_weight['score_update']
        mean_iou = iou.detach().mean()
        # NOTE(review): the /2 presumably averages over the forward+reverse halves of
        # the batch — confirm against explore()'s stacking.
        status = {"Loss/total": total_losses.item() / 2,
                  "Loss/score": score_loss.item() / 2,
                  "Loss/giou": giou_loss.item() / 2,
                  "Loss/l1": l1_loss.item() / 2,
                  "Loss/location": varifocal_loss.item() / 2,
                  "Loss/renew": renew_loss.item() / 2,
                  "IoU": mean_iou.item() / 2}
        return total_losses, status


================================================
FILE: lib/train/actors/base_actor.py
================================================
from lib.utils import TensorDict


class BaseActor:
    """ Base class for actor. The actor class handles the passing of the data through the network
    and calculation the loss"""
    def __init__(self, net, objective):
        """
        args:
            net - The network to train
            objective - The loss function
        """
        self.net = net
        self.objective = objective

    def __call__(self, data: TensorDict):
        """ Called in each training iteration. Should pass in input data through the network, calculate the loss, and
        return the training stats for the input data
        args:
            data - A TensorDict containing all the necessary data blocks.

        returns:
            loss    - loss for the input data
            stats   - a dict containing detailed losses
        """
        raise NotImplementedError

    def to(self, device):
        """ Move the network to device
        args:
            device - device to use. 'cpu' or 'cuda'
        """
        self.net.to(device)

    def train(self, mode=True):
        """ Set whether the network is in train mode.
        args:
            mode (True) - Bool specifying whether in training mode.
        """
        self.net.train(mode)

    def eval(self):
        """ Set network to eval mode"""
        self.train(False)


================================================
FILE: lib/train/admin/__init__.py
================================================
from .environment import env_settings, create_default_local_file_ITP_train
from .stats import AverageMeter, StatValue
#from .tensorboard import TensorboardWriter


================================================
FILE: lib/train/admin/environment.py
================================================
import importlib
import os
from collections import OrderedDict


def create_default_local_file():
    """Write a skeleton local.py with empty dataset/workspace paths.

    NOTE(review): the indentation inside the written string literals appears to
    have been collapsed by extraction — the emitted local.py likely had
    4/8-space indents originally; verify against the repository.
    """
    path = os.path.join(os.path.dirname(__file__), 'local.py')
    empty_str = '\'\''
    default_settings = OrderedDict({
        'workspace_dir': empty_str,
        'tensorboard_dir': 'self.workspace_dir + \'/tensorboard/\'',
        'pretrained_networks': 'self.workspace_dir + \'/pretrained_networks/\'',
        'lasot_dir': empty_str,
        'got10k_dir': empty_str,
        'trackingnet_dir': empty_str,
        'coco_dir': empty_str,
        'lvis_dir': empty_str,
        'sbd_dir': empty_str,
        'imagenet_dir': empty_str,
        'imagenetdet_dir': empty_str,
        'ecssd_dir': empty_str,
        'hkuis_dir': empty_str,
        'msra10k_dir': empty_str,
        'davis_dir': empty_str,
        'youtubevos_dir': empty_str})

    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',
               'tensorboard_dir': 'Directory for tensorboard files.'}

    with open(path, 'w') as f:
        f.write('class EnvironmentSettings:\n')
        f.write(' def __init__(self):\n')
        for attr, attr_val in default_settings.items():
            comment_str = None
            if attr in comment:
                comment_str = comment[attr]
            if comment_str is None:
                # NOTE(review): fragment boundary — this string literal and call
                # continue in the next chunk.
                f.write(' self.{} =
                # NOTE(review): fragment — completes the f.write() string literal
                # begun in the previous chunk.
                {}\n'.format(attr, attr_val))
            else:
                f.write(' self.{} = {} # {}\n'.format(attr, attr_val, comment_str))


def create_default_local_file_ITP_train(workspace_dir, data_dir):
    """Write a local.py pre-filled with dataset paths rooted at data_dir and a
    workspace rooted at workspace_dir.

    NOTE(review): as above, indentation inside the written string literals looks
    collapsed by extraction — verify against the repository.
    """
    path = os.path.join(os.path.dirname(__file__), 'local.py')
    empty_str = '\'\''
    default_settings = OrderedDict({
        'workspace_dir': workspace_dir,
        'tensorboard_dir': os.path.join(workspace_dir, 'tensorboard'),  # Directory for tensorboard files.
        'pretrained_networks': os.path.join(workspace_dir, 'pretrained_networks'),
        'lasot_dir': os.path.join(data_dir, 'lasot'),
        'got10k_dir': os.path.join(data_dir, 'got10k/train'),
        'got10k_val_dir': os.path.join(data_dir, 'got10k/val'),
        'lasot_lmdb_dir': os.path.join(data_dir, 'lasot_lmdb'),
        'got10k_lmdb_dir': os.path.join(data_dir, 'got10k_lmdb'),
        'trackingnet_dir': os.path.join(data_dir, 'trackingnet'),
        'trackingnet_lmdb_dir': os.path.join(data_dir, 'trackingnet_lmdb'),
        'coco_dir': os.path.join(data_dir, 'coco'),
        'coco_lmdb_dir': os.path.join(data_dir, 'coco_lmdb'),
        'lvis_dir': empty_str,
        'sbd_dir': empty_str,
        'imagenet_dir': os.path.join(data_dir, 'vid'),
        'imagenet_lmdb_dir': os.path.join(data_dir, 'vid_lmdb'),
        'imagenetdet_dir': empty_str,
        'ecssd_dir': empty_str,
        'hkuis_dir': empty_str,
        'msra10k_dir': empty_str,
        'davis_dir': empty_str,
        'youtubevos_dir': empty_str})

    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',
               'tensorboard_dir': 'Directory for tensorboard files.'}

    with open(path, 'w') as f:
        f.write('class EnvironmentSettings:\n')
        f.write(' def __init__(self):\n')
        for attr, attr_val in default_settings.items():
            comment_str = None
            if attr in comment:
                comment_str = comment[attr]
            if comment_str is None:
                # empty placeholders are written as literal '' (unquoted expansion)
                if attr_val == empty_str:
                    f.write(' self.{} = {}\n'.format(attr, attr_val))
                else:
                    f.write(' self.{} = \'{}\'\n'.format(attr, attr_val))
            else:
                f.write(' self.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str))


def env_settings():
    """Import and instantiate the user's local EnvironmentSettings; on failure,
    write the skeleton local.py and raise with setup instructions."""
    env_module_name = 'lib.train.admin.local'
    try:
        env_module = importlib.import_module(env_module_name)
        return env_module.EnvironmentSettings()
    except:
        env_file = os.path.join(os.path.dirname(__file__), 'local.py')
        create_default_local_file()
        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file))


================================================
FILE: lib/train/admin/local.py
================================================
class EnvironmentSettings:
    def __init__(self):
        self.workspace_dir = '/home/baiyifan/code/2stage_update_intrain'    # Base directory for saving network checkpoints.
        self.tensorboard_dir = '/home/baiyifan/code/2stage/tensorboard'    # Directory for tensorboard files.
        self.pretrained_networks = '/home/baiyifan/code/2stage/pretrained_networks'
        self.lasot_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'
        self.got10k_dir = '/home/baiyifan/GOT-10k/train'
        self.got10k_val_dir = '/home/baiyifan/GOT-10k/val'
        self.lasot_lmdb_dir = '/home/baiyifan/code/2stage/data/lasot_lmdb'
        self.got10k_lmdb_dir = '/home/baiyifan/code/2stage/data/got10k_lmdb'
        self.trackingnet_dir = '/ssddata/TrackingNet/all_zip'
        self.trackingnet_lmdb_dir = '/home/baiyifan/code/2stage/data/trackingnet_lmdb'
        self.coco_dir = '/home/baiyifan/coco'
        self.coco_lmdb_dir = '/home/baiyifan/code/2stage/data/coco_lmdb'
        self.lvis_dir = ''
        self.sbd_dir = ''
        self.imagenet_dir = '/home/baiyifan/code/2stage/data/vid'
        self.imagenet_lmdb_dir = '/home/baiyifan/code/2stage/data/vid_lmdb'
        self.imagenetdet_dir = ''
        self.ecssd_dir = ''
        self.hkuis_dir = ''
        self.msra10k_dir = ''
        self.davis_dir = ''
        self.youtubevos_dir = ''


================================================
FILE: lib/train/admin/multigpu.py
================================================
import torch.nn as nn
# Here we use DistributedDataParallel(DDP) rather than DataParallel(DP) for multiple GPUs training


def is_multi_gpu(net):
    # True when net is wrapped by DDP (or the local MultiGPU subclass below)
    return isinstance(net, (MultiGPU, nn.parallel.distributed.DistributedDataParallel))


class MultiGPU(nn.parallel.distributed.DistributedDataParallel):
    # NOTE(review): fragment boundary — the method name and body continue in the
    # next chunk (attribute lookup that falls back to the wrapped module).
    def
    # NOTE(review): fragment — body of MultiGPU.__getattr__; its `def` token sits at
    # the end of the previous chunk. Prefers DDP's own attribute, then falls back to
    # the wrapped module's.
    __getattr__(self, item):
        try:
            return super().__getattr__(item)
        except:
            pass
        return getattr(self.module, item)


================================================
FILE: lib/train/admin/settings.py
================================================
from lib.train.admin.environment import env_settings


class Settings:
    """ Training settings, e.g. the paths to datasets and networks."""
    def __init__(self):
        self.set_default()

    def set_default(self):
        # env carries all dataset/workspace paths from local.py
        self.env = env_settings()
        self.use_gpu = True


================================================
FILE: lib/train/admin/stats.py
================================================
class StatValue:
    """Tracks the latest value of a statistic plus its full history."""
    def __init__(self):
        self.clear()

    def reset(self):
        self.val = 0

    def clear(self):
        self.reset()
        self.history = []

    def update(self, val):
        self.val = val
        self.history.append(self.val)


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.clear()
        self.has_new_data = False

    def reset(self):
        self.avg = 0
        self.val = 0
        self.sum = 0
        self.count = 0

    def clear(self):
        self.reset()
        self.history = []

    def update(self, val, n=1):
        # n: number of samples the value `val` averages over
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def new_epoch(self):
        # archive this epoch's average and start a fresh accumulation
        if self.count > 0:
            self.history.append(self.avg)
            self.reset()
            self.has_new_data = True
        else:
            self.has_new_data = False


def topk_accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    single_input = not isinstance(topk, (tuple, list))
    if single_input:
        topk = (topk,)
    maxk = max(topk)
    batch_size = target.size(0)
    # top-maxk predicted class indices per sample, transposed to (maxk, batch)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]
        res.append(correct_k * 100.0 / batch_size)
    if single_input:
        return res[0]
    return res


================================================
FILE: lib/train/admin/tensorboard.py
================================================
# Entire module is commented out in the repository (kept for reference).
#import os
#from collections import OrderedDict
#try:
#    from torch.utils.tensorboard import SummaryWriter
#except:
#    print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.')
#    from tensorboardX import SummaryWriter
#class TensorboardWriter:
#    def __init__(self, directory, loader_names):
#        self.directory = directory
#        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})
#    def write_info(self, script_name, description):
#        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))
#        tb_info_writer.add_text('Script_name', script_name)
#        tb_info_writer.add_text('Description', description)
#        tb_info_writer.close()
#    def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):
#        for loader_name, loader_stats in stats.items():
#            if loader_stats is None:
#                continue
#            for var_name, val in loader_stats.items():
#                if hasattr(val, 'history') and getattr(val, 'has_new_data', True):
#                    self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)


================================================
FILE: lib/train/base_functions.py
================================================
import torch
from torch.utils.data.distributed import DistributedSampler
# datasets related
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
import lib.train.data.transforms as tfm
from lib.utils.misc import is_main_process


def update_settings(settings, cfg):
    """Copy the relevant cfg values onto the Settings object used by the data pipeline."""
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    # NOTE(review): fragment boundary — this dict literal continues in the next chunk.
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search':
    # NOTE(review): fragment — completes the center_jitter_factor dict literal begun
    # in the previous chunk; tail of update_settings().
    cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    """Map dataset-name strings to constructed dataset objects.

    Each name selects the lmdb-backed variant when settings.use_lmdb is set,
    otherwise the plain on-disk dataset rooted at the matching env path.
    """
    assert isinstance(name_list, list)
    datasets = []
    #settings.use_lmdb = True
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full",
                        "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        if name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
    return datasets


def build_dataloaders(cfg, settings):
    """Build the train and validation LTR loaders.

    NOTE(review): this function continues in the next chunk; only its head is here.
    """
    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),
                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    # The tracking pairs processing module
    output_sz = settings.output_sz
    search_area_factor = settings.search_area_factor

    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                       output_sz=output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       transform=transform_train,
                                                       joint_transform=transform_joint,
                                                       settings=settings)

    # NOTE(review): fragment boundary — this call continues in the next chunk.
    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                     output_sz=output_sz,
                                                     center_jitter_factor=settings.center_jitter_factor,
                                                     scale_jitter_factor=settings.scale_jitter_factor,
                                                     mode='sequence',
                                                     transform=transform_val,
    # NOTE(review): fragment — completes the STARKProcessing(...) call begun in the
    # previous chunk; tail of build_dataloaders().
    joint_transform=transform_joint, settings=settings)

    # Train sampler and loader
    settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1)
    settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1)
    sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal")
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
    print("sampler_mode", sampler_mode)
    dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
                                            p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,
                                            samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,
                                            max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL,
                                            num_search_frames=settings.num_search,
                                            num_template_frames=settings.num_template,
                                            processing=data_processing_train,
                                            frame_sample_mode=sampler_mode,
                                            train_cls=train_cls)

    # local_rank == -1 means single-process training: shuffle in the loader itself
    train_sampler = DistributedSampler(dataset_train) if settings.local_rank != -1 else None
    shuffle = False if settings.local_rank != -1 else True

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE,
                             shuffle=shuffle, num_workers=cfg.TRAIN.NUM_WORKER,
                             drop_last=True, stack_dim=1, sampler=train_sampler)

    # Validation samplers and loaders
    dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader),
                                          p_datasets=cfg.DATA.VAL.DATASETS_RATIO,
                                          samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH,
                                          max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL,
                                          num_search_frames=settings.num_search,
                                          num_template_frames=settings.num_template,
                                          processing=data_processing_val,
                                          frame_sample_mode=sampler_mode,
                                          train_cls=train_cls)
    val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE,
                           num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1,
                           sampler=val_sampler, epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL)

    return loader_train, loader_val


def get_optimizer_scheduler(net, cfg):
    """Build the AdamW optimizer (backbone at a reduced LR) and LR scheduler.

    When TRAIN_CLS is set, only parameters whose name contains "cls" stay trainable.
    """
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
    if train_cls:
        print("Only training classification head. Learnable parameters are shown below.")
        param_dicts = [
            {"params": [p for n, p in net.named_parameters() if "cls" in n and p.requires_grad]}
        ]

        for n, p in net.named_parameters():
            if "cls" not in n:
                p.requires_grad = False
            else:
                print(n)
    else:
        param_dicts = [
            {"params": [p for n, p in net.named_parameters() if "backbone" not in n and p.requires_grad]},
            {
                "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad],
                "lr": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER,
            },
        ]
        if is_main_process():
            print("Learnable parameters are shown below.")
            for n, p in net.named_parameters():
                if p.requires_grad:
                    print(n)

    if cfg.TRAIN.OPTIMIZER == "ADAMW":
        optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR,
                                      weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        raise ValueError("Unsupported Optimizer")
    if cfg.TRAIN.SCHEDULER.TYPE == 'step':
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH)
    elif cfg.TRAIN.SCHEDULER.TYPE == "Mstep":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                            milestones=cfg.TRAIN.SCHEDULER.MILESTONES,
                                                            gamma=cfg.TRAIN.SCHEDULER.GAMMA)
    else:
        raise ValueError("Unsupported scheduler")
    return optimizer, lr_scheduler


def get_optimizer_scheduler_v2(net, cfg):
    """Variant of get_optimizer_scheduler with a third parameter group: backbone
    output_bias/embeddings/extension parameters train at the full LR.

    NOTE(review): this function continues in the next chunk; the print() string
    below is cut at the chunk boundary.
    """
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
    if train_cls:
        print("Only training classification head.
Learnable parameters are shown below.") param_dicts = [ {"params": [p for n, p in net.named_parameters() if "cls" in n and p.requires_grad]} ] for n, p in net.named_parameters(): if "cls" not in n: p.requires_grad = False else: print(n) else: param_dicts = [ {"params": [p for n, p in net.named_parameters() if "backbone" not in n and p.requires_grad ]}, { "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad and "output_bias" not in n and "embeddings" not in n and "extension" not in n], "lr": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER, }, { "params": [p for n, p in net.named_parameters() if "backbone" in n and p.requires_grad and ("output_bias" in n or "embeddings" in n or "extension" in n)], "lr": cfg.TRAIN.LR, }, ] if is_main_process(): print("Learnable parameters are shown below.") for n, p in net.named_parameters(): if p.requires_grad: print(n) if cfg.TRAIN.OPTIMIZER == "ADAMW": optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY) else: raise ValueError("Unsupported Optimizer") if cfg.TRAIN.SCHEDULER.TYPE == 'step': lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH) elif cfg.TRAIN.SCHEDULER.TYPE == "Mstep": lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.TRAIN.SCHEDULER.MILESTONES, gamma=cfg.TRAIN.SCHEDULER.GAMMA) else: raise ValueError("Unsupported scheduler") return optimizer, lr_scheduler ================================================ FILE: lib/train/data/__init__.py ================================================ from .loader import LTRLoader from .image_loader import jpeg4py_loader, opencv_loader, jpeg4py_loader_w_failsafe, default_image_loader ================================================ FILE: lib/train/data/bounding_box_utils.py ================================================ import torch import numpy as np def batch_center2corner(boxes): xmin = boxes[:, 0] - boxes[:, 2] * 0.5 ymin = boxes[:, 1] - boxes[:, 3] 
* 0.5
    xmax = boxes[:, 0] + boxes[:, 2] * 0.5
    ymax = boxes[:, 1] + boxes[:, 3] * 0.5

    # Return the same container type as the input (numpy or torch).
    if isinstance(boxes, np.ndarray):
        return np.stack([xmin, ymin, xmax, ymax], 1)
    else:
        return torch.stack([xmin, ymin, xmax, ymax], 1)


def batch_corner2center(boxes):
    # Convert N x 4 [xmin, ymin, xmax, ymax] boxes to [cx, cy, w, h].
    cx = (boxes[:, 0] + boxes[:, 2]) * 0.5
    cy = (boxes[:, 1] + boxes[:, 3]) * 0.5
    w = (boxes[:, 2] - boxes[:, 0])
    h = (boxes[:, 3] - boxes[:, 1])

    if isinstance(boxes, np.ndarray):
        return np.stack([cx, cy, w, h], 1)
    else:
        return torch.stack([cx, cy, w, h], 1)


def batch_xywh2center(boxes):
    # Convert N x 4 [x, y, w, h] boxes to [cx, cy, w, h], using the legacy
    # (w - 1) / 2 pixel-center convention (contrast with batch_xywh2center2).
    cx = boxes[:, 0] + (boxes[:, 2] - 1) / 2
    cy = boxes[:, 1] + (boxes[:, 3] - 1) / 2
    w = boxes[:, 2]
    h = boxes[:, 3]

    if isinstance(boxes, np.ndarray):
        return np.stack([cx, cy, w, h], 1)
    else:
        return torch.stack([cx, cy, w, h], 1)


def batch_xywh2center2(boxes):
    # Convert N x 4 [x, y, w, h] boxes to [cx, cy, w, h] with the w / 2 convention.
    cx = boxes[:, 0] + boxes[:, 2] / 2
    cy = boxes[:, 1] + boxes[:, 3] / 2
    w = boxes[:, 2]
    h = boxes[:, 3]

    if isinstance(boxes, np.ndarray):
        return np.stack([cx, cy, w, h], 1)
    else:
        return torch.stack([cx, cy, w, h], 1)


def batch_xywh2corner(boxes):
    # Convert N x 4 [x, y, w, h] boxes to [xmin, ymin, xmax, ymax].
    xmin = boxes[:, 0]
    ymin = boxes[:, 1]
    xmax = boxes[:, 0] + boxes[:, 2]
    ymax = boxes[:, 1] + boxes[:, 3]

    if isinstance(boxes, np.ndarray):
        return np.stack([xmin, ymin, xmax, ymax], 1)
    else:
        return torch.stack([xmin, ymin, xmax, ymax], 1)


def rect_to_rel(bb, sz_norm=None):
    """Convert standard rectangular parametrization of the bounding box [x, y, w, h]
    to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate.
    args:
        bb - N x 4 tensor of boxes.
        sz_norm - [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given.
    """

    c = bb[...,:2] + 0.5 * bb[...,2:]
    if sz_norm is None:
        c_rel = c / bb[...,2:]
    else:
        c_rel = c / sz_norm
    sz_rel = torch.log(bb[...,2:])
    return torch.cat((c_rel, sz_rel), dim=-1)


def rel_to_rect(bb, sz_norm=None):
    """Inverts the effect of rect_to_rel. See above."""

    sz = torch.exp(bb[...,2:])
    if sz_norm is None:
        c = bb[...,:2] * sz
    else:
        c = bb[...,:2] * sz_norm
    tl = c - 0.5 * sz
    return torch.cat((tl, sz), dim=-1)


def masks_to_bboxes(mask, fmt='c'):
    """ Convert a mask tensor to one or more bounding boxes.
    Note: This function is a bit new, make sure it does what it says.  /Andreas
    :param mask: Tensor of masks, shape = (..., H, W)
    :param fmt: bbox layout. 'c' => "center + size" or (x_center, y_center, width, height)
                             't' => "top left + size" or (x_left, y_top, width, height)
                             'v' => "vertices" or (x_left, y_top, x_right, y_bottom)
    :return: tensor containing a batch of bounding boxes, shape = (..., 4)
    """
    batch_shape = mask.shape[:-2]
    mask = mask.reshape((-1, *mask.shape[-2:]))
    bboxes = []

    for m in mask:
        # Project onto each axis; nonzero() gives the occupied rows/columns.
        mx = m.sum(dim=-2).nonzero()
        my = m.sum(dim=-1).nonzero()
        # Empty masks yield the degenerate box [0, 0, 0, 0].
        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]
        bboxes.append(bb)

    bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device)
    bboxes = bboxes.reshape(batch_shape + (4,))

    if fmt == 'v':
        return bboxes

    x1 = bboxes[..., :2]
    s = bboxes[..., 2:] - x1 + 1  # inclusive pixel extent

    if fmt == 'c':
        return torch.cat((x1 + 0.5 * s, s), dim=-1)
    elif fmt == 't':
        return torch.cat((x1, s), dim=-1)

    raise ValueError("Undefined bounding box layout '%s'" % fmt)


def masks_to_bboxes_multi(mask, ids, fmt='c'):
    # Per-instance variant: one box per label id in a single H x W index mask.
    assert mask.dim() == 2
    bboxes = []

    for id in ids:
        mx = (mask == id).sum(dim=-2).nonzero()
        my = (mask == id).float().sum(dim=-1).nonzero()
        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]

        bb = torch.tensor(bb, dtype=torch.float32, device=mask.device)

        x1 = bb[:2]
        s = bb[2:] - x1 + 1

        if fmt == 'v':
            pass
        elif fmt == 'c':
            bb = torch.cat((x1 + 0.5 * s, s), dim=-1)
        elif fmt == 't':
            bb = torch.cat((x1, s), dim=-1)
        else:
            raise ValueError("Undefined bounding box layout '%s'" % fmt)
        bboxes.append(bb)

    return bboxes


================================================
FILE: lib/train/data/image_loader.py
================================================
import jpeg4py
import cv2 as cv
from PIL import Image
import numpy as np

# DAVIS benchmark color palette for indexed segmentation PNGs; the first 22
# entries are the standard object colors, the rest default to gray ramps.
davis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8)
davis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128],
                         [0, 128, 128], [128, 128, 128], [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0],
                         [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0], [128, 64, 0],
                         [0, 191, 0], [128, 191, 0], [0, 64, 128], [128, 64, 128]]


def default_image_loader(path):
    """The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,
    but reverts to the opencv_loader if the former is not available."""
    # The loader choice is cached on the function object after the first call.
    if default_image_loader.use_jpeg4py is None:
        # Try using jpeg4py
        im = jpeg4py_loader(path)
        if im is None:
            default_image_loader.use_jpeg4py = False
            print('Using opencv_loader instead.')
        else:
            default_image_loader.use_jpeg4py = True
            return im
    if default_image_loader.use_jpeg4py:
        return jpeg4py_loader(path)
    return opencv_loader(path)

default_image_loader.use_jpeg4py = None


def jpeg4py_loader(path):
    """ Image reading using jpeg4py   https://github.com/ajkxyz/jpeg4py"""
    try:
        return jpeg4py.JPEG(path).decode()
    except Exception as e:
        # Best-effort loader: failures are reported and None is returned,
        # which default_image_loader uses to fall back to OpenCV.
        print('ERROR: Could not read image "{}"'.format(path))
        print(e)
        return None


def opencv_loader(path):
    """ Read image using opencv's imread function and returns it in rgb format"""
    try:
        im = cv.imread(path, cv.IMREAD_COLOR)
        # convert to rgb and return
        # NOTE(review): cv.imread returns None for an unreadable path, so
        # cvtColor raises here and the except clause returns None — verify
        # callers handle a None image.
        return cv.cvtColor(im, cv.COLOR_BGR2RGB)
    except Exception as e:
        print('ERROR: Could not read image "{}"'.format(path))
        print(e)
        return None


def jpeg4py_loader_w_failsafe(path):
    """ Image reading using jpeg4py   https://github.com/ajkxyz/jpeg4py"""
    try:
        return jpeg4py.JPEG(path).decode()
    except:
        # Fall back to OpenCV if jpeg4py fails for any reason.
        try:
            im = cv.imread(path, cv.IMREAD_COLOR)

            # convert to rgb and return
            return cv.cvtColor(im, cv.COLOR_BGR2RGB)
        except Exception as e:
            print('ERROR: Could not read image "{}"'.format(path))
            print(e)
            return None


def opencv_seg_loader(path):
    """ Read segmentation annotation using opencv's imread function"""
    try:
        return cv.imread(path)
    except Exception as e:
        print('ERROR: Could not read image "{}"'.format(path))
        print(e)
        return None


def imread_indexed(filename):
    """ Load indexed image with given filename. Used to read segmentation annotations."""

    im = Image.open(filename)

    annotation = np.atleast_3d(im)[...,0]
    return annotation


def imwrite_indexed(filename, array, color_palette=None):
    """ Save indexed image as png. Used to save segmentation annotation."""

    if color_palette is None:
        color_palette = davis_palette

    if np.atleast_3d(array).shape[2] != 1:
        raise Exception("Saving indexed PNGs requires 2D array.")

    im = Image.fromarray(array)
    im.putpalette(color_palette.ravel())
    im.save(filename, format='PNG')


================================================
FILE: lib/train/data/loader.py
================================================
import torch
import torch.utils.data.dataloader
import importlib
import collections
# NOTE(review): torch._six was removed from recent PyTorch releases; this
# import (and the int_classes fallback below) ties the file to older torch.
from torch._six import string_classes
from lib.utils import TensorDict, TensorList

# NOTE(review): fragile version parse — float(torch.__version__[:3]) misreads
# two-digit minor versions (e.g. "1.10" -> 1.1); the length check is the
# workaround that catches those. Confirm against the supported torch range.
if float(torch.__version__[:3]) >= 1.9 or len('.'.join((torch.__version__).split('.')[0:2])) > 3:
    int_classes = int
else:
    from torch._six import int_classes
import warnings
warnings.filterwarnings("ignore")


def _check_use_shared_memory():
    # Whether workers should collate into shared memory; the flag moved
    # between torch versions, so probe the old and new locations in turn.
    if hasattr(torch.utils.data.dataloader, '_use_shared_memory'):
        return getattr(torch.utils.data.dataloader, '_use_shared_memory')
    collate_lib = importlib.import_module('torch.utils.data._utils.collate')
    if hasattr(collate_lib, '_use_shared_memory'):
        return getattr(collate_lib, '_use_shared_memory')
    return torch.utils.data.get_worker_info() is not None


def ltr_collate(batch):
    """Puts each data field into a tensor with outer dimension batch size"""

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if
_check_use_shared_memory():
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
        # if batch[0].dim() < 4:
        #     return torch.stack(batch, 0, out=out)
        # return torch.cat(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], TensorDict):
        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})
    # NOTE(review): collections.Mapping / collections.Sequence were removed in
    # Python 3.10 (use collections.abc.*) — confirm the supported Python range.
    elif isinstance(batch[0], collections.Mapping):
        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], TensorList):
        transposed = zip(*batch)
        return TensorList([ltr_collate(samples) for samples in transposed])
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [ltr_collate(samples) for samples in transposed]
    elif batch[0] is None:
        return batch

    raise TypeError((error_msg.format(type(batch[0]))))


def ltr_collate_stack1(batch):
    """Puts each data field into a tensor. The tensors are stacked at dim=1 to form the batch"""

    # Identical to ltr_collate except tensors are stacked along dim 1, so the
    # frame dimension stays first and the batch dimension comes second.
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _check_use_shared_memory():
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 1, out=out)
        # if batch[0].dim() < 4:
        #     return torch.stack(batch, 0, out=out)
        # return torch.cat(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.stack([torch.from_numpy(b) for b in batch], 1)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], TensorDict):
        return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]})
    elif isinstance(batch[0], collections.Mapping):
        return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], TensorList):
        transposed = zip(*batch)
        return TensorList([ltr_collate_stack1(samples) for samples in transposed])
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [ltr_collate_stack1(samples) for samples in transposed]
    elif batch[0] is None:
        return batch

    raise TypeError((error_msg.format(type(batch[0]))))


class
LTRLoader(torch.utils.data.dataloader.DataLoader):
    """
    Data loader. Combines a dataset and a sampler, and provides
    single- or multi-process iterators over the dataset.

    Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to
            select along which dimension the data should be stacked to form a batch.

    Arguments:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): how many samples per batch to load
            (default: 1).
        shuffle (bool, optional): set to ``True`` to have the data reshuffled
            at every epoch (default: False).
        sampler (Sampler, optional): defines the strategy to draw samples from
            the dataset. If specified, ``shuffle`` must be False.
        batch_sampler (Sampler, optional): like sampler, but returns a batch of
            indices at a time. Mutually exclusive with batch_size, shuffle,
            sampler, and drop_last.
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means that the data will be loaded in the main process.
            (default: 0)
        collate_fn (callable, optional): merges a list of samples to form a mini-batch.
        stack_dim (int): Dimension along which to stack to form the batch. (default: 0)
        pin_memory (bool, optional): If ``True``, the data loader will copy tensors
            into CUDA pinned memory before returning them.
        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
            if the dataset size is not divisible by the batch size. If ``False`` and
            the size of dataset is not divisible by the batch size, then the last batch
            will be smaller. (default: False)
        timeout (numeric, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative. (default: 0)
        worker_init_fn (callable, optional): If not None, this will be called on each
            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
            input, after seeding and before data loading. (default: None)

    .. note:: By default, each worker will have its PyTorch seed set to
              ``base_seed + worker_id``, where ``base_seed`` is a long generated
              by main process using its RNG. However, seeds for other libraries
              may be duplicated upon initializing workers (w.g., NumPy), causing
              each worker to return identical random numbers. (See
              :ref:`dataloader-workers-random-seed` section in FAQ.) You may
              use ``torch.initial_seed()`` to access the PyTorch seed for each
              worker in :attr:`worker_init_fn`, and use it to set other seeds
              before data loading.

    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
                 unpicklable object, e.g., a lambda function.
    """

    __initialized = False

    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,
                 timeout=0, worker_init_fn=None):
        print("pin_memory is", pin_memory)
        # stack_dim selects the batch dimension used by the collate function.
        if collate_fn is None:
            if stack_dim == 0:
                collate_fn = ltr_collate
            elif stack_dim == 1:
                collate_fn = ltr_collate_stack1
            else:
                raise ValueError('Stack dim no supported. Must be 0 or 1.')

        # NOTE(review): positional forwarding here assumes the parent
        # DataLoader argument order (dataset, batch_size, shuffle, sampler,
        # batch_sampler, num_workers, collate_fn, pin_memory, drop_last,
        # timeout, worker_init_fn) — confirm against the pinned torch version.
        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
                                        num_workers, collate_fn, pin_memory, drop_last,
                                        timeout, worker_init_fn)

        self.name = name                      # loader name used by the trainer (e.g. 'train'/'val')
        self.training = training              # whether this loader drives optimization
        self.epoch_interval = epoch_interval  # run this loader only every N epochs
        self.stack_dim = stack_dim


================================================
FILE: lib/train/data/processing.py
================================================
import torch
import torchvision.transforms as transforms
from lib.utils import TensorDict
import lib.train.data.processing_utils as prutils
import torch.nn.functional as F


def stack_tensors(x):
    # Stack a homogeneous list/tuple of tensors; leave everything else as-is.
    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):
        return torch.stack(x)
    return x


class BaseProcessing:
    """ Base class for Processing.
Processing class is used to process the data returned by a dataset, before passing it
    through the network. For example, it can be used to crop a search region around the object, apply various data
    augmentations, etc."""
    def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):
        """
        args:
            transform       - The set of transformations to be applied on the images. Used only if template_transform or
                                search_transform is None.
            template_transform - The set of transformations to be applied on the template images. If None, the 'transform'
                                argument is used instead.
            search_transform  - The set of transformations to be applied on the search images. If None, the 'transform'
                                argument is used instead.
            joint_transform - The set of transformations to be applied 'jointly' on the template and search images.  For
                                example, it can be used to convert both template and search images to grayscale.
        """
        self.transform = {'template': transform if template_transform is None else template_transform,
                          'search': transform if search_transform is None else search_transform,
                          'joint': joint_transform}

    def __call__(self, data: TensorDict):
        raise NotImplementedError


class STARKProcessing(BaseProcessing):
    """ The processing class used for training LittleBoy. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )
    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
    always at the center of the search region. The search region is then resized to a fixed size given by the
    argument output_sz.
    """

    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,
                 mode='pair', settings=None, *args, **kwargs):
        """
        args:
            search_area_factor - The size of the search region  relative to the target size.
            output_sz - An integer, denoting the size to which the search region is resized. The search region is always
                        square.
            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
                                    extracting the search region. See _get_jittered_box for how the jittering is done.
            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
        """
        super().__init__(*args, **kwargs)
        self.search_area_factor = search_area_factor
        self.output_sz = output_sz
        self.center_jitter_factor = center_jitter_factor
        self.scale_jitter_factor = scale_jitter_factor
        self.mode = mode
        self.settings = settings

    def _get_jittered_box(self, box, mode):
        """ Jitter the input box
        args:
            box - input bounding box
            mode - string 'template' or 'search' indicating template or search data

        returns:
            torch.Tensor - jittered box
        """
        # Log-normal scale jitter; offset magnitude scales with the jittered box size.
        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])
        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())
        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)

        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)

    def __call__(self, data: TensorDict):
        """
        args:
            data - The input data, should contain the following fields:
                'template_images', search_images', 'template_anno', 'search_anno'
        returns:
            TensorDict - output data block with following fields:
                'template_images', 'search_images', 'template_anno', 'search_anno', 'test_proposals', 'proposal_iou'
        """
        # Apply joint transforms
        if self.transform['joint'] is not None:
            data['template_images'], data['template_anno'], data['template_masks'] = self.transform['joint'](
                image=data['template_images'], bbox=data['template_anno'], mask=data['template_masks'])
            data['search_images'], data['search_anno'], data['search_masks'] = self.transform['joint'](
                image=data['search_images'], bbox=data['search_anno'], mask=data['search_masks'], new_roll=False)

        # "target_in_search" aliases the search frames but is cropped with
        # template-style parameters (set up just below).
        data["target_in_search_images"] = data["search_images"]
        data["target_in_search_anno"] = data["search_anno"]
        data["target_in_search_masks"] = data["search_masks"]

        # NOTE(review): these assignments mutate the shared per-instance dicts
        # on every call; harmless if idempotent, but verify the dicts are not
        # shared with other processing objects.
        self.scale_jitter_factor["target_in_search"] = self.scale_jitter_factor["template"]
        self.center_jitter_factor["target_in_search"] = self.center_jitter_factor["template"]
        self.search_area_factor["target_in_search"] = self.search_area_factor["template"]
        self.output_sz["target_in_search"] = self.output_sz["template"]
        self.transform["target_in_search"] = self.transform["search"]

        for s in ['template', 'search', 'target_in_search']:
            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
                "In pair mode, num train/test frames must be 1"

            # Add a uniform noise to the center pos
            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]

            # 2021.1.9 Check whether data is valid. Avoid too small bounding boxes
            w, h = torch.stack(jittered_anno, dim=0)[:, 2], torch.stack(jittered_anno, dim=0)[:, 3]

            crop_sz = torch.ceil(torch.sqrt(w * h) * self.search_area_factor[s])
            if (crop_sz < 1).any():
                data['valid'] = False
                # print("Too small box is found. Replace it with new data.")
                return data

            # Crop image region centered at jittered_anno box and get the attention mask
            crops, boxes, att_mask, mask_crops = prutils.jittered_center_crop(data[s + '_images'], jittered_anno,
                                                                              data[s + '_anno'], self.search_area_factor[s],
                                                                              self.output_sz[s], masks=data[s + '_masks'])
            # Apply transforms
            data[s + '_images'], data[s + '_anno'], data[s + '_att'], data[s + '_masks'] = self.transform[s](
                image=crops, bbox=boxes, att=att_mask, mask=mask_crops, joint=False)

            # 2021.1.9 Check whether elements in data[s + '_att'] is all 1
            # Note that type of data[s + '_att'] is tuple, type of ele is torch.tensor
            for ele in data[s + '_att']:
                if (ele == 1).all():
                    data['valid'] = False
                    # print("Values of original attention mask are all one. Replace it with new data.")
                    return data
            # 2021.1.10 more strict conditions: require the donwsampled masks not to be all 1
            for ele in data[s + '_att']:
                feat_size = self.output_sz[s] // 16  # 16 is the backbone stride
                # (1,1,128,128) (1,1,256,256) --> (1,1,8,8) (1,1,16,16)
                mask_down = F.interpolate(ele[None, None].float(), size=feat_size).to(torch.bool)[0]
                if (mask_down == 1).all():
                    data['valid'] = False
                    # print("Values of down-sampled attention mask are all one. "
                    #       "Replace it with new data.")
                    return data

        data['valid'] = True
        # if we use copy-and-paste augmentation
        if data["template_masks"] is None or data["search_masks"] is None:
            data["template_masks"] = torch.zeros((1, self.output_sz["template"], self.output_sz["template"]))
            data["search_masks"] = torch.zeros((1, self.output_sz["search"], self.output_sz["search"]))
        # Prepare output
        if self.mode == 'sequence':
            data = data.apply(stack_tensors)
        else:
            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)

        return data


================================================
FILE: lib/train/data/processing_utils.py
================================================
import torch
import math
import cv2 as cv
import torch.nn.functional as F
import numpy as np

'''modified from the original test implementation
Replace cv.BORDER_REPLICATE with cv.BORDER_CONSTANT
Add a variable called att_mask for computing attention and positional encoding later'''


def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):
    """ Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area

    args:
        im - cv image
        target_bb - target box [x, y, w, h]
        search_area_factor - Ratio of crop size to target size
        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.
    returns:
        cv image - extracted crop
        float - the factor by which the crop has been resized to make the crop size equal output_size
    """
    if not isinstance(target_bb, list):
        x, y, w, h = target_bb.tolist()
    else:
        x, y, w, h = target_bb

    # Crop image
    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)

    if crop_sz < 1:
        raise Exception('Too small bounding box.')

    x1 = round(x + 0.5 * w - crop_sz * 0.5)
    x2 = x1 + crop_sz

    y1 = round(y + 0.5 * h - crop_sz * 0.5)
    y2 = y1 + crop_sz

    # Amount the crop window hangs over each image border.
    x1_pad = max(0, -x1)
    x2_pad = max(x2 - im.shape[1] + 1, 0)

    y1_pad = max(0, -y1)
    y2_pad = max(y2 - im.shape[0] + 1, 0)

    # Crop target
    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]
    if mask is not None:
        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]

    # Pad (constant zero padding; the original test code used BORDER_REPLICATE)
    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)

    # deal with attention mask: 1 marks padded (invalid) pixels, 0 marks real content.
    H, W, _ = im_crop_padded.shape
    att_mask = np.ones((H,W))
    end_x, end_y = -x2_pad, -y2_pad
    if y2_pad == 0:
        end_y = None
    if x2_pad == 0:
        end_x = None
    att_mask[y1_pad:end_y, x1_pad:end_x] = 0

    if mask is not None:
        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)

    if output_sz is not None:
        resize_factor = output_sz / crop_sz
        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))
        att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)
        if mask is None:
            return im_crop_padded, resize_factor, att_mask
        mask_crop_padded = \
            F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]
        return im_crop_padded, resize_factor, att_mask, mask_crop_padded

    else:
        # NOTE(review): inconsistent tuple order in this branch — the mask-less
        # case returns (crop, att_mask, 1.0) while every other path returns the
        # resize factor second. Verify which order the no-resize callers expect.
        if mask is None:
            return im_crop_padded, att_mask.astype(np.bool_), 1.0
        return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded


def transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,
                            crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box_in - the box for which the co-ordinates are to be transformed
        box_extract - the box about which the image crop has been extracted.
        resize_factor - the ratio between the original image scale and the scale of the image crop
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """
    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]

    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]

    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor
    box_out_wh = box_in[2:4] * resize_factor

    box_out = torch.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))
    if normalize:
        return box_out / crop_sz[0]
    else:
        return box_out


def jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):
    """ For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2
    times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box
    box_gt are transformed to the image crop co-ordinates

    args:
        frames - list of frames
        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract
        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from
                    image co-ordinates to the crop co-ordinates
        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area
        output_sz - The size to which the extracted crops are resized

    returns:
        list - list of image crops
        list - box_gt location in the crop co-ordinates
    """

    if masks is None:
        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)
                                for f, a in zip(frames, box_extract)]
        frames_crop, resize_factors, att_mask = zip(*crops_resize_factors)
        masks_crop = None
    else:
        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)
                                for f, a, m in zip(frames, box_extract, masks)]
        frames_crop, resize_factors, att_mask, masks_crop = zip(*crops_resize_factors)
    # frames_crop: tuple of ndarray (128,128,3), att_mask: tuple of ndarray (128,128)
    crop_sz = torch.Tensor([output_sz, output_sz])

    # find the bb location in the crop
    '''Note that here we use normalized coord'''
    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz, normalize=True)
                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]  # (x1,y1,w,h) list of tensors

    return frames_crop, box_crop, att_mask, masks_crop


def transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:
    """ Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image
    args:
        box - the box for which the co-ordinates are to be transformed
        crop_box - bounding box defining the crop in the original image
        crop_sz - size of the cropped image

    returns:
        torch.Tensor - transformed co-ordinates of box_in
    """

    box_out = box.clone()
    box_out[:2] -= crop_box[:2]

    scale_factor = crop_sz / crop_box[2:]

    box_out[:2] *= scale_factor
    box_out[2:] *= scale_factor
    if normalize:
        return box_out / crop_sz[0]
    else:
        return box_out


================================================
FILE: lib/train/data/sampler.py
def no_processing(data):
    """Identity processing function: return the sampled data unchanged."""
    return data


class TrackingSampler(torch.utils.data.Dataset):
    """Samples frames from training sequences to form batches.

    The sampling is done in the following ways. First a dataset is selected at
    random. Next, a sequence is selected from that dataset. A base frame is then
    sampled randomly from the sequence. Next, a set of 'train frames' and 'test
    frames' are sampled from the sequence from the range
    [base_frame_id - max_gap, base_frame_id] and
    (base_frame_id, base_frame_id + max_gap] respectively. Only frames in which
    the target is visible are sampled. If enough visible frames are not found,
    'max_gap' is increased gradually till enough frames are found.

    The sampled frames are then passed through the input 'processing' function.
    """

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,
                 num_search_frames, num_template_frames=1, processing=no_processing,
                 frame_sample_mode='causal', train_cls=False, pos_prob=0.5):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List of probabilities by which each dataset is sampled
            samples_per_epoch - Number of training samples per epoch
            max_gap - Maximum gap, in frame numbers, between train and test frames
            num_search_frames - Number of search frames to sample
            num_template_frames - Number of template frames to sample
            processing - An instance of Processing class applied to the sampled data
            frame_sample_mode - 'causal', 'trident', 'trident_pro' or 'stark'
            train_cls - whether we are training classification
            pos_prob - probability of sampling a positive class sample (classification)
        """
        self.datasets = datasets
        self.train_cls = train_cls
        self.pos_prob = pos_prob

        # If p not provided, sample proportionally to dataset length.
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize probabilities.
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.num_search_frames = num_search_frames
        self.num_template_frames = num_template_frames
        self.processing = processing
        self.frame_sample_mode = frame_sample_mode

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,
                            allow_invisible=False, force_invisible=False):
        """Sample num_ids frame indices in [min_id, max_id) where the target is visible.

        args:
            visible - 1d indexable of per-frame visibility flags
            num_ids - number of frames to sample (with replacement)
            min_id - minimum allowed frame number (clamped to 0)
            max_id - maximum allowed frame number (clamped to len(visible))
            allow_invisible - if True, sample from all frames in range
            force_invisible - if True, sample only frames where target is NOT visible

        returns:
            list - sampled frame numbers, or None if no candidate frame exists.
        """
        if num_ids == 0:
            return []
        if min_id is None or min_id < 0:
            min_id = 0
        if max_id is None or max_id > len(visible):
            max_id = len(visible)

        # Build the candidate pool according to the visibility constraints.
        if force_invisible:
            valid_ids = [i for i in range(min_id, max_id) if not visible[i]]
        else:
            if allow_invisible:
                valid_ids = [i for i in range(min_id, max_id)]
            else:
                valid_ids = [i for i in range(min_id, max_id) if visible[i]]

        if len(valid_ids) == 0:
            return None

        # Sampling with replacement: duplicates are possible by design.
        return random.choices(valid_ids, k=num_ids)

    def __getitem__(self, index):
        # index is ignored; sequences are drawn randomly.
        if self.train_cls:
            return self.getitem_cls()
        else:
            return self.getitem()

    def getitem(self):
        """
        returns:
            TensorDict - dict containing all the data blocks
        """
        valid = False
        while not valid:
            # Select a dataset.
            dataset = random.choices(self.datasets, self.p_datasets)[0]
            is_video_dataset = dataset.is_video_sequence()

            # Sample a sequence from the given dataset.
            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)

            if is_video_dataset:
                template_frame_ids = None
                search_frame_ids = None
                gap_increase = 0

                if self.frame_sample_mode == 'causal':
                    # Sample test and train frames causally: search ids > template ids.
                    while search_frame_ids is None:
                        base_frame_id = self._sample_visible_ids(
                            visible, num_ids=1,
                            min_id=self.num_template_frames - 1,
                            max_id=len(visible) - self.num_search_frames)
                        prev_frame_ids = self._sample_visible_ids(
                            visible, num_ids=self.num_template_frames - 1,
                            min_id=base_frame_id[0] - self.max_gap - gap_increase,
                            max_id=base_frame_id[0])
                        if prev_frame_ids is None:
                            gap_increase += 5
                            continue
                        template_frame_ids = base_frame_id + prev_frame_ids
                        search_frame_ids = self._sample_visible_ids(
                            visible,
                            min_id=template_frame_ids[0] + 1,
                            max_id=template_frame_ids[0] + self.max_gap + gap_increase,
                            num_ids=self.num_search_frames)
                        # Increase gap until a frame is found.
                        gap_increase += 5
                elif self.frame_sample_mode == "trident" or self.frame_sample_mode == "trident_pro":
                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)
                elif self.frame_sample_mode == "stark":
                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict["valid"])
                else:
                    raise ValueError("Illegal frame sample mode")
            else:
                # Image dataset: repeat the single image to fake a video.
                template_frame_ids = [1] * self.num_template_frames
                search_frame_ids = [1] * self.num_search_frames
            try:
                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict)
                search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)

                H, W, _ = template_frames[0].shape
                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros((H, W))] * self.num_template_frames
                search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros((H, W))] * self.num_search_frames

                data = TensorDict({'template_images': template_frames,
                                   'template_anno': template_anno['bbox'],
                                   'template_masks': template_masks,
                                   'search_images': search_frames,
                                   'search_anno': search_anno['bbox'],
                                   'search_masks': search_masks,
                                   'dataset': dataset.get_name(),
                                   'test_class': meta_obj_test.get('object_class_name')})
                # Make data augmentation.
                data = self.processing(data)

                # Check whether data is valid.
                valid = data['valid']
            except Exception:
                # FIX: was a bare `except:`; narrowed so Ctrl-C / SystemExit are
                # not swallowed by this infinite retry loop. Used to handle
                # occasional data failures (e.g. trackingnet) by resampling.
                valid = False

        return data

    def getitem_cls(self):
        # Get data for classification.
        """
        args:
            index (int): Index (Ignored since we sample randomly)
            aux (bool): whether the current data is for auxiliary use (e.g. copy-and-paste)

        returns:
            TensorDict - dict containing all the data blocks
        """
        valid = False
        label = None
        while not valid:
            # Select a dataset.
            dataset = random.choices(self.datasets, self.p_datasets)[0]
            is_video_dataset = dataset.is_video_sequence()

            # Sample a sequence from the given dataset.
            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)

            # Sample template and search frame ids.
            if is_video_dataset:
                if self.frame_sample_mode in ["trident", "trident_pro"]:
                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)
                elif self.frame_sample_mode == "stark":
                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict["valid"])
                else:
                    raise ValueError("illegal frame sample mode")
            else:
                # Image dataset: repeat the single image to fake a video.
                template_frame_ids = [1] * self.num_template_frames
                search_frame_ids = [1] * self.num_search_frames
            try:
                # Get images and bounding boxes (for templates).
                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids,
                                                                                    seq_info_dict)
                H, W, _ = template_frames[0].shape
                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros(
                    (H, W))] * self.num_template_frames
                # Get images and bounding boxes (for searches).
                # Positive samples.
                if random.random() < self.pos_prob:
                    label = torch.ones(1,)
                    search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)
                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(
                        (H, W))] * self.num_search_frames
                # Negative samples.
                else:
                    label = torch.zeros(1,)
                    if is_video_dataset:
                        search_frame_ids = self._sample_visible_ids(visible, num_ids=1, force_invisible=True)
                        if search_frame_ids is None:
                            search_frames, search_anno, meta_obj_test = self.get_one_search()
                        else:
                            search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids,
                                                                                           seq_info_dict)
                            # Target invisible: use a dummy centered box as annotation.
                            search_anno["bbox"] = [self.get_center_box(H, W)]
                    else:
                        search_frames, search_anno, meta_obj_test = self.get_one_search()
                    H, W, _ = search_frames[0].shape
                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(
                        (H, W))] * self.num_search_frames

                data = TensorDict({'template_images': template_frames,
                                   'template_anno': template_anno['bbox'],
                                   'template_masks': template_masks,
                                   'search_images': search_frames,
                                   'search_anno': search_anno['bbox'],
                                   'search_masks': search_masks,
                                   'dataset': dataset.get_name(),
                                   'test_class': meta_obj_test.get('object_class_name')})

                # Make data augmentation.
                data = self.processing(data)
                # Add classification label.
                data["label"] = label
                # Check whether data is valid.
                valid = data['valid']
            except Exception:
                # FIX: was a bare `except:` — see getitem() for rationale.
                valid = False

        return data

    def get_center_box(self, H, W, ratio=1/8):
        """Return an integer (x, y, w, h) box of size ratio*W x ratio*H centered in the image."""
        cx, cy, w, h = W/2, H/2, W * ratio, H * ratio
        return torch.tensor([int(cx-w/2), int(cy-h/2), int(w), int(h)])

    def sample_seq_from_dataset(self, dataset, is_video_dataset):
        """Sample a sequence with enough visible frames; image datasets always pass."""
        enough_visible_frames = False
        while not enough_visible_frames:
            # Sample a sequence.
            seq_id = random.randint(0, dataset.get_num_sequences() - 1)

            # Sample frames.
            seq_info_dict = dataset.get_sequence_info(seq_id)
            visible = seq_info_dict['visible']

            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (
                    self.num_search_frames + self.num_template_frames) and len(visible) >= 20

            enough_visible_frames = enough_visible_frames or not is_video_dataset
        return seq_id, visible, seq_info_dict

    def get_one_search(self):
        """Sample a single search frame from a randomly chosen dataset/sequence."""
        # Select a dataset.
        dataset = random.choices(self.datasets, self.p_datasets)[0]
        is_video_dataset = dataset.is_video_sequence()
        # Sample a sequence.
        seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)
        # Sample a frame.
        if is_video_dataset:
            if self.frame_sample_mode == "stark":
                search_frame_ids = self._sample_visible_ids(seq_info_dict["valid"], num_ids=1)
            else:
                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, allow_invisible=True)
        else:
            search_frame_ids = [1]
        # Get the image, bounding box and other info.
        search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)
        return search_frames, search_anno, meta_obj_test

    def get_frame_ids_trident(self, visible):
        """Get template and search ids in a 'trident' manner: one dynamic template per entry of self.max_gap."""
        template_frame_ids_extra = []
        # Retry until every dynamic-template slot is filled.
        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:
            template_frame_ids_extra = []
            # First randomly sample two frames from a video.
            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id
            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id
            # Get the dynamic template id.
            for max_gap in self.max_gap:
                if template_frame_id1[0] >= search_frame_ids[0]:
                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap
                else:
                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]
                # 'trident_pro' also allows invisible frames as dynamic templates.
                if self.frame_sample_mode == "trident_pro":
                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id,
                                                    allow_invisible=True)
                else:
                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id)
                if f_id is None:
                    template_frame_ids_extra += [None]
                else:
                    template_frame_ids_extra += f_id

        template_frame_ids = template_frame_id1 + template_frame_ids_extra
        return template_frame_ids, search_frame_ids

    def get_frame_ids_stark(self, visible, valid):
        """Get template and search ids in a 'stark' manner (dynamic templates need only be valid, not visible)."""
        template_frame_ids_extra = []
        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:
            template_frame_ids_extra = []
            # First randomly sample two frames from a video.
            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id
            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id
            # Get the dynamic template id.
            for max_gap in self.max_gap:
                if template_frame_id1[0] >= search_frame_ids[0]:
                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap
                else:
                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]
                """we require the frame to be valid but not necessary visible"""
                f_id = self._sample_visible_ids(valid, num_ids=1, min_id=min_id, max_id=max_id)
                if f_id is None:
                    template_frame_ids_extra += [None]
                else:
                    template_frame_ids_extra += f_id

        template_frame_ids = template_frame_id1 + template_frame_ids_extra
        return template_frame_ids, search_frame_ids
class SequenceSampler(torch.utils.data.Dataset):
    """Sample a template frame plus a consecutive/interval-sampled run of search
    frames from one sequence, for sequence-level training."""

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,
                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential',
                 max_interval=10, prob=0.7):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List of probabilities by which each dataset is sampled
            samples_per_epoch - Number of training samples per epoch
            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames
            max_interval - Maximum interval between sampled frames
            num_search_frames - Number of search frames to sample
            num_template_frames - Number of template frames to sample
            frame_sample_mode - 'sequential' or 'random_interval'
            prob - sequential sampling by prob / interval sampling by 1-prob
        """
        self.datasets = datasets

        # If p not provided, sample proportionally to dataset length.
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize probabilities.
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.max_interval = max_interval
        self.num_search_frames = num_search_frames
        self.num_template_frames = num_template_frames
        self.frame_sample_mode = frame_sample_mode
        self.prob = prob
        # Multiplier applied to max_gap/max_interval inside __getitem__.
        self.extra = 1

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):
        """Sample num_ids frame indices in [min_id, max_id) where the target is visible.

        returns:
            list - sampled frame numbers (with replacement), or None if none available.
        """
        if num_ids == 0:
            return []
        if min_id is None or min_id < 0:
            min_id = 0
        if max_id is None or max_id > len(visible):
            max_id = len(visible)

        valid_ids = [i for i in range(min_id, max_id) if visible[i]]

        if len(valid_ids) == 0:
            return None

        return random.choices(valid_ids, k=num_ids)

    def _sequential_sample(self, visible):
        """Sample one template frame, then num_search_frames consecutive visible frames after it."""
        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,
                                                      max_id=len(visible) - self.num_search_frames)

        if self.max_gap == -1:
            # No gap allowed: search run starts at the template frame.
            left = template_frame_ids[0]
        else:
            # template frame (1) -> (max_gap) -> search frames (num_search_frames)
            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[0] + self.max_gap)
            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],
                                            max_id=left_max)[0]

        valid_ids = [i for i in range(left, len(visible)) if visible[i]]
        search_frame_ids = valid_ids[:self.num_search_frames]

        # If length is not enough, walk right, padding with the last id at the end.
        last = search_frame_ids[-1]
        while len(search_frame_ids) < self.num_search_frames:
            if last >= len(visible) - 1:
                search_frame_ids.append(last)
            else:
                last += 1
                if visible[last]:
                    search_frame_ids.append(last)

        return template_frame_ids, search_frame_ids

    def _random_interval_sample(self, visible):
        """Sample a template frame and search frames separated by random intervals (<= max_interval)."""
        # Get valid ids.
        valid_ids = [i for i in range(len(visible)) if visible[i]]

        # Sample template frame; shrink the assumed interval until it fits the sequence.
        avg_interval = self.max_interval
        while avg_interval * (self.num_search_frames - 1) > len(visible):
            avg_interval = max(avg_interval - 1, 1)

        while True:
            template_frame_ids = self._sample_visible_ids(
                visible, num_ids=1, min_id=0,
                max_id=len(visible) - avg_interval * (self.num_search_frames - 1))
            if template_frame_ids is None:
                avg_interval = avg_interval - 1
            else:
                break
            if avg_interval == 0:
                template_frame_ids = [valid_ids[0]]
                break

        # Sample first search frame.
        if self.max_gap == -1:
            search_frame_ids = template_frame_ids
        else:
            avg_interval = self.max_interval
            while avg_interval * (self.num_search_frames - 1) > len(visible):
                avg_interval = max(avg_interval - 1, 1)

            while True:
                left_max = min(max(len(visible) - avg_interval * (self.num_search_frames - 1),
                                   template_frame_ids[0] + 1),
                               template_frame_ids[0] + self.max_gap)
                search_frame_ids = self._sample_visible_ids(visible, num_ids=1,
                                                            min_id=template_frame_ids[0],
                                                            max_id=left_max)
                if search_frame_ids is None:
                    avg_interval = avg_interval - 1
                else:
                    break
                if avg_interval == -1:
                    search_frame_ids = template_frame_ids
                    break

        # Sample rest of the search frames with random interval.
        last = search_frame_ids[0]

        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:
            # Sample id with interval. ('frame_id' instead of 'id': avoid shadowing the builtin.)
            max_id = min(last + self.max_interval + 1, len(visible))
            frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=last, max_id=max_id)

            if frame_id is None:
                # If not found in current range, find from previous range.
                last = last + self.max_interval
            else:
                search_frame_ids.append(frame_id[0])
                last = search_frame_ids[-1]

        # If length is not enough, randomly sample new ids after the first search frame.
        if len(search_frame_ids) < self.num_search_frames:
            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]
            if len(valid_ids) > 0:
                new_ids = random.choices(valid_ids, k=min(len(valid_ids),
                                                          self.num_search_frames - len(search_frame_ids)))
                search_frame_ids = search_frame_ids + new_ids
                search_frame_ids = sorted(search_frame_ids, key=int)

        # If length is still not enough, duplicate last frame.
        while len(search_frame_ids) < self.num_search_frames:
            search_frame_ids.append(search_frame_ids[-1])

        # Diagnostic: report any gap that exceeds max_interval.
        for i in range(1, self.num_search_frames):
            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:
                print(search_frame_ids[i] - search_frame_ids[i - 1])

        return template_frame_ids, search_frame_ids

    def __getitem__(self, index):
        """
        args:
            index (int): Index (Ignored since we sample randomly)

        returns:
            TensorDict - dict containing all the data blocks
        """
        # Select a dataset.
        dataset = random.choices(self.datasets, self.p_datasets)[0]

        # NOTE: the original had an `if dataset.get_name() == 'got10k'` whose two
        # branches were identical; collapsed here (behavior unchanged), keeping
        # the save/scale/restore protocol around sampling.
        max_gap = self.max_gap
        max_interval = self.max_interval
        self.max_gap = max_gap * self.extra
        self.max_interval = max_interval * self.extra

        is_video_dataset = dataset.is_video_sequence()

        # Sample a sequence with enough visible frames.
        enough_visible_frames = False
        while not enough_visible_frames:
            # Sample a sequence.
            seq_id = random.randint(0, dataset.get_num_sequences() - 1)

            # Sample frames.
            seq_info_dict = dataset.get_sequence_info(seq_id)
            visible = seq_info_dict['visible']

            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (
                    self.num_search_frames + self.num_template_frames) and \
                len(visible) >= (self.num_search_frames + self.num_template_frames)

            enough_visible_frames = enough_visible_frames or not is_video_dataset

        if is_video_dataset:
            if self.frame_sample_mode == 'sequential':
                template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            elif self.frame_sample_mode == 'random_interval':
                # Sequential sampling with probability (1 - prob).
                if random.random() < self.prob:
                    template_frame_ids, search_frame_ids = self._random_interval_sample(visible)
                else:
                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            else:
                # Restore before raising so the sampler state is not left scaled.
                self.max_gap = max_gap
                self.max_interval = max_interval
                raise NotImplementedError
        else:
            # Image dataset: repeat the image to generate a synthetic video.
            template_frame_ids = [1] * self.num_template_frames
            search_frame_ids = [1] * self.num_search_frames

        self.max_gap = max_gap
        self.max_interval = max_interval

        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids,
                                                                               seq_info_dict)
        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids,
                                                                         seq_info_dict)

        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']]  # tensor -> numpy array
        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']]  # tensor -> numpy array

        return TensorDict({'template_images': np.array(template_frames).squeeze(),  # 1 template images
                           'template_annos': np.array(template_bbox).squeeze(),
                           'search_images': np.array(search_frames),  # (num_frames) search images
                           'search_annos': np.array(search_bbox),
                           'seq_id': seq_id,
                           'dataset': dataset.get_name(),
                           'search_class': meta_obj_search.get('object_class_name'),
                           'num_frames': len(search_frames)})
class SequenceSampler(torch.utils.data.Dataset):
    """Sample sequence for sequence-level training (v2: two template frames)."""

    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,
                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential',
                 max_interval=10, prob=0.7):
        """
        args:
            datasets - List of datasets to be used for training
            p_datasets - List of probabilities by which each dataset is sampled
            samples_per_epoch - Number of training samples per epoch
            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames
            max_interval - Maximum interval between sampled frames
            num_search_frames - Number of search frames to sample
            num_template_frames - Number of template frames to sample
            frame_sample_mode - 'sequential' or 'random_interval'
            prob - sequential sampling by prob / interval sampling by 1-prob
        """
        self.datasets = datasets

        # If p not provided, sample proportionally to dataset length.
        if p_datasets is None:
            p_datasets = [len(d) for d in self.datasets]

        # Normalize probabilities.
        p_total = sum(p_datasets)
        self.p_datasets = [x / p_total for x in p_datasets]

        self.samples_per_epoch = samples_per_epoch
        self.max_gap = max_gap
        self.max_interval = max_interval
        self.num_search_frames = num_search_frames
        self.num_template_frames = num_template_frames
        self.frame_sample_mode = frame_sample_mode
        self.prob = prob
        # Multiplier applied to max_gap/max_interval inside __getitem__.
        self.extra = 1

    def __len__(self):
        return self.samples_per_epoch

    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):
        """Sample num_ids frame indices in [min_id, max_id) where the target is visible.

        returns:
            list - sampled frame numbers (with replacement), or None if none available.
        """
        if num_ids == 0:
            return []
        if min_id is None or min_id < 0:
            min_id = 0
        if max_id is None or max_id > len(visible):
            max_id = len(visible)

        valid_ids = [i for i in range(min_id, max_id) if visible[i]]

        if len(valid_ids) == 0:
            return None

        return random.choices(valid_ids, k=num_ids)

    def _sequential_sample(self, visible):
        """Sample TWO template frames (initial + dynamic within max_gap), then
        num_search_frames consecutive visible frames after the later template."""
        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,
                                                      max_id=len(visible) - self.num_search_frames)
        # Second (dynamic) template within max_gap of the first.
        template_another = self._sample_visible_ids(
            visible, num_ids=1, min_id=template_frame_ids[0],
            max_id=min(len(visible) - self.num_search_frames,
                       template_frame_ids[0] + self.max_gap))
        template_frame_ids.append(template_another[0])
        template_frame_ids.sort()

        if self.max_gap == -1:
            # No gap allowed: search run starts at the later template.
            left = template_frame_ids[1]
        else:
            # template frame (1) -> (max_gap) -> search frames (num_search_frames)
            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[1] + self.max_gap)
            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[1],
                                            max_id=left_max)[0]

        valid_ids = [i for i in range(left, len(visible)) if visible[i]]
        search_frame_ids = valid_ids[:self.num_search_frames]

        # If length is not enough, walk right, padding with the last id at the end.
        last = search_frame_ids[-1]
        while len(search_frame_ids) < self.num_search_frames:
            if last >= len(visible) - 1:
                search_frame_ids.append(last)
            else:
                last += 1
                if visible[last]:
                    search_frame_ids.append(last)

        return template_frame_ids, search_frame_ids

    def _random_interval_sample(self, visible):
        """Sample a template frame and search frames separated by random intervals (<= max_interval)."""
        # Get valid ids.
        valid_ids = [i for i in range(len(visible)) if visible[i]]

        # Sample template frame; shrink the assumed interval until it fits the sequence.
        avg_interval = self.max_interval
        while avg_interval * (self.num_search_frames - 1) > len(visible):
            avg_interval = max(avg_interval - 1, 1)

        while True:
            template_frame_ids = self._sample_visible_ids(
                visible, num_ids=1, min_id=0,
                max_id=len(visible) - avg_interval * (self.num_search_frames - 1))
            if template_frame_ids is None:
                avg_interval = avg_interval - 1
            else:
                break
            if avg_interval == 0:
                template_frame_ids = [valid_ids[0]]
                break

        # Sample first search frame.
        if self.max_gap == -1:
            search_frame_ids = template_frame_ids
        else:
            avg_interval = self.max_interval
            while avg_interval * (self.num_search_frames - 1) > len(visible):
                avg_interval = max(avg_interval - 1, 1)

            while True:
                left_max = min(max(len(visible) - avg_interval * (self.num_search_frames - 1),
                                   template_frame_ids[0] + 1),
                               template_frame_ids[0] + self.max_gap)
                search_frame_ids = self._sample_visible_ids(visible, num_ids=1,
                                                            min_id=template_frame_ids[0],
                                                            max_id=left_max)
                if search_frame_ids is None:
                    avg_interval = avg_interval - 1
                else:
                    break
                if avg_interval == -1:
                    search_frame_ids = template_frame_ids
                    break

        # Sample rest of the search frames with random interval.
        last = search_frame_ids[0]

        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:
            # Sample id with interval. ('frame_id' instead of 'id': avoid shadowing the builtin.)
            max_id = min(last + self.max_interval + 1, len(visible))
            frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=last, max_id=max_id)

            if frame_id is None:
                # If not found in current range, find from previous range.
                last = last + self.max_interval
            else:
                search_frame_ids.append(frame_id[0])
                last = search_frame_ids[-1]

        # If length is not enough, randomly sample new ids after the first search frame.
        if len(search_frame_ids) < self.num_search_frames:
            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]
            if len(valid_ids) > 0:
                new_ids = random.choices(valid_ids, k=min(len(valid_ids),
                                                          self.num_search_frames - len(search_frame_ids)))
                search_frame_ids = search_frame_ids + new_ids
                search_frame_ids = sorted(search_frame_ids, key=int)

        # If length is still not enough, duplicate last frame.
        while len(search_frame_ids) < self.num_search_frames:
            search_frame_ids.append(search_frame_ids[-1])

        # Diagnostic: report any gap that exceeds max_interval.
        for i in range(1, self.num_search_frames):
            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:
                print(search_frame_ids[i] - search_frame_ids[i - 1])

        return template_frame_ids, search_frame_ids

    def __getitem__(self, index):
        """
        args:
            index (int): Index (Ignored since we sample randomly)

        returns:
            TensorDict - dict containing all the data blocks
        """
        # Select a dataset.
        dataset = random.choices(self.datasets, self.p_datasets)[0]

        # NOTE: the original had an `if dataset.get_name() == 'got10k'` whose two
        # branches were identical; collapsed here (behavior unchanged), keeping
        # the save/scale/restore protocol around sampling.
        max_gap = self.max_gap
        max_interval = self.max_interval
        self.max_gap = max_gap * self.extra
        self.max_interval = max_interval * self.extra

        is_video_dataset = dataset.is_video_sequence()

        # Sample a sequence with enough visible frames.
        enough_visible_frames = False
        while not enough_visible_frames:
            # Sample a sequence.
            seq_id = random.randint(0, dataset.get_num_sequences() - 1)

            # Sample frames.
            seq_info_dict = dataset.get_sequence_info(seq_id)
            visible = seq_info_dict['visible']

            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (
                    self.num_search_frames + self.num_template_frames) and \
                len(visible) >= (self.num_search_frames + self.num_template_frames)

            enough_visible_frames = enough_visible_frames or not is_video_dataset

        if is_video_dataset:
            if self.frame_sample_mode == 'sequential':
                template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            elif self.frame_sample_mode == 'random_interval':
                if random.random() < self.prob:
                    template_frame_ids, search_frame_ids = self._random_interval_sample(visible)
                else:
                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)
            else:
                # Restore before raising so the sampler state is not left scaled.
                self.max_gap = max_gap
                self.max_interval = max_interval
                raise NotImplementedError
        else:
            # Image dataset: repeat the image to generate a synthetic video.
            template_frame_ids = [1] * self.num_template_frames
            search_frame_ids = [1] * self.num_search_frames

        self.max_gap = max_gap
        self.max_interval = max_interval

        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids,
                                                                               seq_info_dict)
        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids,
                                                                         seq_info_dict)
        # visible_ratio = search_anno['visible_ratio']

        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']]  # tensor -> numpy array
        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']]  # tensor -> numpy array

        return TensorDict({'template_images': np.array(template_frames).squeeze(),  # 1 template images
                           'template_annos': np.array(template_bbox).squeeze(),
                           'search_images': np.array(search_frames),  # (num_frames) search images
                           'search_annos': np.array(search_bbox),
                           'seq_id': seq_id,
                           'dataset': dataset.get_name(),
                           'search_class': meta_obj_search.get('object_class_name'),
                           'num_frames': len(search_frames),
                           # 'visible_ratio': visible_ratio
                           })
class Transform:
    """A set of transformations, used for e.g. data augmentation.

    Args of constructor:
        transforms: An arbitrary number of transformations, derived from the
            TransformBase class. They are applied in the order they are given.

    The Transform object can jointly transform images, bounding boxes and
    segmentation masks. This is done by calling the object with the following
    key-word arguments (all are optional).

    The following arguments are inputs to be transformed. They are either
    supplied as a single instance, or a list of instances.
        image  - Image
        coords - 2xN dimensional Tensor of 2D image coordinates [y, x]
        bbox   - Bounding box on the form [x, y, w, h]
        mask   - Segmentation mask with discrete classes

    The following parameters can be supplied with calling the transform object:
        joint [Bool] - If True then transform all images/coords/bbox/mask in the
            list jointly using the same transformation. Otherwise each tuple
            (images, coords, bbox, mask) will be transformed independently using
            different random rolls. Default: True.
        new_roll [Bool] - If False, then no new random roll is performed, and the
            saved result from the previous roll is used instead. Default: True.

    Check the DiMPProcessing class for examples.
    """

    def __init__(self, *transforms):
        # Allow passing a single list/tuple of transforms.
        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):
            transforms = transforms[0]
        self.transforms = transforms
        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']
        self._valid_args = ['joint', 'new_roll']
        self._valid_all = self._valid_inputs + self._valid_args

    def __call__(self, **inputs):
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        for v in inputs.keys():
            if v not in self._valid_all:
                raise ValueError('Incorrect input \"{}\" to transform. Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))

        joint_mode = inputs.get('joint', True)
        new_roll = inputs.get('new_roll', True)

        if not joint_mode:
            # Transform each tuple of inputs independently (different random rolls).
            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])
            return tuple(list(o) for o in out)

        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}

        for t in self.transforms:
            out = t(**out, joint=joint_mode, new_roll=new_roll)
        if len(var_names) == 1:
            return out[var_names[0]]
        # Make sure order is correct (matches the order the inputs were given in).
        return tuple(out[v] for v in var_names)

    def _split_inputs(self, inputs):
        # Re-group list-valued inputs into per-element dicts, propagating
        # non-'joint' arguments (scalar or per-element lists) to each dict.
        var_names = [k for k in inputs.keys() if k in self._valid_inputs]
        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]
        for arg_name, arg_val in filter(lambda it: it[0] != 'joint' and it[0] in self._valid_args, inputs.items()):
            if isinstance(arg_val, list):
                for inp, av in zip(split_inputs, arg_val):
                    inp[arg_name] = av
            else:
                for inp in split_inputs:
                    inp[arg_name] = arg_val
        return split_inputs

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class TransformBase:
    """Base class for transformation objects. See the Transform class for details."""

    def __init__(self):
        """2020.12.24 Add 'att' to valid inputs"""
        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']
        self._valid_args = ['new_roll']
        self._valid_all = self._valid_inputs + self._valid_args
        self._rand_params = None

    def __call__(self, **inputs):
        # Split input into transformable variables and control arguments.
        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}
        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}

        # Roll random parameters for the transform (reuse previous roll if new_roll=False).
        if input_args.get('new_roll', True):
            rand_params = self.roll()
            if rand_params is None:
                rand_params = ()
            elif not isinstance(rand_params, tuple):
                rand_params = (rand_params,)
            self._rand_params = rand_params

        outputs = dict()
        for var_name, var in input_vars.items():
            if var is not None:
                transform_func = getattr(self, 'transform_' + var_name)
                # coords/bbox transforms additionally receive the image size.
                if var_name in ['coords', 'bbox']:
                    params = (self._get_image_size(input_vars),) + self._rand_params
                else:
                    params = self._rand_params
                if isinstance(var, (list, tuple)):
                    outputs[var_name] = [transform_func(x, *params) for x in var]
                else:
                    outputs[var_name] = transform_func(var, *params)
        return outputs

    def _get_image_size(self, inputs):
        # Derive (H, W) from the image if present, otherwise from the mask.
        im = None
        for var_name in ['image', 'mask']:
            if inputs.get(var_name) is not None:
                im = inputs[var_name]
                break
        if im is None:
            return None
        if isinstance(im, (list, tuple)):
            im = im[0]
        if isinstance(im, np.ndarray):
            return im.shape[:2]
        if torch.is_tensor(im):
            return (im.shape[-2], im.shape[-1])
        raise Exception('Unknown image type')

    def roll(self):
        return None

    def transform_image(self, image, *rand_params):
        """Must be deterministic"""
        return image

    def transform_coords(self, coords, image_shape, *rand_params):
        """Must be deterministic"""
        return coords

    def transform_bbox(self, bbox, image_shape, *rand_params):
        """Assumes [x, y, w, h]"""
        # Check if transform_coords is not overloaded; if so, the box is unchanged.
        if self.transform_coords.__code__ == TransformBase.transform_coords.__code__:
            return bbox

        # Convert (x, y, w, h) to the four corner points in [y; x] layout,
        # transform them, and take the axis-aligned bounding box of the result.
        coord = bbox.clone().view(-1, 2).t().flip(0)

        x1 = coord[1, 0]
        x2 = coord[1, 0] + coord[1, 1]

        y1 = coord[0, 0]
        y2 = coord[0, 0] + coord[0, 1]

        coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]])

        coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0)
        tl = torch.min(coord_transf, dim=1)[0]
        sz = torch.max(coord_transf, dim=1)[0] - tl
        bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape)
        return bbox_out

    def transform_mask(self, mask, *rand_params):
        """Must be deterministic"""
        return mask

    def transform_att(self, att, *rand_params):
        """2020.12.24 Added to deal with attention masks"""
        return att


class ToTensor(TransformBase):
    """Convert to a Tensor"""

    def transform_image(self, image):
        # Handle numpy array: HWC (or HW) -> CHW tensor.
        if image.ndim == 2:
            image = image[:, :, None]

        image = torch.from_numpy(image.transpose((2, 0, 1)))
        # Backward compatibility: byte images are scaled to [0, 1].
        if isinstance(image, torch.ByteTensor):
            return image.float().div(255)
        else:
            return image

    def transform_mask(self, mask):
        # FIX: this method was misspelled 'transfrom_mask', so it never
        # overrode TransformBase.transform_mask and masks were passed through
        # unconverted; it also implicitly returned None for non-ndarray input.
        if isinstance(mask, np.ndarray):
            return torch.from_numpy(mask)
        return mask

    def transform_att(self, att):
        if isinstance(att, np.ndarray):
            return torch.from_numpy(att).to(torch.bool)
        elif isinstance(att, torch.Tensor):
            return att.to(torch.bool)
        else:
            raise ValueError("dtype must be np.ndarray or torch.Tensor")
image.float().mul(brightness_factor).clamp(0.0, 255.0) def transform_mask(self, mask, brightness_factor): if isinstance(mask, np.ndarray): return torch.from_numpy(mask) else: return mask def transform_att(self, att, brightness_factor): if isinstance(att, np.ndarray): return torch.from_numpy(att).to(torch.bool) elif isinstance(att, torch.Tensor): return att.to(torch.bool) else: raise ValueError ("dtype must be np.ndarray or torch.Tensor") class Normalize(TransformBase): """Normalize image""" def __init__(self, mean, std, inplace=False): super().__init__() self.mean = mean self.std = std self.inplace = inplace def transform_image(self, image): return tvisf.normalize(image, self.mean, self.std, self.inplace) class ToGrayscale(TransformBase): """Converts image to grayscale with probability""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32) def roll(self): return random.random() < self.probability def transform_image(self, image, do_grayscale): if do_grayscale: if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY) return np.stack([img_gray, img_gray, img_gray], axis=2) # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2) return image class ToBGR(TransformBase): """Converts image to BGR""" def transform_image(self, image): if torch.is_tensor(image): raise NotImplementedError('Implement torch variant.') img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR) return img_bgr class RandomHorizontalFlip(TransformBase): """Horizontally flip image randomly with a probability p.""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def roll(self): return random.random() < self.probability def transform_image(self, image, do_flip): if do_flip: if torch.is_tensor(image): return image.flip((2,)) return 
np.fliplr(image).copy() return image def transform_coords(self, coords, image_shape, do_flip): if do_flip: coords_flip = coords.clone() coords_flip[1,:] = (image_shape[1] - 1) - coords[1,:] return coords_flip return coords def transform_mask(self, mask, do_flip): if do_flip: if torch.is_tensor(mask): return mask.flip((-1,)) return np.fliplr(mask).copy() return mask def transform_att(self, att, do_flip): if do_flip: if torch.is_tensor(att): return att.flip((-1,)) return np.fliplr(att).copy() return att class RandomHorizontalFlip_Norm(RandomHorizontalFlip): """Horizontally flip image randomly with a probability p. The difference is that the coord is normalized to [0,1]""" def __init__(self, probability = 0.5): super().__init__() self.probability = probability def transform_coords(self, coords, image_shape, do_flip): """we should use 1 rather than image_shape""" if do_flip: coords_flip = coords.clone() coords_flip[1,:] = 1 - coords[1,:] return coords_flip return coords ================================================ FILE: lib/train/data/wandb_logger.py ================================================ from collections import OrderedDict try: import wandb except ImportError: raise ImportError( 'Please run "pip install wandb" to install wandb') class WandbWriter: def __init__(self, exp_name, cfg, output_dir, cur_step=0, step_interval=0): self.wandb = wandb self.step = cur_step self.interval = step_interval wandb.init(project="tracking", name=exp_name, config=cfg, dir=output_dir) def write_log(self, stats: OrderedDict, epoch=-1): self.step += 1 for loader_name, loader_stats in stats.items(): if loader_stats is None: continue log_dict = {} for var_name, val in loader_stats.items(): if hasattr(val, 'avg'): log_dict.update({loader_name + '/' + var_name: val.avg}) else: log_dict.update({loader_name + '/' + var_name: val.val}) if epoch >= 0: log_dict.update({loader_name + '/epoch': epoch}) self.wandb.log(log_dict, step=self.step*self.interval) 
================================================ FILE: lib/train/data_specs/README.md ================================================ # README ## Description for different text files GOT10K - got10k_train_full_split.txt: the complete GOT-10K training set. (9335 videos) - got10k_train_split.txt: part of videos from the GOT-10K training set - got10k_val_split.txt: another part of videos from the GOT-10K training set - got10k_vot_exclude.txt: 1k videos that are forbidden from "using to train models then testing on VOT" (as required by [VOT Challenge](https://www.votchallenge.net/vot2020/participation.html)) - got10k_vot_train_split.txt: part of videos from the "VOT-permitted" GOT-10K training set - got10k_vot_val_split.txt: another part of videos from the "VOT-permitted" GOT-10K training set LaSOT - lasot_train_split.txt: the complete LaSOT training set TrackingNnet - trackingnet_classmap.txt: The map from the sequence name to the target class for the TrackingNet ================================================ FILE: lib/train/data_specs/got10k_train_full_split.txt ================================================ 3784 8998 3906 1631 8277 8358 2338 7938 2988 8302 2662 2663 2825 7447 4781 2218 6348 5860 4517 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 701 683 2081 1831 2404 1459 2741 5972 3618 7462 2654 103 2174 6224 2989 2506 2766 5912 2699 3295 3986 609 4895 6673 801 1098 1602 2490 3129 8476 3186 7355 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 731 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8139 8760 8173 2332 4131 5207 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 1551 998 6565 8127 8927 2544 4365 510 768 3535 3875 6808 2931 487 1088 4451 368 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 3756 7840 6315 7827 3300 6261 4163 2217 6549 94 7236 9136 1857 6691 
3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 4295 1889 1908 4868 4498 1968 9103 3273 8723 7413 4114 5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 7294 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4217 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 7032 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 5523 7403 3379 2323 4833 5750 3178 6548 8891 7501 3280 7404 343 2171 8397 1367 8611 6118 6603 3729 7182 9048 7733 5642 7141 3335 4845 5449 3467 6250 163 5168 2040 5339 3609 8352 3426 8567 769 187 6151 6437 7028 8507 3970 9146 2068 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 9115 4132 1211 5459 4814 545 4556 238 4296 2724 1260 2581 6087 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 7643 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 1732 6702 7027 6388 7012 328 2130 452 306 7669 3134 5761 3703 44 4189 695 7672 5224 9215 5644 3143 3704 5443 2348 7177 2328 4725 354 1418 7810 7746 9002 5759 7226 4535 9160 4385 5397 7249 2936 3204 6287 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 4652 1964 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 6789 5636 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 7821 9224 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 7310 1413 1932 4787 2579 3325 508 5610 
6480 4290 479 3792 6628 2545 6717 6972 2665 6730 3547 6845 5929 3540 4356 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 941 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 8892 8162 1212 6277 3725 7806 6149 7874 718 6888 7118 277 656 8763 8289 4759 5854 8659 7710 3145 5981 1881 5799 6947 1609 6396 2631 2887 318 2550 6132 1736 2907 7816 48 4304 8133 6698 2760 7779 7732 7642 1154 7242 711 9262 539 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 8241 1856 8641 3981 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 6968 2755 1924 2098 2972 6006 5865 8740 2418 3401 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6300 6909 853 7301 3279 123 7186 3194 5553 5133 1931 4622 6075 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 3146 2594 2315 3389 3885 2621 4116 5389 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4757 4502 4703 9034 9108 5451 2619 5022 9158 490 6540 1466 2962 8771 3036 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 2511 1311 6560 7519 8027 9217 6464 6364 3779 4822 3563 3982 5896 5510 6655 1524 2846 3137 621 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 3643 712 9044 2230 499 7166 96 3172 8431 8401 1470 6356 8817 927 4212 2152 1795 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 3950 7420 4878 7406 7046 7808 4911 8804 6927 8820 3264 300 8670 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 5280 2791 641 5454 4581 5420 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 1471 6279 5806 6867 5890 2396 3416 1981 6073 5872 
3045 4182 7607 3318 4414 2998 6553 7139 5624 2123 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 6759 5393 1107 2597 878 9309 7576 5250 1759 3142 2015 571 3921 1255 7080 893 2160 1355 82 1562 9153 8583 4085 4644 7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 4478 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 4054 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 4 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 3034 7887 3425 1118 926 3430 1544 5902 2282 1124 2334 129 1372 4842 6473 4382 1028 415 8269 8073 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 928 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 4973 382 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3004 3847 3676 1184 1705 6745 1263 5020 746 1888 7036 1033 3914 5433 3905 4641 8909 228 4801 3766 8085 643 6914 9280 3013 5657 3696 1590 2920 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 3943 1799 8577 5577 4969 9163 2025 6061 4026 5732 588 7017 1415 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2359 2912 6109 100 8149 5470 2807 3384 6413 3362 5621 6019 9241 9268 7703 4111 7967 5458 7181 5492 1112 6729 4577 106 8853 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 767 4796 8147 2658 5769 6985 7065 421 7990 3289 1540 9316 2251 6896 5947 4965 2652 4480 963 9047 7168 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 4078 2351 2322 3881 4945 560 6837 6094 6475 7901 3 771 8029 3135 8044 7127 3741 5156 
7030 4906 113 3747 7042 5232 5225 3002 4747 6879 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 7987 828 3003 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 4631 509 211 6230 7241 8250 2219 2582 8353 7790 7583 4462 3904 9004 6942 1704 5686 8051 2981 5511 6182 7088 1699 1222 3455 6189 1528 5197 6221 7893 3283 2837 7773 8766 2942 8021 614 4102 7362 1786 400 133 556 3127 5237 3727 1440 3873 6322 8448 6285 8696 8800 4009 3386 454 4847 5685 9093 246 1314 5895 6863 4302 4260 8405 8417 7116 255 3223 4737 7852 6337 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6215 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 3788 6138 7831 4442 757 4464 1170 2568 19 323 6584 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 8236 2897 4004 1526 5345 4423 6246 8578 1057 3711 4986 4785 3997 7311 4788 107 8387 2041 2608 8628 5830 6031 783 6817 3293 541 773 8473 2501 7247 5667 804 483 1639 696 6060 5429 5762 1527 7342 1329 6225 7895 381 8030 8520 8362 4734 3526 9273 2039 4142 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 8101 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 1225 6371 624 2209 1428 1158 7648 466 8765 802 153 4639 3657 6482 9320 2693 6591 3294 2617 5052 6305 3227 8784 7170 93 5868 6716 1671 178 2703 954 3254 2262 5046 5743 8647 6393 7706 6604 3728 6978 7489 7474 8754 2740 2233 6038 1491 8814 2080 2358 5944 5653 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 1836 1863 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 259 7999 8122 910 224 6152 7142 6070 7523 8411 2408 6766 9214 9312 8325 6192 626 6025 
6240 8708 4630 6777 1075 8906 408 9269 6236 9067 2514 8568 2324 156 3136 3530 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 8291 1133 8474 7035 8653 989 4569 9092 8347 3102 1743 9086 5140 7438 1530 4342 2460 7646 5047 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 2112 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 8226 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 1878 4926 6637 3197 7757 8249 4055 6502 1248 990 3928 2770 2751 1020 6426 4190 6839 2671 884 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 4395 6835 7799 7890 2785 5468 7302 5862 1803 6376 3171 8591 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 5813 8982 69 4421 7150 550 8829 8685 3147 8956 3166 7023 8633 3308 2014 3573 3880 4045 2069 6051 4950 702 6664 8418 2454 6181 4853 4166 7022 7418 3605 9181 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 7461 8120 4582 6373 2805 4872 4869 5493 5867 2670 7099 30 8933 930 7919 501 7261 5289 7449 7772 3613 7848 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 2345 8359 4061 1474 3229 270 4245 1979 5995 1517 8652 4006 4880 6137 4693 2528 6996 2926 5798 2477 2549 1128 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1514 1058 4586 6608 7985 3044 1822 3628 6851 549 1811 2184 2601 4608 8922 2540 6659 3859 307 3650 3767 8167 505 4366 4824 5520 461 1933 2401 8106 2055 7844 8544 8838 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 6573 4729 4080 1034 2961 534 8194 5598 9218 2424 329 4154 1597 922 109 8823 
3578 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 3056 468 3808 3064 8798 7052 7767 9231 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 8377 817 8714 6066 4008 3640 6015 1021 7601 4855 6017 87 7071 2730 7268 3614 6084 6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 3876 5824 8020 426 4982 4172 1157 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 8606 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 4199 8736 8991 5241 1698 1294 1753 196 2987 8680 4658 4144 8639 6441 8255 8156 3677 6385 6520 7700 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 867 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 417 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 3626 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5712 5533 9222 2821 1897 819 766 4060 4902 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 3466 665 1047 5881 3328 4664 315 1315 1462 8616 7725 2756 5749 1730 8184 4567 5065 7499 8867 1304 3669 9192 410 8177 6710 1210 2329 8443 3911 1899 7686 3315 7190 6180 3116 5341 4394 8337 9182 6969 5715 2172 1742 2782 3715 9195 7960 2517 4890 8294 2337 8014 3353 7475 2193 4843 8831 4200 4653 6196 6957 3063 2996 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 3155 4707 4331 1494 8756 3174 1074 8116 8295 8311 3048 3752 6050 6483 8003 9175 4674 1642 2556 6166 7165 8441 5413 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 249 2430 7426 8131 411 6267 2045 6606 899 8065 9052 7507 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 1700 5313 6736 79 8212 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 1069 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 5582 493 4972 445 8286 555 320 8300 322 617 3413 4459 525 5631 6314 5157 5300 8545 182 
1031 4429 2495 7586 1534 3099 3916 3738 1919 535 2119 1299 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 1673 3087 3488 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 436 4214 4536 3693 5426 6767 5903 4368 2170 5051 7490 7882 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 2355 1619 3030 1975 5623 2415 1957 6141 9278 3226 3062 5670 7326 8759 8496 6619 8187 8262 6199 951 7183 668 2388 4698 5681 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 2831 776 5010 5770 5282 3574 5102 1278 2281 5455 305 4628 4663 9119 7487 8746 4889 6569 1175 102 2386 8940 2479 5566 53 8833 1918 8001 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 6477 7364 7328 183 4761 7543 304 1196 4623 7839 2139 5519 1953 533 5989 7590 7428 6346 6162 1091 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 4194 213 789 4991 3675 7456 5752 175 7556 4195 907 2248 9057 8467 4594 1017 7968 880 7446 3304 1666 4942 3867 4802 9156 6357 4621 887 6213 5261 1336 521 8928 1818 7864 4792 6742 157 1593 823 7235 5303 5633 1100 1692 8047 5993 1460 6714 1630 6440 6307 3608 292 212 401 5974 7107 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 287 8484 2380 8119 7167 737 5076 6598 3596 5382 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 1407 4410 4579 1061 7113 486 862 3435 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 8180 3347 3459 4162 2690 7478 5856 5240 2389 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 7741 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 120 367 8839 8749 5353 4158 3148 9114 1233 
9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5097 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 4862 7004 1186 8824 1651 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 8475 7554 4031 6297 6059 5329 115 2058 7650 7634 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 6474 5124 5201 6556 6617 9091 3945 8402 5648 5257 2192 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 8068 5655 5997 1029 7506 6740 2575 2990 4898 583 7402 3290 5388 6715 8235 5361 4970 1363 3338 5731 9014 5358 2216 2856 635 1193 3705 6334 7666 5270 1384 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 5385 319 2357 8761 8910 8693 1204 489 4827 8024 7832 6427 3895 89 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 77 5365 1840 485 4456 2841 1169 3271 7144 6886 9140 7173 6003 1659 1807 8371 2439 274 4660 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 2909 3839 4063 1944 6509 1296 7770 1880 6610 4075 9331 4484 302 418 4219 1333 2350 6498 8424 4694 4883 5269 6580 5007 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 4268 2320 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 8440 851 657 2446 4292 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 6161 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 2884 3474 3802 3851 4224 7237 5415 7998 7207 4106 9036 1046 8731 5070 6818 4592 6056 693 1328 3309 5791 2629 2736 202 388 7886 4417 8786 8822 4035 7718 8492 5505 1192 4388 8941 5019 7538 6732 7296 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 6402 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 3782 8252 4706 2675 3790 7459 6164 7316 1149 6687 582 3139 5040 7645 3882 7322 4034 1861 4701 8757 3208 8801 6349 8907 1823 4528 4789 143 4746 9234 3866 9245 
1911 1366 4393 2061 859 1959 6967 3138 7382 9031 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6796 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 223 8615 7417 5771 3170 8061 2935 8263 8257 6883 1276 1239 812 6258 3922 7525 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 1433 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 3442 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 6046 4285 8852 7409 8971 8278 7534 7792 2444 7496 8063 1665 248 3894 4585 1982 66 6651 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 8239 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 3843 491 7153 6217 1148 4741 1761 5484 3423 5474 6916 5876 7252 1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 2211 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 3722 7437 3716 7025 4000 6897 1408 7154 5013 2204 9233 4225 3817 1877 9161 2197 6991 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 6849 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 5099 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 4324 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 4533 6917 4167 4618 2115 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 2484 7188 6028 7530 2828 1977 3238 6496 2340 110 3247 7532 7541 924 1632 484 4487 4439 6447 1319 4944 6347 1791 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8525 8207 5845 9011 5525 4269 4700 1824 8186 8872 8299 3957 8242 4558 6439 
2666 5943 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 9137 4337 2809 2445 809 8298 8643 8316 4951 6853 1572 3215 3938 2249 6515 1337 8328 7712 1429 4117 5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 8056 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4952 4427 4876 9166 3107 2772 6295 5001 5296 3371 6518 6327 854 1615 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 6926 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 1298 7758 7176 9205 2276 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 3663 6363 2971 431 9285 2513 1116 3656 4529 6366 5758 6339 8398 816 4153 648 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 8273 3350 3117 4685 9219 4334 5165 2035 7224 4066 4253 4447 3815 5038 253 3658 2252 330 3967 6443 2143 7336 6135 593 2734 8390 4655 7800 1399 1173 5618 2822 7905 7503 4431 2443 1568 3909 1974 2496 4772 5164 4105 2138 2864 3799 3924 4882 8245 1585 5528 5692 5730 5832 137 3175 2894 2062 3899 2752 4028 2113 5411 293 2647 730 3758 1667 8879 9303 6653 3698 3968 3053 503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 4881 1182 6650 7290 3487 2814 5668 7567 5333 3724 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5575 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 6193 5297 6572 8877 6874 430 5041 5267 1145 7448 620 9112 4294 1432 72 130 2393 7920 4597 6614 8889 3697 1895 3462 2616 3978 4791 7846 7780 8372 428 6559 8326 9211 2363 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 720 8893 3021 1978 4782 1816 2083 
4051 1446 5870 971 9097 8006 4222 8287 686 1377 611 8153 4920 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 108 2793 5151 6977 2587 8188 8752 6318 5815 5116 263 3311 5191 5689 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4993 4141 5407 1865 520 7305 7208 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7873 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 4826 7674 1600 5078 2949 6624 6541 8986 5740 4679 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 6955 3599 2168 1420 1721 1794 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 7660 3098 8773 4076 1576 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 966 2052 8344 1752 7199 4412 8895 8882 2463 339 56 5390 4821 7555 6558 1905 5258 8880 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 872 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 5364 7480 9313 716 3395 6843 2292 918 4329 1035 6344 8593 3404 5212 837 480 8524 1342 3690 6797 7414 288 8863 3352 1628 24 135 3314 2181 8650 5915 8078 6812 1375 6040 906 5635 7126 1387 7458 6119 5591 3795 1531 95 1960 7522 3033 898 4607 4921 3913 2623 4430 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 7427 4267 9094 7050 6048 8455 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 7918 2022 9066 630 2500 5111 6561 5127 8095 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 1489 5954 6662 2207 973 3361 960 6350 4170 7431 8076 1129 750 7559 7194 2261 2300 6590 5893 6889 3125 8788 334 7286 3472 8164 7693 1469 1181 669 7515 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 4069 7198 7560 3306 6100 2932 4473 
1741 14 4672 7564 8748 8874 3804 3678 2240 2610 2862 1358 5716 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 448 152 7299 5431 6178 793 3444 9120 8410 4963 772 5457 6954 3014 6881 286 553 1948 6398 6255 3057 8646 6176 2700 7106 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 4013 3623 7950 1971 1966 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 2639 1177 4282 6492 8128 5859 5029 5123 2877 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 1394 3262 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 7293 4619 4058 2781 8715 1272 5734 4474 4863 4367 49 8844 5605 8671 6743 4281 7077 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 2527 4661 6819 3835 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 6115 3520 1232 5811 317 8976 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 2589 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 857 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 8702 5386 6095 6335 1561 8805 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 7269 362 4178 4145 6127 5162 2399 9225 7068 1650 794 3007 1348 7736 444 6081 5298 2026 2543 9087 3593 7425 3730 8468 2641 7529 1720 6377 8732 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 6505 4118 1617 8837 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 7443 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 2073 4271 1043 2922 8211 2196 5260 3789 
7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 8386 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 5538 850 2669 7612 104 9290 2526 1287 4160 4633 7125 742 744 4534 2407 7714 4555 8764 7661 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 9301 127 7069 4513 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 5639 4888 1584 1969 4846 2915 6804 2775 7306 6506 9306 5231 7740 4283 953 6725 458 8290 1504 1539 8885 138 3764 1256 257 335 1011 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 2964 9207 4910 8709 4411 1672 457 5852 8037 4932 3679 8794 2362 8592 495 8432 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 8935 6681 2070 176 9062 972 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 2863 672 4995 3807 447 1656 2005 5113 3297 8858 2118 6309 1926 481 1156 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 4709 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 9329 5494 3721 629 3621 7371 59 1999 6704 3734 2698 4691 6938 9117 8415 6353 6750 9077 2679 7623 2478 7321 6611 4007 2076 5772 6416 2264 8348 2672 6546 754 6934 7908 8546 4404 592 4748 6625 2129 7944 2377 6 8929 8275 3515 4524 3660 8710 419 6878 170 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 7825 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 1560 5276 8089 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 6644 1683 8141 7754 5744 2952 7568 654 7457 5368 3310 1510 4440 1513 3072 8034 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 6672 8531 5404 6550 8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 8741 3969 2956 4565 920 1830 8558 1930 6677 6825 8256 7454 7521 
4710 1768 3753 6459 5606 5292 1397 240 2733 946 6711 3242 2627 4929 5006 3202 132 2295 2746 1293 2124 5405 4065 818 7464 1820 4398 1312 6994 6920 261 987 6120 3109 331 2986 4338 7774 5122 8396 1364 8969 6712 8161 7083 7595 5940 1566 6419 8634 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 3173 4979 9288 6860 1662 6171 225 5143 313 8327 3275 3385 7626 3103 4401 6794 5600 5043 7664 933 6830 4452 3980 1604 5875 6633 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 9334 51 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 3652 7396 1769 1171 6563 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 7803 6809 3055 188 8031 3124 3683 4537 988 2297 4893 6499 3396 839 4467 5195 4041 6457 4441 6378 6472 6195 4912 6884 5922 7014 1660 38 1595 6752 4554 1292 2709 3800 6057 1980 8775 6587 6392 6263 7214 5219 282 309 6685 2253 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 4983 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6754 6950 5281 3028 8321 3877 7614 8939 4188 2223 239 4745 6875 7096 5571 4403 2640 5556 1845 6690 1825 4157 314 4682 8825 1003 6206 8093 7215 6465 99 8077 6631 4206 2523 366 1208 6043 4640 1457 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 4476 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 6533 4113 2423 2425 4343 5800 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 5701 5724 3366 8050 4984 5023 9203 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 1143 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 3821 2349 2259 6545 8100 8395 2258 2911 5108 3946 1406 8683 8296 5579 2177 8264 1425 3940 957 3647 515 5342 8363 2449 3108 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 1852 8856 392 2018 8878 369 5711 9230 5304 
7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 7359 6431 1290 5918 8705 3438 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 3428 6487 6595 45 5792 333 762 2383 3388 666 2166 460 943 364 6980 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 5119 241 5707 9227 544 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 7965 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 5146 1939 6369 8548 6163 5526 4068 9030 5349 8433 748 1477 4265 9200 3878 462 6846 9040 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 3972 5544 3337 6855 1546 2824 1718 6009 2042 251 9076 3330 5004 192 4717 3797 1146 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 6126 1227 1226 781 937 6343 2578 2892 4124 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 8319 827 4175 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 1955 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 4255 203 4239 5976 6200 6435 4425 787 1121 6034 13 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 3786 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 4003 1318 739 3296 8202 1552 6204 5236 3576 4699 9238 1879 488 2274 433 5587 1678 9282 7914 8552 6445 7971 8331 6880 7476 7282 1570 7271 3827 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 3590 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1618 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 7273 5265 6605 7917 1607 6074 4668 7061 1580 
8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 7929 6971 7854 86 9128 4298 622 790 9155 6579 2203 7716 1265 8645 3834 1174 7380 623 8936 4306 8082 4312 8661 5753 7243 2768 8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 2465 4422 5248 7867 1078 5015 6660 1676 5354 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 2466 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 625 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 6129 384 1097 2084 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 2716 3735 3012 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5488 5145 5098 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 7582 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 512 2754 2687 1596 5376 1512 566 6382 7360 1757 8035 2296 4264 3551 1053 4716 1537 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 3974 891 1323 5958 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 5126 6395 4150 547 4120 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 8369 5994 341 81 4083 1685 5152 3380 8739 6615 3829 164 7927 4779 829 4216 8528 3641 4606 2769 6970 1545 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 4293 5166 5805 4126 2451 6906 7234 9243 3778 2940 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 968 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 3464 4823 8588 2938 3060 6406 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 4990 7370 7131 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 6900 5726 2731 2020 4503 3313 6727 8793 2304 523 6036 58 7993 5512 5049 2721 8482 
673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 638 6749 7677 981 7160 4726 1886 7845 7911 6975 568 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 8888 2945 2091 1873 6303 729 6728 2156 3267 1860 6597 1374 4930 5253 938 580 5825 4839 166 8198 6892 8701 74 7094 7284 8954 3156 6140 4279 5594 2229 7535 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 7973 1258 8828 4036 5838 9263 8529 2788 4202 237 3838 1291 2305 4056 5628 7281 1430 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 8439 830 1942 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 4399 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 1275 8230 2302 1114 4960 8860 3900 6468 5058 1505 8868 5588 3858 1947 2565 1472 8499 243 8442 6583 7085 5374 2250 4291 4426 492 2311 8305 3662 5338 8780 7488 3890 5005 2442 4680 7358 9116 4397 5999 587 7902 83 3566 2134 8942 4767 6601 2456 1745 5736 5254 8017 4015 7690 3798 8947 1067 2116 7945 590 2547 2535 64 2053 5359 2493 6669 4351 6412 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 2440 5189 2959 2044 8917 2046 6313 6333 5318 2763 4301 2555 2213 2933 4121 1340 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 1473 5008 3585 3627 2914 5356 2997 2347 881 5652 4849 8808 8351 4017 2010 6836 7616 4391 3630 3712 6099 2969 5238 4333 2301 4406 1236 1050 1864 1104 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 1740 8428 3105 5117 1095 1480 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 6979 7084 5091 2413 8170 5775 1817 529 7220 813 2916 5130 8972 126 1243 2370 4831 9122 3010 5104 2613 6761 7482 909 2146 4595 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 8992 632 1093 4543 645 2352 4115 373 
1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7596 7134 6636 5804 1858 6214 4730 8536 1203 3118 9202 1875 5885 8975 168 5898 4014 4186 3346 3041 5558 9296 8157 4339 3234 1738 2604 6803 5387 5590 125 2173 8012 8005 4858 3069 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 2176 6494 7637 8406 3856 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7138 7432 7602 8333 1582 1378 519 482 9279 8015 6592 4514 3542 2612 628 5053 6699 6227 2094 1621 847 3598 2728 8490 7276 6620 8345 9216 4278 4059 9058 5063 5816 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 8243 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 46 3952 6418 2510 5783 20 3816 2715 3930 2548 5204 4122 4103 708 7756 3825 777 3550 8502 3929 5440 6751 7764 4070 7331 3743 9131 9206 3828 23 41 4197 234 5723 7622 8832 4626 2169 5599 2976 5266 1967 1150 5334 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 4040 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 4234 1653 4485 4019 3370 919 1330 6085 2078 3768 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 6247 6912 4681 1300 7407 8612 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 2727 4336 9090 6403 7255 8919 6522 6760 8898 4803 1938 374 8686 9150 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 15 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 7392 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 9179 785 5325 4770 4250 52 4634 5072 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 7894 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 2866 4011 8751 4776 7954 7102 5697 2032 5729 5017 6962 2051 1092 
764 9019 2759 8581 1484 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 7064 709 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 6179 9300 1142 7947 6456 6291 5789 6538 9134 3049 5075 5399 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 3854 1081 2827 1496 2574 6167 3183 4287 5482 1722 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 4230 2567 6241 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2374 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 5255 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 2474 4196 3495 7217 5206 4836 7759 4376 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3245 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 8171 7978 3059 9196 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 5853 8121 1764 3046 2400 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 2534 7717 6785 471 8214 231 4241 5310 3844 5746 2011 7209 336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 8747 676 3964 6362 3510 7510 2308 1806 5917 1189 4012 3387 1331 5319 5423 8900 147 3780 1696 9111 6783 6497 4104 1898 3987 260 4616 2121 9283 1400 2437 4670 2735 1163 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 5611 404 2691 1015 7092 7562 8624 2291 4193 5934 5503 2326 4408 2960 842 1963 3354 5568 9050 3806 439 9154 6055 6451 2190 7633 688 4354 8890 2813 2872 8102 8317 6609 1497 8389 6449 1682 3594 5103 5812 863 268 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 4865 1077 6507 1658 3811 1774 7897 2238 2943 191 3869 3246 4057 3188 414 8072 7838 1382 4962 6010 5363 4042 1983 4077 7429 1833 3583 4044 1109 1295 386 5481 3927 311 1349 5651 5878 562 2202 8904 765 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 3579 869 2888 8657 6599 741 4288 
2244 7357 5704 8791 208 8587 7969 4805 8526 4887 8871 7468 3343 886 7794 5764 2646 6454 6101 7885 7744 1297 4119 4856 122 2286 2925 5131 3570 5843 3027 5320 5626 540 1862 5401 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 9242 8096 8066 4325 6666 7193 7114 8060 2376 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 4249 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 1923 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7877 7876 897 5985 7346 7537 111 3700 1126 7896 1288 3419 4673 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 1453 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 1200 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 2247 5200 6341 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 9235 1137 9272 775 788 5786 5186 6746 2667 9145 7630 3953 1828 8827 6471 4702 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5396 5060 6486 3937 8023 824 5398 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 9265 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 6654 4330 995 3181 3422 456 1782 3408 6530 719 7587 5910 3058 740 2009 4207 5336 2798 9229 8668 2473 4221 1493 3281 171 9157 9139 7766 6220 9127 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 2953 4257 4935 2705 2572 3436 8513 5884 1385 4852 2637 7091 2761 6007 8332 6694 2422 4917 2186 6898 1390 6965 3132 7698 475 2002 2692 5024 7365 7373 4091 1731 947 3962 8692 1788 8734 
8656 6862 6856 1950 1914 5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 5900 9043 4958 1935 1821 1187 1153 7737 7223 3820 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 34 3560 2179 5462 1402 3654 1376 7936 4246 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 7089 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 2886 3541 8132 2063 8201 5129 2818 7949 6936 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7763 7608 8584 2071 7868 2804 3655 7048 6847 3276 4082 4272 3910 3709 1574 4559 7580 7081 5014 7769 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 8623 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 316 3009 3691 763 4875 3572 4642 3128 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 8589 3915 3095 8310 3180 7043 4458 2889 57 4483 7667 8375 1434 7493 6986 4733 8471 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 3096 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 222 4609 7291 8542 6501 7548 4557 6274 1010 5226 7309 1317 9056 6275 1624 1099 4191 4030 7270 5392 2316 3819 1670 8154 8045 4807 8864 2391 5908 8338 8218 6400 9193 3165 843 6613 6941 4380 9332 5629 7557 4321 3702 681 734 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 3079 1680 6841 703 684 8293 3682 5733 4818 3231 3078 5562 9001 3889 7024 2519 1713 3287 219 6021 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 4181 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 1929 7533 5223 7711 915 1844 5751 3008 8055 961 6142 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 1841 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 8193 6582 5578 1231 9274 7260 7751 8052 6799 
2089 2342 8451 3260 5550 7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 6514 5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3502 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 4768 5641 1884 2330 1804 344 698 3089 1532 4454 761 7289 8094 3432 1747 6811 8722 8826 4646 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 6002 7913 1508 5317 7755 2784 4964 3431 6209 3755 6022 6399 6232 3954 455 5416 6448 1558 7591 245 140 9210 6585 4084 967 7798 6795 7095 6733 3861 9264 361 1045 755 8042 7074 7778 6415 4724 6450 2049 1563 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 1079 5009 3955 7517 5128 3417 3019 2725 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 1019 4669 1785 3533 5877 2704 8603 3726 6668 497 1085 6815 6157 6646 6964 186 8097 5645 8481 8215 3775 2542 7514 5699 4072 3518 5767 3239 3740 1404 8981 4086 6397 6984 4204 6899 682 6589 3317 2944 3456 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 4481 3377 266 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 7653 5467 6234 691 8758 2122 1213 2908 1492 1437 2187 1266 2395 7278 8491 5256 1554 8163 5966 7128 7904 1691 6272 1264 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 675 2125 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 7566 148 8151 8789 7907 8596 715 6018 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 8438 3504 6739 4180 5835 4243 25 4002 1976 3482 
8392 158 5181 4885 8985 11 6872 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 5957 149 6748 1663 3559 7647 2566 1359 8787 5259 7010 554 8231 4229 6005 8172 8125 1350 3571 9051 1973 1386 1781 5788 159 7007 3220 1846 3093 4445 2056 8370 3211 1113 4384 2231 273 4276 642 7663 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3791 3521 210 7388 1039 6269 4052 8570 3285 564 8039 3546 6203 1183 6107 4147 6216 2234 7185 3192 7155 2001 7777 876 944 908 7791 5465 6784 65 9172 5675 7075 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 5741 ================================================ FILE: lib/train/data_specs/got10k_train_split.txt ================================================ 3784 8998 3906 1631 8277 8358 2338 7938 2988 8302 2662 2663 2825 7447 4781 2218 6348 5860 4517 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 701 683 2081 1831 2404 1459 2741 5972 3618 7462 2654 103 2174 6224 2989 2506 2766 5912 2699 3295 3986 609 4895 6673 801 1098 1602 2490 3129 8476 3186 7355 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 731 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8139 8760 8173 2332 4131 5207 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 1551 998 6565 8127 8927 2544 4365 510 768 3535 3875 6808 2931 487 1088 4451 368 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 3756 7840 6315 7827 3300 6261 4163 2217 6549 94 7236 9136 1857 6691 3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 4295 1889 1908 4868 4498 1968 9103 3273 8723 7413 4114 
5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 7294 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4217 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 7032 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 5523 7403 3379 2323 4833 5750 3178 6548 8891 7501 3280 7404 343 2171 8397 1367 8611 6118 6603 3729 7182 9048 7733 5642 7141 3335 4845 5449 3467 6250 163 5168 2040 5339 3609 8352 3426 8567 769 187 6151 6437 7028 8507 3970 9146 2068 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 9115 4132 1211 5459 4814 545 4556 238 4296 2724 1260 2581 6087 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 7643 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 1732 6702 7027 6388 7012 328 2130 452 306 7669 3134 5761 3703 44 4189 695 7672 5224 9215 5644 3143 3704 5443 2348 7177 2328 4725 354 1418 7810 7746 9002 5759 7226 4535 9160 4385 5397 7249 2936 3204 6287 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 4652 1964 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 6789 5636 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 7821 9224 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 7310 1413 1932 4787 2579 3325 508 5610 6480 4290 479 3792 6628 2545 6717 6972 2665 6730 3547 6845 5929 3540 4356 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 941 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 8892 8162 1212 6277 3725 7806 6149 7874 718 6888 
7118 277 656 8763 8289 4759 5854 8659 7710 3145 5981 1881 5799 6947 1609 6396 2631 2887 318 2550 6132 1736 2907 7816 48 4304 8133 6698 2760 7779 7732 7642 1154 7242 711 9262 539 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 8241 1856 8641 3981 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 6968 2755 1924 2098 2972 6006 5865 8740 2418 3401 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6300 6909 853 7301 3279 123 7186 3194 5553 5133 1931 4622 6075 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 3146 2594 2315 3389 3885 2621 4116 5389 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4757 4502 4703 9034 9108 5451 2619 5022 9158 490 6540 1466 2962 8771 3036 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 2511 1311 6560 7519 8027 9217 6464 6364 3779 4822 3563 3982 5896 5510 6655 1524 2846 3137 621 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 3643 712 9044 2230 499 7166 96 3172 8431 8401 1470 6356 8817 927 4212 2152 1795 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 3950 7420 4878 7406 7046 7808 4911 8804 6927 8820 3264 300 8670 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 5280 2791 641 5454 4581 5420 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 1471 6279 5806 6867 5890 2396 3416 1981 6073 5872 3045 4182 7607 3318 4414 2998 6553 7139 5624 2123 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 6759 5393 1107 2597 878 9309 7576 5250 1759 3142 2015 571 3921 1255 7080 893 2160 1355 82 1562 9153 8583 4085 4644 
7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 4478 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 4054 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 4 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 3034 7887 3425 1118 926 3430 1544 5902 2282 1124 2334 129 1372 4842 6473 4382 1028 415 8269 8073 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 928 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 4973 382 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3004 3847 3676 1184 1705 6745 1263 5020 746 1888 7036 1033 3914 5433 3905 4641 8909 228 4801 3766 8085 643 6914 9280 3013 5657 3696 1590 2920 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 3943 1799 8577 5577 4969 9163 2025 6061 4026 5732 588 7017 1415 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2359 2912 6109 100 8149 5470 2807 3384 6413 3362 5621 6019 9241 9268 7703 4111 7967 5458 7181 5492 1112 6729 4577 106 8853 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 767 4796 8147 2658 5769 6985 7065 421 7990 3289 1540 9316 2251 6896 5947 4965 2652 4480 963 9047 7168 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 4078 2351 2322 3881 4945 560 6837 6094 6475 7901 3 771 8029 3135 8044 7127 3741 5156 7030 4906 113 3747 7042 5232 5225 3002 4747 6879 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 7987 828 3003 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 4631 509 211 6230 7241 8250 2219 2582 8353 
7790 7583 4462 3904 9004 6942 1704 5686 8051 2981 5511 6182 7088 1699 1222 3455 6189 1528 5197 6221 7893 3283 2837 7773 8766 2942 8021 614 4102 7362 1786 400 133 556 3127 5237 3727 1440 3873 6322 8448 6285 8696 8800 4009 3386 454 4847 5685 9093 246 1314 5895 6863 4302 4260 8405 8417 7116 255 3223 4737 7852 6337 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6215 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 3788 6138 7831 4442 757 4464 1170 2568 19 323 6584 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 8236 2897 4004 1526 5345 4423 6246 8578 1057 3711 4986 4785 3997 7311 4788 107 8387 2041 2608 8628 5830 6031 783 6817 3293 541 773 8473 2501 7247 5667 804 483 1639 696 6060 5429 5762 1527 7342 1329 6225 7895 381 8030 8520 8362 4734 3526 9273 2039 4142 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 8101 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 1225 6371 624 2209 1428 1158 7648 466 8765 802 153 4639 3657 6482 9320 2693 6591 3294 2617 5052 6305 3227 8784 7170 93 5868 6716 1671 178 2703 954 3254 2262 5046 5743 8647 6393 7706 6604 3728 6978 7489 7474 8754 2740 2233 6038 1491 8814 2080 2358 5944 5653 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 1836 1863 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 259 7999 8122 910 224 6152 7142 6070 7523 8411 2408 6766 9214 9312 8325 6192 626 6025 6240 8708 4630 6777 1075 8906 408 9269 6236 9067 2514 8568 2324 156 3136 3530 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 8291 1133 8474 7035 8653 989 4569 9092 8347 3102 1743 9086 5140 
7438 1530 4342 2460 7646 5047 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 2112 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 8226 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 1878 4926 6637 3197 7757 8249 4055 6502 1248 990 3928 2770 2751 1020 6426 4190 6839 2671 884 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 4395 6835 7799 7890 2785 5468 7302 5862 1803 6376 3171 8591 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 5813 8982 69 4421 7150 550 8829 8685 3147 8956 3166 7023 8633 3308 2014 3573 3880 4045 2069 6051 4950 702 6664 8418 2454 6181 4853 4166 7022 7418 3605 9181 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 7461 8120 4582 6373 2805 4872 4869 5493 5867 2670 7099 30 8933 930 7919 501 7261 5289 7449 7772 3613 7848 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 2345 8359 4061 1474 3229 270 4245 1979 5995 1517 8652 4006 4880 6137 4693 2528 6996 2926 5798 2477 2549 1128 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1514 1058 4586 6608 7985 3044 1822 3628 6851 549 1811 2184 2601 4608 8922 2540 6659 3859 307 3650 3767 8167 505 4366 4824 5520 461 1933 2401 8106 2055 7844 8544 8838 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 6573 4729 4080 1034 2961 534 8194 5598 9218 2424 329 4154 1597 922 109 8823 3578 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 3056 468 3808 3064 8798 7052 7767 9231 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 8377 817 8714 6066 4008 3640 6015 1021 7601 4855 6017 87 7071 2730 7268 3614 6084 
6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 3876 5824 8020 426 4982 4172 1157 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 8606 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 4199 8736 8991 5241 1698 1294 1753 196 2987 8680 4658 4144 8639 6441 8255 8156 3677 6385 6520 7700 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 867 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 417 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 3626 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5712 5533 9222 2821 1897 819 766 4060 4902 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 3466 665 1047 5881 3328 4664 315 1315 1462 8616 7725 2756 5749 1730 8184 4567 5065 7499 8867 1304 3669 9192 410 8177 6710 1210 2329 8443 3911 1899 7686 3315 7190 6180 3116 5341 4394 8337 9182 6969 5715 2172 1742 2782 3715 9195 7960 2517 4890 8294 2337 8014 3353 7475 2193 4843 8831 4200 4653 6196 6957 3063 2996 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 3155 4707 4331 1494 8756 3174 1074 8116 8295 8311 3048 3752 6050 6483 8003 9175 4674 1642 2556 6166 7165 8441 5413 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 249 2430 7426 8131 411 6267 2045 6606 899 8065 9052 7507 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 1700 5313 6736 79 8212 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 1069 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 5582 493 4972 445 8286 555 320 8300 322 617 3413 4459 525 5631 6314 5157 5300 8545 182 1031 4429 2495 7586 1534 3099 3916 3738 1919 535 2119 1299 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 1673 3087 3488 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 436 4214 4536 3693 5426 6767 5903 4368 2170 
5051 7490 7882 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 2355 1619 3030 1975 5623 2415 1957 6141 9278 3226 3062 5670 7326 8759 8496 6619 8187 8262 6199 951 7183 668 2388 4698 5681 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 2831 776 5010 5770 5282 3574 5102 1278 2281 5455 305 4628 4663 9119 7487 8746 4889 6569 1175 102 2386 8940 2479 5566 53 8833 1918 8001 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 6477 7364 7328 183 4761 7543 304 1196 4623 7839 2139 5519 1953 533 5989 7590 7428 6346 6162 1091 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 4194 213 789 4991 3675 7456 5752 175 7556 4195 907 2248 9057 8467 4594 1017 7968 880 7446 3304 1666 4942 3867 4802 9156 6357 4621 887 6213 5261 1336 521 8928 1818 7864 4792 6742 157 1593 823 7235 5303 5633 1100 1692 8047 5993 1460 6714 1630 6440 6307 3608 292 212 401 5974 7107 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 287 8484 2380 8119 7167 737 5076 6598 3596 5382 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 1407 4410 4579 1061 7113 486 862 3435 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 8180 3347 3459 4162 2690 7478 5856 5240 2389 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 7741 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 120 367 8839 8749 5353 4158 3148 9114 1233 9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5097 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 4862 7004 1186 8824 1651 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 8475 7554 4031 6297 6059 5329 115 2058 7650 
7634 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 6474 5124 5201 6556 6617 9091 3945 8402 5648 5257 2192 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 8068 5655 5997 1029 7506 6740 2575 2990 4898 583 7402 3290 5388 6715 8235 5361 4970 1363 3338 5731 9014 5358 2216 2856 635 1193 3705 6334 7666 5270 1384 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 5385 319 2357 8761 8910 8693 1204 489 4827 8024 7832 6427 3895 89 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 77 5365 1840 485 4456 2841 1169 3271 7144 6886 9140 7173 6003 1659 1807 8371 2439 274 4660 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 2909 3839 4063 1944 6509 1296 7770 1880 6610 4075 9331 4484 302 418 4219 1333 2350 6498 8424 4694 4883 5269 6580 5007 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 4268 2320 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 8440 851 657 2446 4292 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 6161 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 2884 3474 3802 3851 4224 7237 5415 7998 7207 4106 9036 1046 8731 5070 6818 4592 6056 693 1328 3309 5791 2629 2736 202 388 7886 4417 8786 8822 4035 7718 8492 5505 1192 4388 8941 5019 7538 6732 7296 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 6402 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 3782 8252 4706 2675 3790 7459 6164 7316 1149 6687 582 3139 5040 7645 3882 7322 4034 1861 4701 8757 3208 8801 6349 8907 1823 4528 4789 143 4746 9234 3866 9245 1911 1366 4393 2061 859 1959 6967 3138 7382 9031 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6796 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 223 8615 7417 5771 3170 8061 2935 8263 8257 6883 1276 
1239 812 6258 3922 7525 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 1433 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 3442 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 6046 4285 8852 7409 8971 8278 7534 7792 2444 7496 8063 1665 248 3894 4585 1982 66 6651 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 8239 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 3843 491 7153 6217 1148 4741 1761 5484 3423 5474 6916 5876 7252 1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 2211 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 3722 7437 3716 7025 4000 6897 1408 7154 5013 2204 9233 4225 3817 1877 9161 2197 6991 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 6849 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 5099 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 4324 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 4533 6917 4167 4618 2115 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 2484 7188 6028 7530 2828 1977 3238 6496 2340 110 3247 7532 7541 924 1632 484 4487 4439 6447 1319 4944 6347 1791 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8525 8207 5845 9011 5525 4269 4700 1824 8186 8872 8299 3957 8242 4558 6439 2666 5943 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 9137 4337 2809 2445 809 8298 8643 8316 4951 6853 1572 3215 3938 2249 6515 1337 8328 7712 1429 4117 
5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 8056 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4952 4427 4876 9166 3107 2772 6295 5001 5296 3371 6518 6327 854 1615 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 6926 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 1298 7758 7176 9205 2276 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 3663 6363 2971 431 9285 2513 1116 3656 4529 6366 5758 6339 8398 816 4153 648 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 8273 3350 3117 4685 9219 4334 5165 2035 7224 4066 4253 4447 3815 5038 253 3658 2252 330 3967 6443 2143 7336 6135 593 2734 8390 4655 7800 1399 1173 5618 2822 7905 7503 4431 2443 1568 3909 1974 2496 4772 5164 4105 2138 2864 3799 3924 4882 8245 1585 5528 5692 5730 5832 137 3175 2894 2062 3899 2752 4028 2113 5411 293 2647 730 3758 1667 8879 9303 6653 3698 3968 3053 503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 4881 1182 6650 7290 3487 2814 5668 7567 5333 3724 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5575 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 6193 5297 6572 8877 6874 430 5041 5267 1145 7448 620 9112 4294 1432 72 130 2393 7920 4597 6614 8889 3697 1895 3462 2616 3978 4791 7846 7780 8372 428 6559 8326 9211 2363 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 720 8893 3021 1978 4782 1816 2083 4051 1446 5870 971 9097 8006 4222 8287 686 1377 611 8153 4920 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 108 2793 5151 6977 2587 8188 8752 6318 5815 5116 263 
3311 5191 5689 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4993 4141 5407 1865 520 7305 7208 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7873 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 4826 7674 1600 5078 2949 6624 6541 8986 5740 4679 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 6955 3599 2168 1420 1721 1794 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 7660 3098 8773 4076 1576 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 966 2052 8344 1752 7199 4412 8895 8882 2463 339 56 5390 4821 7555 6558 1905 5258 8880 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 872 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 5364 7480 9313 716 3395 6843 2292 918 4329 1035 6344 8593 3404 5212 837 480 8524 1342 3690 6797 7414 288 8863 3352 1628 24 135 3314 2181 8650 5915 8078 6812 1375 6040 906 5635 7126 1387 7458 6119 5591 3795 1531 95 1960 7522 3033 898 4607 4921 3913 2623 4430 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 7427 4267 9094 7050 6048 8455 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 7918 2022 9066 630 2500 5111 6561 5127 8095 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 1489 5954 6662 2207 973 3361 960 6350 4170 7431 8076 1129 750 7559 7194 2261 2300 6590 5893 6889 3125 8788 334 7286 3472 8164 7693 1469 1181 669 7515 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 4069 7198 7560 3306 6100 2932 4473 1741 14 4672 7564 8748 8874 3804 3678 2240 2610 2862 1358 5716 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 448 152 7299 5431 6178 793 3444 9120 8410 4963 772 5457 6954 3014 6881 286 553 1948 6398 6255 3057 8646 6176 
2700 7106 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 4013 3623 7950 1971 1966 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 2639 1177 4282 6492 8128 5859 5029 5123 2877 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 1394 3262 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 7293 4619 4058 2781 8715 1272 5734 4474 4863 4367 49 8844 5605 8671 6743 4281 7077 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 2527 4661 6819 3835 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 6115 3520 1232 5811 317 8976 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 2589 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 857 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 8702 5386 6095 6335 1561 8805 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 7269 362 4178 4145 6127 5162 2399 9225 7068 1650 794 3007 1348 7736 444 6081 5298 2026 2543 9087 3593 7425 3730 8468 2641 7529 1720 6377 8732 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 6505 4118 1617 8837 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 7443 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 2073 4271 1043 2922 8211 2196 5260 3789 7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 8386 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 5538 850 2669 7612 104 9290 2526 1287 4160 4633 7125 742 744 
4534 2407 7714 4555 8764 7661 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 9301 127 7069 4513 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 5639 4888 1584 1969 4846 2915 6804 2775 7306 6506 9306 5231 7740 4283 953 6725 458 8290 1504 1539 8885 138 3764 1256 257 335 1011 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 2964 9207 4910 8709 4411 1672 457 5852 8037 4932 3679 8794 2362 8592 495 8432 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 8935 6681 2070 176 9062 972 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 2863 672 4995 3807 447 1656 2005 5113 3297 8858 2118 6309 1926 481 1156 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 4709 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 9329 5494 3721 629 3621 7371 59 1999 6704 3734 2698 4691 6938 9117 8415 6353 6750 9077 2679 7623 2478 7321 6611 4007 2076 5772 6416 2264 8348 2672 6546 754 6934 7908 8546 4404 592 4748 6625 2129 7944 2377 6 8929 8275 3515 4524 3660 8710 419 6878 170 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 7825 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 1560 5276 8089 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 6644 1683 8141 7754 5744 2952 7568 654 7457 5368 3310 1510 4440 1513 3072 8034 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 6672 8531 5404 6550 8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 8741 3969 2956 4565 920 1830 8558 1930 6677 6825 8256 7454 7521 4710 1768 3753 6459 5606 5292 1397 240 2733 946 6711 3242 2627 4929 5006 3202 132 2295 2746 1293 2124 5405 4065 818 7464 1820 4398 1312 6994 6920 261 987 6120 3109 331 2986 4338 7774 5122 8396 1364 8969 6712 8161 7083 7595 5940 1566 6419 
8634 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 3173 4979 9288 6860 1662 6171 225 5143 313 8327 3275 3385 7626 3103 4401 6794 5600 5043 7664 933 6830 4452 3980 1604 5875 6633 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 9334 51 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 3652 7396 1769 1171 6563 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 7803 6809 3055 188 8031 3124 3683 4537 988 2297 4893 6499 3396 839 4467 5195 4041 6457 4441 6378 6472 6195 4912 6884 5922 7014 1660 38 1595 6752 4554 1292 2709 3800 6057 1980 8775 6587 6392 6263 7214 5219 282 309 6685 2253 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 4983 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6754 6950 5281 3028 8321 3877 7614 8939 4188 2223 239 4745 6875 7096 5571 4403 2640 5556 1845 6690 1825 4157 314 4682 8825 1003 6206 8093 7215 6465 99 8077 6631 4206 2523 366 1208 6043 4640 1457 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 4476 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 6533 4113 2423 2425 4343 5800 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 5701 5724 3366 8050 4984 5023 9203 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 1143 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 3821 2349 2259 6545 8100 8395 2258 2911 5108 3946 1406 8683 8296 5579 2177 8264 1425 3940 957 3647 515 5342 8363 2449 3108 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 1852 8856 392 2018 8878 369 5711 9230 5304 7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 7359 6431 1290 5918 8705 3438 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 3428 6487 6595 45 5792 333 762 2383 3388 666 2166 
460 943 364 6980 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 5119 241 5707 9227 544 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 7965 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 5146 1939 6369 8548 6163 5526 4068 9030 5349 8433 748 1477 4265 9200 3878 462 6846 9040 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 3972 5544 3337 6855 1546 2824 1718 6009 2042 251 9076 3330 5004 192 4717 3797 1146 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 6126 1227 1226 781 937 6343 2578 2892 4124 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 8319 827 4175 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 1955 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 4255 203 4239 5976 6200 6435 4425 787 1121 6034 13 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 3786 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 4003 1318 739 3296 8202 1552 6204 5236 3576 4699 9238 1879 488 2274 433 5587 1678 9282 7914 8552 6445 7971 8331 6880 7476 7282 1570 7271 3827 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 3590 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1618 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 7273 5265 6605 7917 1607 6074 4668 7061 1580 8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 7929 6971 7854 86 9128 4298 622 790 9155 6579 2203 7716 1265 8645 3834 1174 7380 623 8936 4306 8082 4312 8661 5753 7243 2768 
8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 2465 4422 5248 7867 1078 5015 6660 1676 5354 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 2466 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 625 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 6129 384 1097 2084 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 2716 3735 3012 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5488 5145 5098 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 7582 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 512 2754 2687 1596 5376 1512 566 6382 7360 1757 8035 2296 4264 3551 1053 4716 1537 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 3974 891 1323 5958 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 5126 6395 4150 547 4120 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 8369 5994 341 81 4083 1685 5152 3380 8739 6615 3829 164 7927 4779 829 4216 8528 3641 4606 2769 6970 1545 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 4293 5166 5805 4126 2451 6906 7234 9243 3778 2940 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 968 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 3464 4823 8588 2938 3060 6406 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 4990 7370 7131 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 6900 5726 2731 2020 4503 3313 6727 8793 2304 523 6036 58 7993 5512 5049 2721 8482 673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 638 6749 7677 981 7160 4726 1886 7845 7911 6975 568 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 8888 2945 2091 1873 6303 729 6728 2156 3267 1860 
6597 1374 4930 5253 938 580 5825 4839 166 8198 6892 8701 74 7094 7284 8954 3156 6140 4279 5594 2229 7535 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 7973 1258 8828 4036 5838 9263 8529 2788 4202 237 3838 1291 2305 4056 5628 7281 1430 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 8439 830 1942 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 4399 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 1275 8230 2302 1114 4960 8860 3900 6468 5058 1505 8868 5588 3858 1947 2565 1472 8499 243 8442 6583 7085 5374 2250 4291 4426 492 2311 8305 3662 5338 8780 7488 3890 5005 2442 4680 7358 9116 4397 5999 587 7902 83 3566 2134 8942 4767 6601 2456 1745 5736 5254 8017 4015 7690 3798 8947 1067 2116 7945 590 2547 2535 64 2053 5359 2493 6669 4351 6412 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 2440 5189 2959 2044 8917 2046 6313 6333 5318 2763 4301 2555 2213 2933 4121 1340 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 1473 5008 3585 3627 2914 5356 2997 2347 881 5652 4849 8808 8351 4017 2010 6836 7616 4391 3630 3712 6099 2969 5238 4333 2301 4406 1236 1050 1864 1104 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 1740 8428 3105 5117 1095 1480 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 6979 7084 5091 2413 8170 5775 1817 529 7220 813 2916 5130 8972 126 1243 2370 4831 9122 3010 5104 2613 6761 7482 909 2146 4595 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 8992 632 1093 4543 645 2352 4115 373 1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7596 7134 6636 5804 1858 6214 4730 8536 1203 3118 9202 1875 5885 8975 168 5898 
4014 4186 3346 3041 5558 9296 8157 4339 3234 1738 2604 6803 5387 5590 125 2173 8012 8005 4858 3069 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 2176 6494 7637 8406 3856 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7138 7432 7602 8333 1582 1378 519 482 9279 8015 6592 4514 3542 2612 628 5053 6699 6227 2094 1621 847 3598 2728 8490 7276 6620 8345 9216 4278 4059 9058 5063 5816 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 8243 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 46 3952 6418 2510 5783 20 3816 2715 3930 2548 5204 4122 4103 708 7756 3825 777 3550 8502 3929 5440 6751 7764 4070 7331 3743 9131 9206 3828 23 41 4197 234 5723 7622 8832 4626 2169 5599 2976 5266 1967 1150 5334 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 4040 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 4234 1653 4485 4019 3370 919 1330 6085 2078 3768 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 6247 6912 4681 1300 7407 8612 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 2727 4336 9090 6403 7255 8919 6522 6760 8898 4803 1938 374 8686 9150 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 15 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 7392 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 9179 785 5325 4770 4250 52 4634 5072 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 7894 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 2866 4011 8751 4776 7954 7102 5697 2032 5729 5017 6962 2051 1092 764 9019 2759 8581 1484 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 7064 709 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 6179 9300 1142 7947 6456 6291 5789 6538 9134 
3049 5075 5399 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 3854 1081 2827 1496 2574 6167 3183 4287 5482 1722 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 4230 2567 6241 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2374 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 5255 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 2474 4196 3495 7217 5206 4836 7759 4376 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3245 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 8171 7978 3059 9196 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 5853 8121 1764 3046 2400 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 2534 7717 6785 471 8214 231 4241 5310 3844 5746 2011 7209 336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 8747 676 3964 6362 3510 7510 2308 1806 5917 1189 4012 3387 1331 5319 5423 8900 147 3780 1696 9111 6783 6497 4104 1898 3987 260 4616 2121 9283 1400 2437 4670 2735 1163 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 5611 404 2691 1015 7092 7562 8624 2291 4193 5934 5503 2326 4408 2960 842 1963 3354 5568 9050 3806 439 9154 6055 6451 2190 7633 688 4354 8890 2813 2872 8102 8317 6609 1497 8389 6449 1682 3594 5103 5812 863 268 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 4865 1077 6507 1658 3811 1774 7897 2238 2943 191 3869 3246 4057 3188 414 8072 7838 1382 4962 6010 5363 4042 1983 4077 7429 1833 3583 4044 1109 1295 386 5481 3927 311 ================================================ FILE: lib/train/data_specs/got10k_val_split.txt ================================================ 1349 5651 5878 562 2202 8904 765 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 3579 869 2888 8657 6599 741 4288 2244 7357 5704 8791 208 8587 7969 4805 8526 4887 8871 7468 3343 886 7794 5764 2646 6454 
6101 7885 7744 1297 4119 4856 122 2286 2925 5131 3570 5843 3027 5320 5626 540 1862 5401 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 9242 8096 8066 4325 6666 7193 7114 8060 2376 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 4249 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 1923 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7877 7876 897 5985 7346 7537 111 3700 1126 7896 1288 3419 4673 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 1453 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 1200 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 2247 5200 6341 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 9235 1137 9272 775 788 5786 5186 6746 2667 9145 7630 3953 1828 8827 6471 4702 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5396 5060 6486 3937 8023 824 5398 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 9265 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 6654 4330 995 3181 3422 456 1782 3408 6530 719 7587 5910 3058 740 2009 4207 5336 2798 9229 8668 2473 4221 1493 3281 171 9157 9139 7766 6220 9127 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 2953 4257 4935 2705 2572 3436 8513 5884 1385 4852 2637 7091 2761 6007 8332 6694 2422 4917 2186 6898 1390 6965 3132 7698 475 2002 2692 5024 7365 7373 4091 1731 947 3962 8692 1788 8734 8656 6862 6856 1950 1914 5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 
5900 9043 4958 1935 1821 1187 1153 7737 7223 3820 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 34 3560 2179 5462 1402 3654 1376 7936 4246 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 7089 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 2886 3541 8132 2063 8201 5129 2818 7949 6936 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7763 7608 8584 2071 7868 2804 3655 7048 6847 3276 4082 4272 3910 3709 1574 4559 7580 7081 5014 7769 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 8623 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 316 3009 3691 763 4875 3572 4642 3128 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 8589 3915 3095 8310 3180 7043 4458 2889 57 4483 7667 8375 1434 7493 6986 4733 8471 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 3096 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 222 4609 7291 8542 6501 7548 4557 6274 1010 5226 7309 1317 9056 6275 1624 1099 4191 4030 7270 5392 2316 3819 1670 8154 8045 4807 8864 2391 5908 8338 8218 6400 9193 3165 843 6613 6941 4380 9332 5629 7557 4321 3702 681 734 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 3079 1680 6841 703 684 8293 3682 5733 4818 3231 3078 5562 9001 3889 7024 2519 1713 3287 219 6021 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 4181 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 1929 7533 5223 7711 915 1844 5751 3008 8055 961 6142 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 1841 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 8193 6582 5578 1231 9274 7260 7751 8052 6799 2089 2342 8451 3260 5550 7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 6514 
5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3502 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 4768 5641 1884 2330 1804 344 698 3089 1532 4454 761 7289 8094 3432 1747 6811 8722 8826 4646 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 6002 7913 1508 5317 7755 2784 4964 3431 6209 3755 6022 6399 6232 3954 455 5416 6448 1558 7591 245 140 9210 6585 4084 967 7798 6795 7095 6733 3861 9264 361 1045 755 8042 7074 7778 6415 4724 6450 2049 1563 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 1079 5009 3955 7517 5128 3417 3019 2725 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 1019 4669 1785 3533 5877 2704 8603 3726 6668 497 1085 6815 6157 6646 6964 186 8097 5645 8481 8215 3775 2542 7514 5699 4072 3518 5767 3239 3740 1404 8981 4086 6397 6984 4204 6899 682 6589 3317 2944 3456 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 4481 3377 266 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 7653 5467 6234 691 8758 2122 1213 2908 1492 1437 2187 1266 2395 7278 8491 5256 1554 8163 5966 7128 7904 1691 6272 1264 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 675 2125 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 7566 148 8151 8789 7907 8596 715 6018 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 8438 3504 6739 4180 5835 4243 25 4002 1976 3482 8392 158 5181 4885 8985 11 6872 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 
5957 149 6748 1663 3559 7647 2566 1359 8787 5259 7010 554 8231 4229 6005 8172 8125 1350 3571 9051 1973 1386 1781 5788 159 7007 3220 1846 3093 4445 2056 8370 3211 1113 4384 2231 273 4276 642 7663 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3791 3521 210 7388 1039 6269 4052 8570 3285 564 8039 3546 6203 1183 6107 4147 6216 2234 7185 3192 7155 2001 7777 876 944 908 7791 5465 6784 65 9172 5675 7075 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 5741 ================================================ FILE: lib/train/data_specs/got10k_vot_exclude.txt ================================================ GOT-10k_Train_000004 GOT-10k_Train_000013 GOT-10k_Train_000015 GOT-10k_Train_000020 GOT-10k_Train_000024 GOT-10k_Train_000034 GOT-10k_Train_000038 GOT-10k_Train_000048 GOT-10k_Train_000051 GOT-10k_Train_000059 GOT-10k_Train_000077 GOT-10k_Train_000081 GOT-10k_Train_000089 GOT-10k_Train_000093 GOT-10k_Train_000094 GOT-10k_Train_000096 GOT-10k_Train_000104 GOT-10k_Train_000107 GOT-10k_Train_000108 GOT-10k_Train_000120 GOT-10k_Train_000132 GOT-10k_Train_000170 GOT-10k_Train_000186 GOT-10k_Train_000212 GOT-10k_Train_000213 GOT-10k_Train_000222 GOT-10k_Train_000223 GOT-10k_Train_000240 GOT-10k_Train_000246 GOT-10k_Train_000249 GOT-10k_Train_000266 GOT-10k_Train_000268 GOT-10k_Train_000287 GOT-10k_Train_000293 GOT-10k_Train_000305 GOT-10k_Train_000316 GOT-10k_Train_000319 GOT-10k_Train_000322 GOT-10k_Train_000331 GOT-10k_Train_000334 GOT-10k_Train_000354 GOT-10k_Train_000361 GOT-10k_Train_000368 GOT-10k_Train_000382 GOT-10k_Train_000401 GOT-10k_Train_000417 GOT-10k_Train_000448 GOT-10k_Train_000454 GOT-10k_Train_000458 GOT-10k_Train_000466 GOT-10k_Train_000475 GOT-10k_Train_000484 GOT-10k_Train_000488 GOT-10k_Train_000501 GOT-10k_Train_000510 GOT-10k_Train_000512 GOT-10k_Train_000519 
GOT-10k_Train_000539 GOT-10k_Train_000544 GOT-10k_Train_000555 GOT-10k_Train_000564 GOT-10k_Train_000568 GOT-10k_Train_000583 GOT-10k_Train_000587 GOT-10k_Train_000593 GOT-10k_Train_000621 GOT-10k_Train_000624 GOT-10k_Train_000625 GOT-10k_Train_000638 GOT-10k_Train_000648 GOT-10k_Train_000654 GOT-10k_Train_000669 GOT-10k_Train_000701 GOT-10k_Train_000709 GOT-10k_Train_000712 GOT-10k_Train_000731 GOT-10k_Train_000734 GOT-10k_Train_000737 GOT-10k_Train_000744 GOT-10k_Train_000746 GOT-10k_Train_000748 GOT-10k_Train_000762 GOT-10k_Train_000764 GOT-10k_Train_000765 GOT-10k_Train_000766 GOT-10k_Train_000767 GOT-10k_Train_000775 GOT-10k_Train_000783 GOT-10k_Train_000790 GOT-10k_Train_000829 GOT-10k_Train_000857 GOT-10k_Train_000859 GOT-10k_Train_000867 GOT-10k_Train_000872 GOT-10k_Train_000880 GOT-10k_Train_000884 GOT-10k_Train_000909 GOT-10k_Train_000915 GOT-10k_Train_000922 GOT-10k_Train_000928 GOT-10k_Train_000933 GOT-10k_Train_000941 GOT-10k_Train_000961 GOT-10k_Train_000966 GOT-10k_Train_000968 GOT-10k_Train_000971 GOT-10k_Train_000972 GOT-10k_Train_000995 GOT-10k_Train_001003 GOT-10k_Train_001010 GOT-10k_Train_001011 GOT-10k_Train_001019 GOT-10k_Train_001021 GOT-10k_Train_001035 GOT-10k_Train_001039 GOT-10k_Train_001047 GOT-10k_Train_001057 GOT-10k_Train_001069 GOT-10k_Train_001077 GOT-10k_Train_001079 GOT-10k_Train_001085 GOT-10k_Train_001088 GOT-10k_Train_001091 GOT-10k_Train_001104 GOT-10k_Train_001112 GOT-10k_Train_001113 GOT-10k_Train_001124 GOT-10k_Train_001128 GOT-10k_Train_001143 GOT-10k_Train_001145 GOT-10k_Train_001146 GOT-10k_Train_001148 GOT-10k_Train_001150 GOT-10k_Train_001154 GOT-10k_Train_001156 GOT-10k_Train_001157 GOT-10k_Train_001163 GOT-10k_Train_001181 GOT-10k_Train_001184 GOT-10k_Train_001189 GOT-10k_Train_001200 GOT-10k_Train_001225 GOT-10k_Train_001264 GOT-10k_Train_001288 GOT-10k_Train_001296 GOT-10k_Train_001298 GOT-10k_Train_001299 GOT-10k_Train_001314 GOT-10k_Train_001319 GOT-10k_Train_001329 GOT-10k_Train_001331 GOT-10k_Train_001340 
GOT-10k_Train_001374 GOT-10k_Train_001384 GOT-10k_Train_001394 GOT-10k_Train_001407 GOT-10k_Train_001415 GOT-10k_Train_001430 GOT-10k_Train_001433 GOT-10k_Train_001453 GOT-10k_Train_001457 GOT-10k_Train_001471 GOT-10k_Train_001473 GOT-10k_Train_001480 GOT-10k_Train_001484 GOT-10k_Train_001489 GOT-10k_Train_001514 GOT-10k_Train_001537 GOT-10k_Train_001544 GOT-10k_Train_001545 GOT-10k_Train_001551 GOT-10k_Train_001558 GOT-10k_Train_001560 GOT-10k_Train_001562 GOT-10k_Train_001563 GOT-10k_Train_001570 GOT-10k_Train_001576 GOT-10k_Train_001604 GOT-10k_Train_001615 GOT-10k_Train_001617 GOT-10k_Train_001618 GOT-10k_Train_001619 GOT-10k_Train_001624 GOT-10k_Train_001650 GOT-10k_Train_001651 GOT-10k_Train_001663 GOT-10k_Train_001673 GOT-10k_Train_001685 GOT-10k_Train_001692 GOT-10k_Train_001700 GOT-10k_Train_001722 GOT-10k_Train_001731 GOT-10k_Train_001732 GOT-10k_Train_001738 GOT-10k_Train_001740 GOT-10k_Train_001742 GOT-10k_Train_001747 GOT-10k_Train_001759 GOT-10k_Train_001769 GOT-10k_Train_001781 GOT-10k_Train_001791 GOT-10k_Train_001794 GOT-10k_Train_001795 GOT-10k_Train_001818 GOT-10k_Train_001833 GOT-10k_Train_001836 GOT-10k_Train_001841 GOT-10k_Train_001852 GOT-10k_Train_001863 GOT-10k_Train_001865 GOT-10k_Train_001878 GOT-10k_Train_001898 GOT-10k_Train_001919 GOT-10k_Train_001923 GOT-10k_Train_001929 GOT-10k_Train_001935 GOT-10k_Train_001938 GOT-10k_Train_001942 GOT-10k_Train_001955 GOT-10k_Train_001964 GOT-10k_Train_001966 GOT-10k_Train_001982 GOT-10k_Train_002005 GOT-10k_Train_002009 GOT-10k_Train_002035 GOT-10k_Train_002068 GOT-10k_Train_002073 GOT-10k_Train_002076 GOT-10k_Train_002084 GOT-10k_Train_002112 GOT-10k_Train_002115 GOT-10k_Train_002116 GOT-10k_Train_002123 GOT-10k_Train_002125 GOT-10k_Train_002129 GOT-10k_Train_002139 GOT-10k_Train_002146 GOT-10k_Train_002166 GOT-10k_Train_002168 GOT-10k_Train_002176 GOT-10k_Train_002184 GOT-10k_Train_002190 GOT-10k_Train_002192 GOT-10k_Train_002211 GOT-10k_Train_002216 GOT-10k_Train_002233 GOT-10k_Train_002240 
GOT-10k_Train_002247 GOT-10k_Train_002250 GOT-10k_Train_002252 GOT-10k_Train_002253 GOT-10k_Train_002261 GOT-10k_Train_002274 GOT-10k_Train_002276 GOT-10k_Train_002292 GOT-10k_Train_002302 GOT-10k_Train_002304 GOT-10k_Train_002305 GOT-10k_Train_002320 GOT-10k_Train_002345 GOT-10k_Train_002355 GOT-10k_Train_002359 GOT-10k_Train_002363 GOT-10k_Train_002374 GOT-10k_Train_002376 GOT-10k_Train_002389 GOT-10k_Train_002393 GOT-10k_Train_002400 GOT-10k_Train_002408 GOT-10k_Train_002418 GOT-10k_Train_002437 GOT-10k_Train_002440 GOT-10k_Train_002442 GOT-10k_Train_002454 GOT-10k_Train_002456 GOT-10k_Train_002465 GOT-10k_Train_002466 GOT-10k_Train_002474 GOT-10k_Train_002479 GOT-10k_Train_002484 GOT-10k_Train_002511 GOT-10k_Train_002514 GOT-10k_Train_002517 GOT-10k_Train_002523 GOT-10k_Train_002527 GOT-10k_Train_002534 GOT-10k_Train_002555 GOT-10k_Train_002587 GOT-10k_Train_002589 GOT-10k_Train_002612 GOT-10k_Train_002627 GOT-10k_Train_002639 GOT-10k_Train_002652 GOT-10k_Train_002693 GOT-10k_Train_002699 GOT-10k_Train_002716 GOT-10k_Train_002725 GOT-10k_Train_002727 GOT-10k_Train_002730 GOT-10k_Train_002755 GOT-10k_Train_002756 GOT-10k_Train_002760 GOT-10k_Train_002763 GOT-10k_Train_002837 GOT-10k_Train_002841 GOT-10k_Train_002856 GOT-10k_Train_002862 GOT-10k_Train_002863 GOT-10k_Train_002866 GOT-10k_Train_002877 GOT-10k_Train_002884 GOT-10k_Train_002886 GOT-10k_Train_002887 GOT-10k_Train_002907 GOT-10k_Train_002908 GOT-10k_Train_002909 GOT-10k_Train_002914 GOT-10k_Train_002920 GOT-10k_Train_002922 GOT-10k_Train_002936 GOT-10k_Train_002940 GOT-10k_Train_002944 GOT-10k_Train_002953 GOT-10k_Train_002961 GOT-10k_Train_002964 GOT-10k_Train_002996 GOT-10k_Train_003003 GOT-10k_Train_003004 GOT-10k_Train_003007 GOT-10k_Train_003012 GOT-10k_Train_003027 GOT-10k_Train_003028 GOT-10k_Train_003033 GOT-10k_Train_003034 GOT-10k_Train_003036 GOT-10k_Train_003044 GOT-10k_Train_003056 GOT-10k_Train_003069 GOT-10k_Train_003078 GOT-10k_Train_003079 GOT-10k_Train_003095 GOT-10k_Train_003096 
GOT-10k_Train_003107 GOT-10k_Train_003108 GOT-10k_Train_003127 GOT-10k_Train_003128 GOT-10k_Train_003129 GOT-10k_Train_003132 GOT-10k_Train_003146 GOT-10k_Train_003155 GOT-10k_Train_003173 GOT-10k_Train_003208 GOT-10k_Train_003239 GOT-10k_Train_003245 GOT-10k_Train_003246 GOT-10k_Train_003262 GOT-10k_Train_003275 GOT-10k_Train_003283 GOT-10k_Train_003296 GOT-10k_Train_003308 GOT-10k_Train_003310 GOT-10k_Train_003313 GOT-10k_Train_003317 GOT-10k_Train_003318 GOT-10k_Train_003354 GOT-10k_Train_003379 GOT-10k_Train_003384 GOT-10k_Train_003396 GOT-10k_Train_003401 GOT-10k_Train_003423 GOT-10k_Train_003435 GOT-10k_Train_003438 GOT-10k_Train_003442 GOT-10k_Train_003444 GOT-10k_Train_003455 GOT-10k_Train_003456 GOT-10k_Train_003464 GOT-10k_Train_003466 GOT-10k_Train_003474 GOT-10k_Train_003482 GOT-10k_Train_003488 GOT-10k_Train_003502 GOT-10k_Train_003515 GOT-10k_Train_003520 GOT-10k_Train_003530 GOT-10k_Train_003551 GOT-10k_Train_003570 GOT-10k_Train_003571 GOT-10k_Train_003578 GOT-10k_Train_003583 GOT-10k_Train_003590 GOT-10k_Train_003593 GOT-10k_Train_003618 GOT-10k_Train_003626 GOT-10k_Train_003650 GOT-10k_Train_003652 GOT-10k_Train_003663 GOT-10k_Train_003690 GOT-10k_Train_003704 GOT-10k_Train_003709 GOT-10k_Train_003716 GOT-10k_Train_003721 GOT-10k_Train_003722 GOT-10k_Train_003724 GOT-10k_Train_003729 GOT-10k_Train_003756 GOT-10k_Train_003768 GOT-10k_Train_003782 GOT-10k_Train_003786 GOT-10k_Train_003788 GOT-10k_Train_003791 GOT-10k_Train_003820 GOT-10k_Train_003821 GOT-10k_Train_003827 GOT-10k_Train_003834 GOT-10k_Train_003835 GOT-10k_Train_003839 GOT-10k_Train_003843 GOT-10k_Train_003854 GOT-10k_Train_003856 GOT-10k_Train_003881 GOT-10k_Train_003899 GOT-10k_Train_003904 GOT-10k_Train_003906 GOT-10k_Train_003913 GOT-10k_Train_003937 GOT-10k_Train_003940 GOT-10k_Train_003943 GOT-10k_Train_003950 GOT-10k_Train_003972 GOT-10k_Train_003974 GOT-10k_Train_003978 GOT-10k_Train_003981 GOT-10k_Train_003982 GOT-10k_Train_004003 GOT-10k_Train_004004 GOT-10k_Train_004008 
GOT-10k_Train_004012 GOT-10k_Train_004013 GOT-10k_Train_004030 GOT-10k_Train_004036 GOT-10k_Train_004040 GOT-10k_Train_004052 GOT-10k_Train_004054 GOT-10k_Train_004055 GOT-10k_Train_004057 GOT-10k_Train_004063 GOT-10k_Train_004068 GOT-10k_Train_004072 GOT-10k_Train_004075 GOT-10k_Train_004078 GOT-10k_Train_004082 GOT-10k_Train_004102 GOT-10k_Train_004103 GOT-10k_Train_004105 GOT-10k_Train_004111 GOT-10k_Train_004120 GOT-10k_Train_004122 GOT-10k_Train_004124 GOT-10k_Train_004142 GOT-10k_Train_004158 GOT-10k_Train_004170 GOT-10k_Train_004175 GOT-10k_Train_004181 GOT-10k_Train_004190 GOT-10k_Train_004193 GOT-10k_Train_004194 GOT-10k_Train_004199 GOT-10k_Train_004202 GOT-10k_Train_004217 GOT-10k_Train_004225 GOT-10k_Train_004229 GOT-10k_Train_004230 GOT-10k_Train_004234 GOT-10k_Train_004241 GOT-10k_Train_004246 GOT-10k_Train_004249 GOT-10k_Train_004255 GOT-10k_Train_004268 GOT-10k_Train_004276 GOT-10k_Train_004292 GOT-10k_Train_004293 GOT-10k_Train_004295 GOT-10k_Train_004296 GOT-10k_Train_004302 GOT-10k_Train_004324 GOT-10k_Train_004337 GOT-10k_Train_004342 GOT-10k_Train_004351 GOT-10k_Train_004356 GOT-10k_Train_004376 GOT-10k_Train_004380 GOT-10k_Train_004395 GOT-10k_Train_004398 GOT-10k_Train_004399 GOT-10k_Train_004408 GOT-10k_Train_004430 GOT-10k_Train_004439 GOT-10k_Train_004440 GOT-10k_Train_004462 GOT-10k_Train_004473 GOT-10k_Train_004476 GOT-10k_Train_004478 GOT-10k_Train_004481 GOT-10k_Train_004483 GOT-10k_Train_004484 GOT-10k_Train_004503 GOT-10k_Train_004513 GOT-10k_Train_004517 GOT-10k_Train_004533 GOT-10k_Train_004536 GOT-10k_Train_004594 GOT-10k_Train_004595 GOT-10k_Train_004607 GOT-10k_Train_004619 GOT-10k_Train_004626 GOT-10k_Train_004642 GOT-10k_Train_004646 GOT-10k_Train_004652 GOT-10k_Train_004658 GOT-10k_Train_004660 GOT-10k_Train_004661 GOT-10k_Train_004668 GOT-10k_Train_004673 GOT-10k_Train_004679 GOT-10k_Train_004694 GOT-10k_Train_004702 GOT-10k_Train_004709 GOT-10k_Train_004717 GOT-10k_Train_004757 GOT-10k_Train_004768 GOT-10k_Train_004824 
GOT-10k_Train_004826 GOT-10k_Train_004833 GOT-10k_Train_004839 GOT-10k_Train_004843 GOT-10k_Train_004852 GOT-10k_Train_004862 GOT-10k_Train_004865 GOT-10k_Train_004878 GOT-10k_Train_004880 GOT-10k_Train_004881 GOT-10k_Train_004902 GOT-10k_Train_004906 GOT-10k_Train_004920 GOT-10k_Train_004950 GOT-10k_Train_004951 GOT-10k_Train_004952 GOT-10k_Train_004973 GOT-10k_Train_004983 GOT-10k_Train_004984 GOT-10k_Train_004990 GOT-10k_Train_004993 GOT-10k_Train_004995 GOT-10k_Train_005004 GOT-10k_Train_005007 GOT-10k_Train_005022 GOT-10k_Train_005024 GOT-10k_Train_005040 GOT-10k_Train_005046 GOT-10k_Train_005047 GOT-10k_Train_005058 GOT-10k_Train_005063 GOT-10k_Train_005072 GOT-10k_Train_005097 GOT-10k_Train_005098 GOT-10k_Train_005099 GOT-10k_Train_005108 GOT-10k_Train_005113 GOT-10k_Train_005119 GOT-10k_Train_005126 GOT-10k_Train_005146 GOT-10k_Train_005166 GOT-10k_Train_005191 GOT-10k_Train_005207 GOT-10k_Train_005255 GOT-10k_Train_005269 GOT-10k_Train_005280 GOT-10k_Train_005310 GOT-10k_Train_005317 GOT-10k_Train_005319 GOT-10k_Train_005334 GOT-10k_Train_005338 GOT-10k_Train_005339 GOT-10k_Train_005354 GOT-10k_Train_005364 GOT-10k_Train_005382 GOT-10k_Train_005385 GOT-10k_Train_005389 GOT-10k_Train_005390 GOT-10k_Train_005396 GOT-10k_Train_005398 GOT-10k_Train_005399 GOT-10k_Train_005401 GOT-10k_Train_005413 GOT-10k_Train_005415 GOT-10k_Train_005420 GOT-10k_Train_005457 GOT-10k_Train_005465 GOT-10k_Train_005488 GOT-10k_Train_005493 GOT-10k_Train_005510 GOT-10k_Train_005523 GOT-10k_Train_005538 GOT-10k_Train_005553 GOT-10k_Train_005556 GOT-10k_Train_005575 GOT-10k_Train_005577 GOT-10k_Train_005582 GOT-10k_Train_005594 GOT-10k_Train_005606 GOT-10k_Train_005611 GOT-10k_Train_005636 GOT-10k_Train_005639 GOT-10k_Train_005642 GOT-10k_Train_005651 GOT-10k_Train_005652 GOT-10k_Train_005653 GOT-10k_Train_005681 GOT-10k_Train_005686 GOT-10k_Train_005689 GOT-10k_Train_005701 GOT-10k_Train_005712 GOT-10k_Train_005716 GOT-10k_Train_005724 GOT-10k_Train_005731 GOT-10k_Train_005732 
GOT-10k_Train_005734 GOT-10k_Train_005741 GOT-10k_Train_005764 GOT-10k_Train_005767 GOT-10k_Train_005788 GOT-10k_Train_005791 GOT-10k_Train_005800 GOT-10k_Train_005813 GOT-10k_Train_005816 GOT-10k_Train_005830 GOT-10k_Train_005852 GOT-10k_Train_005876 GOT-10k_Train_005877 GOT-10k_Train_005884 GOT-10k_Train_005910 GOT-10k_Train_005929 GOT-10k_Train_005943 GOT-10k_Train_005958 GOT-10k_Train_005995 GOT-10k_Train_006002 GOT-10k_Train_006010 GOT-10k_Train_006018 GOT-10k_Train_006021 GOT-10k_Train_006022 GOT-10k_Train_006040 GOT-10k_Train_006046 GOT-10k_Train_006057 GOT-10k_Train_006075 GOT-10k_Train_006087 GOT-10k_Train_006099 GOT-10k_Train_006115 GOT-10k_Train_006126 GOT-10k_Train_006129 GOT-10k_Train_006142 GOT-10k_Train_006161 GOT-10k_Train_006163 GOT-10k_Train_006193 GOT-10k_Train_006195 GOT-10k_Train_006204 GOT-10k_Train_006206 GOT-10k_Train_006215 GOT-10k_Train_006216 GOT-10k_Train_006220 GOT-10k_Train_006224 GOT-10k_Train_006232 GOT-10k_Train_006241 GOT-10k_Train_006247 GOT-10k_Train_006287 GOT-10k_Train_006300 GOT-10k_Train_006315 GOT-10k_Train_006318 GOT-10k_Train_006322 GOT-10k_Train_006337 GOT-10k_Train_006341 GOT-10k_Train_006344 GOT-10k_Train_006348 GOT-10k_Train_006349 GOT-10k_Train_006363 GOT-10k_Train_006366 GOT-10k_Train_006376 GOT-10k_Train_006378 GOT-10k_Train_006395 GOT-10k_Train_006402 GOT-10k_Train_006406 GOT-10k_Train_006412 GOT-10k_Train_006413 GOT-10k_Train_006427 GOT-10k_Train_006448 GOT-10k_Train_006459 GOT-10k_Train_006464 GOT-10k_Train_006474 GOT-10k_Train_006477 GOT-10k_Train_006482 GOT-10k_Train_006483 GOT-10k_Train_006496 GOT-10k_Train_006498 GOT-10k_Train_006499 GOT-10k_Train_006505 GOT-10k_Train_006506 GOT-10k_Train_006514 GOT-10k_Train_006533 GOT-10k_Train_006563 GOT-10k_Train_006569 GOT-10k_Train_006573 GOT-10k_Train_006584 GOT-10k_Train_006585 GOT-10k_Train_006587 GOT-10k_Train_006591 GOT-10k_Train_006592 GOT-10k_Train_006598 GOT-10k_Train_006605 GOT-10k_Train_006631 GOT-10k_Train_006633 GOT-10k_Train_006644 GOT-10k_Train_006651 
GOT-10k_Train_006654 GOT-10k_Train_006672 GOT-10k_Train_006717 GOT-10k_Train_006728 GOT-10k_Train_006736 GOT-10k_Train_006740 GOT-10k_Train_006746 GOT-10k_Train_006754 GOT-10k_Train_006759 GOT-10k_Train_006766 GOT-10k_Train_006789 GOT-10k_Train_006796 GOT-10k_Train_006797 GOT-10k_Train_006817 GOT-10k_Train_006818 GOT-10k_Train_006849 GOT-10k_Train_006851 GOT-10k_Train_006855 GOT-10k_Train_006872 GOT-10k_Train_006879 GOT-10k_Train_006900 GOT-10k_Train_006912 GOT-10k_Train_006926 GOT-10k_Train_006936 GOT-10k_Train_006955 GOT-10k_Train_006968 GOT-10k_Train_006969 GOT-10k_Train_006979 GOT-10k_Train_006980 GOT-10k_Train_006984 GOT-10k_Train_006986 GOT-10k_Train_006991 GOT-10k_Train_007017 GOT-10k_Train_007032 GOT-10k_Train_007035 GOT-10k_Train_007048 GOT-10k_Train_007064 GOT-10k_Train_007065 GOT-10k_Train_007075 GOT-10k_Train_007077 GOT-10k_Train_007081 GOT-10k_Train_007083 GOT-10k_Train_007089 GOT-10k_Train_007106 GOT-10k_Train_007107 GOT-10k_Train_007131 GOT-10k_Train_007138 GOT-10k_Train_007144 GOT-10k_Train_007150 GOT-10k_Train_007168 GOT-10k_Train_007170 GOT-10k_Train_007177 GOT-10k_Train_007181 GOT-10k_Train_007183 GOT-10k_Train_007190 GOT-10k_Train_007208 GOT-10k_Train_007220 GOT-10k_Train_007223 GOT-10k_Train_007247 GOT-10k_Train_007273 GOT-10k_Train_007284 GOT-10k_Train_007289 GOT-10k_Train_007293 GOT-10k_Train_007294 GOT-10k_Train_007296 GOT-10k_Train_007316 GOT-10k_Train_007322 GOT-10k_Train_007355 GOT-10k_Train_007360 GOT-10k_Train_007362 GOT-10k_Train_007364 GOT-10k_Train_007388 GOT-10k_Train_007392 GOT-10k_Train_007403 GOT-10k_Train_007404 GOT-10k_Train_007426 GOT-10k_Train_007427 GOT-10k_Train_007443 GOT-10k_Train_007446 GOT-10k_Train_007461 GOT-10k_Train_007482 GOT-10k_Train_007489 GOT-10k_Train_007499 GOT-10k_Train_007503 GOT-10k_Train_007507 GOT-10k_Train_007515 GOT-10k_Train_007521 GOT-10k_Train_007523 GOT-10k_Train_007525 GOT-10k_Train_007535 GOT-10k_Train_007559 GOT-10k_Train_007566 GOT-10k_Train_007582 GOT-10k_Train_007586 GOT-10k_Train_007596 
GOT-10k_Train_007616 GOT-10k_Train_007623 GOT-10k_Train_007634 GOT-10k_Train_007637 GOT-10k_Train_007643 GOT-10k_Train_007645 GOT-10k_Train_007653 GOT-10k_Train_007660 GOT-10k_Train_007661 GOT-10k_Train_007663 GOT-10k_Train_007672 GOT-10k_Train_007700 GOT-10k_Train_007710 GOT-10k_Train_007714 GOT-10k_Train_007717 GOT-10k_Train_007718 GOT-10k_Train_007737 GOT-10k_Train_007741 GOT-10k_Train_007746 GOT-10k_Train_007763 GOT-10k_Train_007769 GOT-10k_Train_007780 GOT-10k_Train_007803 GOT-10k_Train_007821 GOT-10k_Train_007825 GOT-10k_Train_007839 GOT-10k_Train_007848 GOT-10k_Train_007873 GOT-10k_Train_007877 GOT-10k_Train_007882 GOT-10k_Train_007894 GOT-10k_Train_007905 GOT-10k_Train_007908 GOT-10k_Train_007911 GOT-10k_Train_007914 GOT-10k_Train_007918 GOT-10k_Train_007929 GOT-10k_Train_007936 GOT-10k_Train_007938 GOT-10k_Train_007965 GOT-10k_Train_007969 GOT-10k_Train_007973 GOT-10k_Train_007987 GOT-10k_Train_007999 GOT-10k_Train_008001 GOT-10k_Train_008034 GOT-10k_Train_008050 GOT-10k_Train_008056 GOT-10k_Train_008068 GOT-10k_Train_008073 GOT-10k_Train_008089 GOT-10k_Train_008095 GOT-10k_Train_008101 GOT-10k_Train_008128 GOT-10k_Train_008139 GOT-10k_Train_008147 GOT-10k_Train_008154 GOT-10k_Train_008171 GOT-10k_Train_008180 GOT-10k_Train_008193 GOT-10k_Train_008194 GOT-10k_Train_008201 GOT-10k_Train_008212 GOT-10k_Train_008226 GOT-10k_Train_008230 GOT-10k_Train_008231 GOT-10k_Train_008236 GOT-10k_Train_008239 GOT-10k_Train_008241 GOT-10k_Train_008243 GOT-10k_Train_008249 GOT-10k_Train_008250 GOT-10k_Train_008273 GOT-10k_Train_008278 GOT-10k_Train_008291 GOT-10k_Train_008310 GOT-10k_Train_008311 GOT-10k_Train_008317 GOT-10k_Train_008319 GOT-10k_Train_008331 GOT-10k_Train_008332 GOT-10k_Train_008344 GOT-10k_Train_008369 GOT-10k_Train_008377 GOT-10k_Train_008386 GOT-10k_Train_008392 GOT-10k_Train_008396 GOT-10k_Train_008432 GOT-10k_Train_008438 GOT-10k_Train_008439 GOT-10k_Train_008440 GOT-10k_Train_008442 GOT-10k_Train_008443 GOT-10k_Train_008455 GOT-10k_Train_008471 
GOT-10k_Train_008484 GOT-10k_Train_008490 GOT-10k_Train_008492 GOT-10k_Train_008499 GOT-10k_Train_008502 GOT-10k_Train_008507 GOT-10k_Train_008520 GOT-10k_Train_008525 GOT-10k_Train_008568 GOT-10k_Train_008587 GOT-10k_Train_008589 GOT-10k_Train_008591 GOT-10k_Train_008606 GOT-10k_Train_008612 GOT-10k_Train_008623 GOT-10k_Train_008628 GOT-10k_Train_008633 GOT-10k_Train_008634 GOT-10k_Train_008645 GOT-10k_Train_008656 GOT-10k_Train_008668 GOT-10k_Train_008670 GOT-10k_Train_008702 GOT-10k_Train_008714 GOT-10k_Train_008723 GOT-10k_Train_008731 GOT-10k_Train_008732 GOT-10k_Train_008734 GOT-10k_Train_008747 GOT-10k_Train_008787 GOT-10k_Train_008794 GOT-10k_Train_008805 GOT-10k_Train_008829 GOT-10k_Train_008837 GOT-10k_Train_008838 GOT-10k_Train_008853 GOT-10k_Train_008878 GOT-10k_Train_008879 GOT-10k_Train_008880 GOT-10k_Train_008891 GOT-10k_Train_008895 GOT-10k_Train_008907 GOT-10k_Train_008909 GOT-10k_Train_008922 GOT-10k_Train_008935 GOT-10k_Train_008939 GOT-10k_Train_008972 GOT-10k_Train_008975 GOT-10k_Train_008976 GOT-10k_Train_009002 GOT-10k_Train_009031 GOT-10k_Train_009040 GOT-10k_Train_009052 GOT-10k_Train_009056 GOT-10k_Train_009057 GOT-10k_Train_009066 GOT-10k_Train_009076 GOT-10k_Train_009103 GOT-10k_Train_009115 GOT-10k_Train_009117 GOT-10k_Train_009127 GOT-10k_Train_009137 GOT-10k_Train_009145 GOT-10k_Train_009150 GOT-10k_Train_009155 GOT-10k_Train_009156 GOT-10k_Train_009160 GOT-10k_Train_009179 GOT-10k_Train_009181 GOT-10k_Train_009196 GOT-10k_Train_009203 GOT-10k_Train_009216 GOT-10k_Train_009219 GOT-10k_Train_009222 GOT-10k_Train_009224 GOT-10k_Train_009229 GOT-10k_Train_009231 GOT-10k_Train_009235 GOT-10k_Train_009242 GOT-10k_Train_009263 GOT-10k_Train_009265 GOT-10k_Train_009280 GOT-10k_Train_009282 GOT-10k_Train_009300 GOT-10k_Train_009301 GOT-10k_Train_009329 GOT-10k_Train_009332 GOT-10k_Train_009334 ================================================ FILE: lib/train/data_specs/got10k_vot_train_split.txt ================================================ 
3784 8998 1631 8277 8358 2338 2988 8302 2662 2663 2825 7447 4781 2218 5860 2819 8075 5391 116 3606 7976 7941 1024 4519 1970 557 8579 6908 993 7204 1991 3674 8781 6840 5 3225 3763 8688 6778 5777 4794 2744 8126 3864 1733 2923 6829 683 2081 1831 2404 1459 2741 5972 7462 2654 103 2174 2989 2506 2766 5912 3295 3986 609 4895 6673 801 1098 1602 2490 8476 3186 4784 4270 1812 4226 2267 8873 6544 6112 2381 4752 753 3776 6511 6016 2559 7369 5866 563 7731 1105 5603 50 4238 2208 8725 4994 4719 1444 8807 7298 8760 8173 2332 4131 1065 8562 3992 4024 2188 9095 6765 1707 6105 6922 5362 1486 7898 4135 6574 998 6565 8127 8927 2544 4365 768 3535 3875 6808 2931 487 4451 2470 8111 3493 7338 8281 6390 1271 4373 3667 3494 3757 2966 7840 7827 3300 6261 4163 2217 6549 7236 9136 1857 6691 3470 6271 807 516 9311 6098 3144 8420 5425 5694 2643 6696 6072 7285 3781 903 8522 6092 5979 2622 2529 855 3420 3261 8953 7866 2492 3157 359 1520 2642 7452 759 36 8931 1744 4350 1089 9199 1889 1908 4868 4498 1968 3273 7413 4114 5584 4874 1427 5211 7618 1542 1353 8158 4168 3200 6345 8560 5619 5953 3158 8849 5831 1411 8103 6539 7397 1006 5450 3119 4274 5352 4571 2319 4976 902 1814 2651 3299 3398 982 2428 5793 1346 7057 3737 7329 4449 2110 7405 1773 958 3901 4127 8234 2994 7066 1289 2995 5871 3556 9085 846 2366 585 5516 5230 3481 2732 6658 7423 1855 6384 3554 5823 4948 7058 4667 5377 2503 7694 9191 9144 655 3409 62 8019 8970 2323 5750 3178 6548 7501 3280 343 2171 8397 1367 8611 6118 6603 7182 9048 7733 7141 3335 4845 5449 3467 6250 163 5168 2040 3609 8352 3426 8567 769 187 6151 6437 7028 3970 9146 5028 7492 1661 2815 2469 2563 3814 8430 4305 3479 5678 4132 1211 5459 4814 545 4556 238 2724 1260 2581 4632 4313 380 1209 5447 3032 7942 8943 806 2432 6130 4314 2131 9045 6531 5706 6747 7724 2017 3292 5469 2743 424 4233 8619 5192 4516 9324 3537 9152 8058 7526 8711 1949 5982 6702 7027 6388 7012 328 2130 452 306 7669 3134 5761 3703 44 4189 695 5224 9215 5644 3143 5443 2348 2328 4725 1418 7810 5759 7226 4535 4385 5397 
7249 3204 385 2371 2738 3636 9033 2246 2680 6940 4310 2054 9250 9080 4568 5586 4469 2038 3410 7900 4332 6108 678 3319 9079 1054 4048 4751 1320 6890 7931 1398 4349 5299 5025 7932 5738 7787 4590 4020 1274 2488 8497 3372 8965 3219 799 3664 6500 7093 4362 6205 4244 5945 6434 2031 2684 6632 4588 8271 3232 5782 2904 7200 3632 5435 8203 3480 4786 7579 3351 1921 798 3646 3094 4359 1654 5975 376 5965 780 6738 3185 2133 6248 5996 2834 531 5688 2448 7925 7974 5924 6401 5778 6594 5442 8336 4522 3770 6340 6328 4946 4161 2954 2588 8465 2885 1606 5787 3407 3121 7310 1413 1932 4787 2579 3325 508 5610 6480 4290 479 3792 6628 2545 6972 2665 6730 3547 6845 3540 8993 1052 2235 8356 3403 8818 8260 572 4159 1180 5348 7948 2676 3539 4866 6422 8365 3217 1310 2059 9177 1419 2283 8892 8162 1212 6277 3725 7806 6149 7874 718 6888 7118 277 656 8763 8289 4759 5854 8659 3145 5981 1881 5799 6947 1609 6396 2631 318 2550 6132 1736 7816 4304 8133 6698 7779 7732 7642 7242 711 9262 8033 7440 1913 5480 5570 8594 8772 4654 8974 6128 6183 1071 8449 2142 2298 524 1695 820 4053 1856 8641 217 1063 9286 3152 221 5461 1270 2006 7164 1199 6951 5604 5400 5309 3498 6407 6661 7097 8165 5169 3852 7070 5702 4344 6648 6904 3272 7119 5795 2365 2659 353 5444 1924 2098 2972 6006 5865 8740 7856 5841 598 836 1147 931 8897 0 6049 1837 865 1871 6116 6831 5773 3587 303 1883 2163 3070 1308 7953 6909 853 7301 3279 123 7186 3194 5133 1931 4622 4891 5722 5693 8 2339 6596 71 379 4506 4370 1238 2707 3344 4254 8767 1726 325 4148 5438 5357 548 1332 6824 2290 2335 2594 2315 3389 3885 2621 4116 7412 7222 4894 8595 2000 4978 4721 6444 3796 9321 2236 6409 1523 1468 9249 8270 2341 2874 174 4502 4703 9034 9108 5451 2619 9158 490 6540 1466 2962 8771 2712 4539 1581 5638 9246 4308 4363 4647 4470 1636 1311 6560 7519 8027 9217 6364 3779 4822 3563 5896 6655 1524 2846 3137 141 1887 6567 8921 4671 6052 8445 8699 7349 3553 2117 7651 5034 5383 649 3818 9022 8414 1012 8159 5081 8571 4765 9135 4361 4073 9142 727 2835 8229 3989 4490 4923 5477 1638 
3643 9044 2230 499 7166 3172 8431 8401 1470 6356 8817 927 4212 2152 3812 4949 1219 1538 3029 6481 9042 7775 7742 423 2085 7715 4541 9061 5916 7420 7406 7046 7808 4911 8804 6927 8820 3264 300 2979 252 4407 3383 4688 8504 6723 26 3837 2489 4137 8209 229 6490 2364 9016 1763 1728 338 8335 9063 2791 641 5454 4581 4548 2840 8508 3463 7231 7619 2560 1755 6201 165 6279 5806 6867 5890 2396 3416 1981 6073 5872 3045 4182 7607 4414 2998 6553 7139 5624 3666 723 5110 6932 8200 2222 8399 1041 4138 1594 3569 9253 393 7940 8004 1475 5393 1107 2597 878 9309 7576 5250 3142 2015 571 3921 1255 7080 893 2160 1355 82 9153 8583 4085 4644 7196 9165 3558 4550 6374 7826 8602 4146 9257 6083 874 8383 3731 3374 3653 8222 7344 470 1813 6871 7245 6866 3998 7433 276 1915 1988 8168 2518 2686 831 6143 5205 8718 1703 7729 2077 7983 8450 1195 9232 507 7989 6974 5828 8655 6679 5245 7783 5886 9098 6491 8782 3525 6542 131 8110 9186 9074 4933 9035 2607 2057 6273 2711 5829 3382 2696 3043 2048 619 2499 5295 1162 7807 3694 2194 3149 1940 7934 840 3592 8237 4731 1324 8486 8726 8573 2928 9078 2272 2564 1370 5911 7434 8026 407 7546 2004 5849 7887 3425 1118 926 3430 5902 2282 2334 129 1372 4842 6473 4382 1028 415 8269 6910 2796 3038 5735 5080 2852 6306 8842 9188 3637 1066 532 5485 2838 6753 9008 7984 2816 8819 7103 5977 5044 2064 2599 3249 6446 6638 852 1724 3368 892 3250 8258 7962 4300 1616 167 8855 2090 4424 879 5136 5350 2635 7828 8506 63 3847 3676 1705 6745 1263 5020 1888 7036 1033 3914 5433 3905 4641 228 4801 3766 8085 643 6914 3013 5657 3696 1590 8282 2403 416 911 3849 4215 1120 5490 296 2306 3140 3742 4819 6153 6414 760 3000 7498 7108 6429 3031 5314 751 3357 5808 7505 98 7652 4027 6257 1799 8577 4969 9163 2025 6061 4026 588 4961 4940 7152 538 706 2802 8983 3375 1246 6593 5837 1789 7939 4997 5939 2411 6133 199 7593 1702 5406 6082 2912 6109 100 8149 5470 2807 3362 5621 6019 9241 9268 7703 7967 5458 5492 6729 4577 106 3774 979 7082 4610 1853 9003 9292 2867 6262 2245 3460 1557 4796 2658 5769 6985 421 7990 
3289 1540 9316 2251 6896 5947 4965 4480 963 9047 7824 3976 6210 7018 7179 5016 7789 6102 6828 7659 9109 9071 8115 7628 7110 16 7513 835 939 2351 2322 4945 560 6837 6094 6475 7901 3 771 8029 3135 8044 7127 3741 5156 7030 113 3747 7042 5232 5225 3002 4747 5379 4886 7192 4184 1896 1834 8689 3665 2957 6913 8009 4851 6420 828 8884 8815 3198 8008 194 6251 3303 3934 395 1285 4169 1648 1347 3600 4631 509 211 6230 7241 2219 2582 8353 7790 7583 9004 6942 1704 8051 2981 5511 6182 7088 1699 1222 6189 1528 5197 6221 7893 7773 8766 2942 8021 614 1786 400 133 556 5237 3727 1440 3873 8448 6285 8696 8800 4009 3386 4847 5685 9093 5895 6863 4260 8405 8417 7116 255 3223 4737 7852 814 710 1094 6103 5809 5882 6336 4974 1499 2806 3744 2664 2436 4482 8665 8918 1076 8676 5725 9248 4755 1447 9328 5500 78 2653 792 6854 6093 6172 3378 4492 5529 5476 3846 1391 383 4289 3883 2648 3265 2525 5402 4599 6870 6877 4413 2464 8519 2521 1839 5822 5664 7257 5375 6852 6764 5182 8914 3015 8509 3080 4562 8979 6643 8601 6096 4812 5246 7862 527 7849 6737 12 2468 7961 275 27 5932 3840 7341 4996 8564 2154 6138 7831 4442 757 4464 1170 2568 19 323 7675 3441 2067 9027 2486 4379 4744 1737 7563 301 3907 4742 6857 1221 9284 8458 2897 1526 5345 4423 6246 8578 3711 4986 4785 3997 7311 4788 8387 2041 2608 6031 3293 541 773 8473 2501 5667 804 483 1639 696 6060 5429 5762 1527 7342 6225 7895 381 8030 8362 4734 3526 9273 2039 5084 875 6905 8968 5275 3052 650 7509 232 2595 3631 1810 4355 8315 8908 1777 4834 3164 2336 1543 6212 8346 3024 3719 1242 6265 3133 6150 6358 3316 4089 1647 4629 7117 2596 5366 6371 2209 1428 1158 7648 8765 802 153 4639 3657 9320 3294 2617 5052 6305 3227 8784 5868 6716 1671 178 2703 954 3254 2262 5743 8647 6393 7706 6604 3728 6978 7474 8754 2740 6038 1491 8814 2080 2358 5944 1164 9259 4518 7343 5748 3897 923 5967 2677 3503 1202 4966 6634 1962 9096 9064 977 4049 1464 658 536 3402 8064 1309 259 8122 910 224 6152 7142 6070 8411 9214 9312 8325 6192 626 6025 6240 8708 4630 6777 1075 8906 408 9269 6236 9067 
2324 156 3136 7878 7308 4335 2065 3845 4453 3356 1450 371 7219 5171 201 8642 2099 477 1603 8339 7430 3061 235 1133 8474 8653 989 4569 9092 8347 3102 1743 9086 5140 7438 1530 2460 7646 5071 5430 6944 610 2803 1448 4696 6156 4386 4248 4256 994 805 8011 8276 8999 4956 1712 2795 7553 6436 2158 9083 3184 5784 4428 612 5288 6222 1365 5074 6848 575 5213 2175 4240 351 2086 2656 5150 9255 8189 7735 1261 1344 4097 8674 2984 4235 5998 6488 537 1267 7486 7124 6245 7955 7337 5436 1194 209 1710 7906 4357 4139 5679 2584 2854 1004 8246 8586 5087 4926 6637 3197 7757 6502 1248 990 3928 2770 2751 1020 6426 6839 2671 3871 9212 4179 3394 10 5861 5316 6869 2985 8905 8559 4457 2480 2313 4100 6835 7799 7890 2785 5468 7302 5862 1803 3171 717 7053 1655 4489 2522 2921 8555 1984 895 8949 1305 738 7606 112 3042 1325 437 3167 3340 511 3689 8982 69 4421 550 8685 3147 8956 3166 7023 2014 3573 3880 4045 2069 6051 702 6664 8418 6181 4853 4166 7022 7418 3605 7172 5031 4589 7858 6586 6351 8334 7504 634 3759 1890 890 6959 5085 4919 2161 1191 256 3610 7079 3427 4071 7323 2982 7263 7444 4251 5846 4864 3649 4311 8120 4582 6373 2805 4872 4869 5867 2670 7099 30 8933 930 7919 7261 5289 7449 7772 3613 3196 474 205 841 2611 6185 3088 409 7239 5938 7871 1343 6705 1027 5596 2199 9113 5471 6134 838 8359 4061 1474 3229 270 4245 1979 1517 8652 4006 6137 4693 2528 6996 2926 5798 2477 2549 3341 6014 4479 2861 4208 5175 5174 5118 3736 5463 1588 2327 8380 7982 1058 4586 6608 7985 1822 3628 549 1811 2601 4608 2540 6659 3859 307 3767 8167 505 4366 5520 461 1933 2401 8106 2055 7844 8544 4797 7419 6686 7670 6039 5672 5141 6543 206 5252 4718 888 1601 3218 5114 713 4022 4419 6708 397 425 6612 5057 1729 4729 4080 1034 534 5598 9218 2424 329 4154 1597 109 8823 9038 8437 3307 128 8032 1412 7333 8762 8851 8865 468 3808 3064 8798 7052 7767 1086 2162 6566 2109 3439 6122 3642 7696 8610 5279 1808 8687 817 6066 3640 6015 7601 4855 6017 87 7071 7268 3614 6084 6117 6924 9102 2829 375 8724 2095 22 1541 2970 633 139 451 4521 179 1396 
3876 5824 8020 426 4982 4172 190 4859 1455 3110 3323 9104 858 6719 6428 4495 8551 2141 3984 3066 67 4299 5821 8444 6581 6097 7090 7781 8944 3085 2114 5355 8901 1461 3301 422 7000 4820 5790 1379 7536 8736 8991 5241 1698 1294 1753 196 2987 8680 4144 8639 6441 8255 8156 3677 6385 6520 3760 6001 1144 5478 7394 8057 5018 4232 5235 6844 3111 8802 949 7843 573 2278 6801 7629 2714 5105 6946 2697 5315 1571 8677 2537 4374 3833 7820 3750 2033 6526 3884 8706 7195 3603 3001 6284 5873 5718 8576 8457 3589 5839 459 6342 8729 6933 607 6053 8228 3773 1805 6365 5142 6069 1389 9026 570 4614 5533 2821 1897 819 4060 5905 6842 5446 1277 4303 2836 934 1014 7822 7494 665 5881 3328 4664 315 1315 1462 8616 7725 5749 1730 8184 4567 5065 8867 1304 3669 9192 410 8177 6710 1210 2329 3911 1899 7686 3315 6180 3116 5341 4394 8337 9182 5715 2172 2782 3715 9195 7960 4890 8294 2337 8014 3353 7475 2193 8831 4200 4653 6196 6957 3063 8959 8973 6529 3457 5274 8002 6823 6154 5561 1780 9318 7657 1758 6503 7678 3274 1625 4327 3236 8575 4707 4331 1494 8756 3174 1074 8116 8295 3048 3752 6050 8003 9175 4674 1642 2556 6166 7165 8441 3990 1640 1778 7500 8304 1395 4315 5949 3364 242 5763 1036 2430 8131 411 6267 2045 6606 899 8065 5779 5616 2107 5408 2980 6310 5776 4328 821 3251 2354 7076 5313 79 3959 5677 7545 160 6790 6859 3659 6770 1106 8846 956 7472 2050 8099 4795 8053 9293 7037 1646 9307 5322 5332 2708 8977 917 2419 184 2105 1578 3923 5780 1903 2512 429 493 4972 445 8286 320 8300 617 3413 4459 525 5631 6314 5157 5300 8545 182 1031 4429 2495 1534 3099 3916 3738 535 2119 177 1838 2159 4099 8285 5172 8540 6020 7683 3073 3115 3087 2416 1894 5942 3597 5834 2007 43 1779 4174 2023 2546 2429 9006 436 4214 3693 5426 6767 5903 4368 2170 5051 7490 2859 5035 7835 5372 7122 925 3253 6338 8393 4093 5848 7588 2683 8049 5403 5894 8745 8550 2941 3484 9029 4461 8022 725 3030 1975 5623 2415 1957 6141 9278 3226 3062 5670 7326 8759 8496 6619 8187 8262 6199 951 668 2388 4698 8240 2851 871 4988 9084 9089 3162 1167 8244 5227 6461 
2831 776 5010 5770 5282 3574 5102 1278 2281 5455 4628 4663 9119 7487 8746 4889 1175 102 2386 8940 5566 53 8833 1918 321 6786 6861 4358 2771 7467 975 4777 605 3543 2600 7584 9299 4530 7328 183 4761 7543 304 1196 4623 5519 1953 533 5989 7590 7428 6346 6162 1946 6260 4405 5676 8924 7171 8409 1866 6379 3411 2387 3051 7398 154 1185 6442 6004 1611 2165 9018 8323 616 3995 8952 1533 7853 789 4991 3675 7456 5752 175 7556 4195 907 2248 8467 1017 7968 3304 1666 4942 3867 4802 6357 4621 887 6213 5261 1336 521 8928 7864 4792 6742 157 1593 823 7235 5303 5633 1100 8047 5993 1460 6714 1630 6440 6307 3608 292 5974 8301 8342 2720 4583 2757 7315 833 4466 4236 1282 5273 2149 2380 8119 7167 5076 3596 2650 8980 3421 1356 1954 7823 1172 2226 1941 6136 7274 2256 4928 324 4410 4579 1061 7113 486 862 6956 2873 1465 6113 8225 8512 6806 272 6008 1241 88 5662 3555 689 8733 2812 7453 6282 420 2471 4477 7495 1445 594 6939 1564 8704 8590 7992 7374 5796 9298 4213 5713 5864 326 5513 402 464 608 1951 8640 3347 3459 4162 2690 7478 5856 5240 3022 602 5547 1798 1345 9276 599 3673 3277 1635 8625 1567 5928 636 5671 2896 3477 412 7575 4201 685 4760 1229 4275 8960 3123 4471 5941 3355 3999 7157 6354 6850 8783 1943 6769 7330 8721 8477 1381 848 778 6408 2644 5817 1441 1723 2144 2776 2368 367 8839 8749 5353 3148 9114 1233 9228 8857 2895 1286 200 6755 5125 5857 1657 7658 5000 942 7020 586 784 7078 6194 8658 8957 9325 1851 8911 7004 1186 8824 2999 561 7639 4316 5086 3187 7912 2624 9183 8487 5089 8475 7554 4031 6297 6059 5329 115 2058 7650 7121 2485 7805 2241 7713 4352 2409 1026 2745 4549 5124 5201 6556 6617 9091 3945 8402 5648 5257 4901 7750 6131 6027 6352 4625 1254 5498 3720 8261 3939 5576 3685 6713 8472 991 8354 5655 5997 1029 7506 2575 2990 4898 7402 3290 5388 6715 8235 5361 4970 1363 3338 9014 5358 635 1193 3705 6334 7666 5270 6368 8604 3564 1937 2481 1341 721 2100 3958 6551 3813 2592 7980 2357 8761 8910 8693 1204 489 4827 8024 7832 3895 9068 8067 1708 1111 8963 1902 9251 5719 9143 5537 9169 5365 1840 485 
4456 1169 3271 6886 9140 7173 6003 1659 1807 8371 2439 274 3448 6623 347 2103 3400 2106 9073 8169 3687 3305 4416 8454 6635 332 2433 1944 6509 7770 1880 6610 9331 302 418 4219 1333 2350 8424 4883 6580 6722 1669 8470 2571 513 3810 7049 6332 7363 3532 8456 2097 297 8841 7180 714 1587 5234 7372 660 8503 1668 8847 1101 7275 3336 6460 722 7782 3947 502 4258 2132 1835 181 3841 427 3446 2551 8324 6963 4284 7297 7577 3399 9148 8213 5656 851 657 2446 6992 976 1108 2681 3237 8582 377 5969 5287 9209 8523 7178 7833 6175 2126 3023 5090 7491 6640 6077 2221 2780 1694 4094 144 3203 7123 749 3625 3848 980 2270 7819 3672 7689 7203 2718 1714 3802 3851 4224 7237 7998 7207 4106 9036 1046 5070 4592 6056 693 1328 3309 2629 2736 202 388 7886 4417 8786 8822 4035 5505 1192 4388 8941 5019 7538 6732 6389 5923 1405 3278 3917 1688 8374 443 4037 9099 5190 4177 9310 7747 4348 7197 4844 4998 5609 4345 29 3332 8648 4107 346 2577 3941 1215 8252 4706 2675 3790 7459 6164 1149 6687 582 3139 3882 4034 1861 4701 8757 8801 1823 4528 4789 143 4746 9234 3866 9245 1911 1366 4393 2061 1959 6967 3138 7382 6237 845 80 6911 7163 5229 4736 8738 33 8543 357 3193 7262 4448 6793 3321 7569 6411 7692 7340 1417 5847 3836 2678 1188 8727 8615 7417 5771 3170 8061 2935 8263 8257 6883 1276 1239 812 6258 3922 8117 3039 603 8554 7573 2787 3445 5115 3478 962 3961 6570 7722 216 2797 5154 2530 4904 2405 7542 4021 3252 5370 9302 236 4532 1361 3373 1716 2183 1583 3783 868 1687 8925 6198 8208 6367 7603 882 3469 1645 7654 1176 4231 150 7997 5456 7031 4375 8840 5634 6945 705 4774 3822 7148 1922 8459 6249 8713 6197 8599 6071 6756 1634 950 5640 7749 5920 6622 4783 7837 7479 7229 3919 1797 5272 8945 4908 5439 6903 5833 6930 8197 9261 1711 5483 4285 8852 7409 8971 7534 7792 2444 7496 8063 1665 248 3894 4585 66 4850 1240 7511 7524 9258 2075 3979 4714 7592 965 2919 1842 8013 4750 2344 6155 3468 31 2087 1599 1573 5883 7613 195 3749 644 2189 8779 8743 9005 8081 1040 7785 5820 8830 5495 4867 2710 491 7153 6217 4741 1761 5484 5474 6916 7252 
1739 8930 6647 5198 4903 8488 7366 2774 2726 2385 7625 3179 8845 6600 399 6810 3447 6684 4915 8368 1867 2325 2101 1335 7734 7437 7025 4000 6897 1408 7154 5013 2204 9233 3817 1877 9161 2197 3390 280 1892 1612 7753 2801 7246 7909 6229 9314 8407 1436 3879 6432 5326 5327 8535 7910 7745 5545 7916 207 1783 6158 8517 7361 8070 6430 119 6146 4183 1083 7385 4497 9133 1686 3765 595 8046 4418 4043 2361 7915 9149 1717 1141 6375 1018 5602 1262 7485 9178 6629 3339 8934 4648 7988 6252 3440 864 5418 3874 7280 6191 8388 4323 6792 2232 7228 8684 7813 6187 6678 3177 3534 4953 4402 7739 6319 2414 8700 5946 8238 6917 4167 4618 2268 3081 1247 4001 8580 7636 3101 2195 1559 3714 7188 6028 7530 2828 1977 3238 2340 110 3247 7532 7541 924 1632 4487 6447 4944 6347 2285 8087 5452 91 1166 162 5185 7933 4743 1627 7259 8620 8207 5845 9011 5525 4269 4700 1824 8186 8872 8299 3957 8242 4558 6439 2666 6958 8112 5121 8806 6170 7688 3486 2082 7436 2778 1096 786 2206 5170 1443 6030 3312 9151 8485 6404 8498 2883 8961 2280 8341 2809 2445 809 8298 8643 8316 6853 1572 3215 3938 2249 6515 1337 8328 7712 1429 4117 5441 3230 4152 7225 3513 6953 1507 348 3639 5739 2673 1550 6301 1652 8453 204 6833 2200 5217 1854 4711 7368 4572 4032 7531 1013 3634 2875 6058 8307 7609 1766 904 667 5410 6578 3601 1664 3233 7390 8178 4486 4427 4876 9166 2772 6295 5001 5296 3371 6518 6327 854 8288 1912 5927 6202 5814 9032 1059 3214 6547 7038 5781 4390 6114 1622 4318 5803 5984 736 3561 6554 5045 4277 7386 9081 8462 2034 4955 2701 932 7758 7176 9205 3077 3803 3562 8054 7946 295 1843 7728 1629 7768 2971 431 9285 2513 1116 3656 4529 5758 6339 8398 816 4153 2536 1826 7870 8113 7730 7101 6555 9256 6774 1072 4578 2598 3604 5880 861 3350 3117 4685 4334 5165 7224 4066 4253 4447 3815 5038 253 3658 330 3967 6443 2143 7336 6135 2734 8390 4655 7800 1399 1173 5618 2822 4431 2443 1568 3909 1974 2496 4772 5164 2138 2864 3799 3924 4882 8245 1585 5528 5692 5730 5832 137 3175 2894 2062 2752 4028 2113 5411 2647 730 3758 1667 9303 6653 3698 3968 3053 
503 2150 4645 2257 4627 8303 7966 8742 4692 5901 8547 2277 5546 986 370 4697 8712 4804 1182 6650 7290 3487 2814 5668 7567 5333 4164 3084 8896 3888 6537 17 6882 3531 704 1037 8866 5263 6758 3762 1393 3824 5112 214 1439 5700 8932 1306 5011 6928 5173 4098 1132 7352 4778 7723 1368 2390 670 2685 5855 1772 6380 3853 940 5424 6091 1748 5297 6572 8877 6874 430 5041 5267 7448 620 9112 4294 1432 72 130 7920 4597 6614 8889 3697 1895 3462 2616 4791 7846 8372 428 6559 8326 9211 1525 5980 7888 3331 8118 7899 615 7377 791 5930 6627 8322 1138 770 8460 5100 8274 8350 6316 2893 7594 9236 5082 8150 1986 1909 8902 2145 3617 3501 7 2426 5056 8016 2702 5360 8135 8385 8378 8018 8574 720 8893 3021 1978 4782 1816 2083 4051 1446 5870 9097 8006 4222 8287 686 1377 611 8153 4808 1536 679 4096 3891 4884 432 4615 8988 5560 3451 5589 3514 6169 1414 3244 1490 7100 3588 690 7317 4171 2266 6800 2793 5151 6977 8188 8752 5815 5116 263 3311 289 3392 5755 1022 5548 9319 8937 6011 7632 5328 4141 5407 520 7305 526 3645 1859 2520 3523 8629 7304 8881 3076 4005 8329 2205 2214 6925 8691 4136 8883 974 7952 3965 5887 7964 7189 2406 2783 8086 405 6568 5147 2021 4727 7674 1600 5078 2949 6624 6541 8986 5740 8500 3591 4434 398 983 7544 1478 4570 6012 465 9330 7206 808 8737 2356 4959 8812 3599 1420 1721 5897 8422 2 4023 2739 3619 8797 5496 8951 8181 6893 9254 1809 5682 4309 6929 2742 5988 3363 4493 8434 4210 1503 1876 5094 4600 4936 4798 3933 5216 646 3098 8773 4076 5335 3746 3327 47 4602 8636 4129 363 6417 7416 9025 4377 4766 2779 4151 9046 7860 3154 3476 7620 2052 1752 7199 4412 8882 2463 339 56 4821 7555 6558 1905 5258 4205 3580 6735 1023 4511 3850 161 7395 2532 3349 7055 7387 758 1907 3006 659 815 1961 6902 7668 4708 1904 4433 5159 6816 8664 6918 1016 6513 7314 7480 9313 716 3395 6843 918 4329 8593 3404 5212 837 480 8524 1342 7414 288 8863 3352 1628 135 3314 2181 8650 5915 8078 6812 1375 906 5635 7126 1387 7458 6119 5591 3795 1531 95 1960 7522 898 4921 2623 6268 7063 1326 9075 2505 7400 1284 2951 747 6466 1357 
6493 7320 5892 576 5107 5559 97 2583 6361 8843 3509 7892 6086 1476 4612 4267 9094 7050 6048 8382 2227 284 2898 3221 2353 2157 5990 5810 3581 7279 6188 7859 3549 5539 2022 630 2500 5111 6561 5127 5569 6123 1338 8605 3491 4187 8220 7334 9213 3067 6997 2853 4735 4372 5954 6662 2207 973 3361 960 6350 7431 8076 1129 750 7194 2300 6590 5893 6889 3125 8788 7286 3472 8164 7693 1469 5563 4773 3210 6324 3113 9070 3638 7551 2541 3506 5138 4069 7198 7560 3306 6100 2932 1741 14 4672 7564 8748 8874 3804 3678 2610 1358 42 5176 9326 8464 1038 2993 3017 9072 32 4809 4364 2808 4125 152 7299 5431 6178 793 9120 8410 4963 772 6954 3014 6881 286 553 1948 6398 6255 3057 8646 6176 2700 5663 6683 1281 6013 8799 7635 9289 1885 442 2225 6294 5054 2674 7884 8730 8216 4203 1488 7111 3623 7950 1971 3248 2900 1553 472 3865 7796 6937 4591 8098 5208 294 5627 5691 5687 7149 4879 3624 7005 2773 3112 9185 1633 7830 5101 8707 8469 4678 4860 700 5527 9194 2794 5068 1177 4282 6492 5859 5029 5123 522 5048 7230 2104 6642 6731 2717 5149 2043 9059 5277 844 5515 6706 3651 9105 7671 2880 3607 6410 2508 8463 2394 1916 1125 5343 3322 5307 4547 1589 8478 8899 2955 8028 4058 2781 8715 1272 4474 4863 4367 49 8844 5605 8671 6743 4281 1874 2626 2516 258 5249 6186 7958 5432 3801 6288 4732 9121 7558 6819 7508 584 215 5036 4261 8978 5228 647 4657 2591 5931 5088 9204 929 4381 5421 2965 5050 6495 5033 4799 959 1232 5811 317 7705 3842 2178 7187 1373 7112 2694 8627 8493 3991 7441 6308 6462 3406 7673 8660 2902 752 1025 849 7682 6982 6652 3612 298 5148 4873 3414 1693 1458 327 2016 5002 6768 7016 5583 3270 8232 7158 7981 4676 4675 2164 8360 6709 8143 365 4062 4527 7928 9009 6228 5818 2533 9305 8887 55 2507 8870 6649 5158 76 5595 6693 5306 8666 3020 7527 3082 6304 1591 6145 6868 7205 9107 1165 6773 172 1993 4176 8400 4611 7589 5386 6095 6335 1561 5963 7393 3681 2037 4968 7451 3360 7466 8361 4455 4064 5422 1689 3977 7269 362 4178 4145 6127 5162 2399 9225 7068 794 1348 7736 444 6081 5298 2026 2543 9087 7425 3730 8468 2641 7529 
1720 6377 5851 7956 3150 3785 6485 3611 2869 8510 4775 4463 1251 9124 6873 3391 4118 7051 3213 3668 5347 8452 6289 5840 478 3522 453 3376 6190 3342 2237 2870 5178 5567 5952 6919 3005 134 3397 8539 6822 5264 3288 5962 8421 6744 8608 4656 1802 4271 1043 8211 2196 5260 3789 7211 7571 7834 5680 2047 5502 3369 3437 3286 5517 3912 1442 6961 2191 2417 9088 5155 6813 4520 7375 1224 811 1891 3748 4123 2789 5305 8419 7248 9237 992 4038 4499 2060 850 2669 7612 9290 2526 1287 4160 4633 7125 742 4534 2407 4555 8764 4722 7721 3205 6657 1214 3754 6080 4593 3018 8792 2294 4450 7701 127 7069 6243 8025 4010 8632 4715 5284 4574 726 4252 4561 7354 299 6088 1090 5012 5684 3489 4888 1584 1969 4846 2915 6804 2775 7306 9306 5231 7740 4283 953 6725 8290 1504 1539 8885 138 3764 1256 257 335 7060 5986 9323 4740 8994 4140 6807 8254 3963 9297 2102 9207 4910 8709 4411 1672 457 8037 4932 3679 2362 8592 495 1608 2155 7411 2881 9244 37 6535 8219 4505 8635 1928 8384 2570 8996 7610 2128 8728 6656 6681 2070 176 9062 514 1796 4039 6838 2462 230 569 5521 4637 4939 4420 672 3807 447 1656 3297 8858 2118 6309 1926 481 1509 1228 1787 5978 8678 3951 2929 4980 5039 4713 7002 151 5536 8148 3823 2299 142 7067 2372 3761 9 2265 5747 2764 724 2913 3151 4525 6370 4247 5494 629 3621 7371 1999 6704 3734 2698 4691 6938 8415 6353 6750 9077 2679 2478 7321 6611 4007 5772 6416 2264 8348 2672 6546 754 6934 8546 4404 592 4748 6625 7944 2377 6 8929 8275 4524 3660 8710 419 6878 8313 7460 8753 2917 6891 6663 4918 7129 396 7256 3500 631 5585 8343 2695 6168 6292 3176 5092 5160 3701 9021 7221 1216 1438 3471 2318 8923 6223 2182 7621 8514 9010 8987 1252 1972 1872 1715 8205 6463 8138 8989 5661 2890 565 2427 8946 1303 3718 6000 3620 5276 9260 1467 6173 7641 7520 5061 4677 5757 4400 2620 2719 8995 2079 1683 8141 7754 5744 2952 7568 7457 5368 1510 1513 3072 1456 9164 3163 3035 6111 5042 7161 1401 1084 8000 8531 5404 6550 8379 9141 8681 7752 6394 7011 3739 8253 978 4771 6024 4828 7959 1649 1727 7073 8349 6952 661 7283 3159 2590 3496 
8741 3969 2956 4565 920 1830 8558 1930 6677 6825 8256 7454 4710 1768 3753 5292 1397 2733 946 6711 3242 4929 5006 3202 2295 2746 1293 2124 5405 4065 818 7464 1820 1312 6994 6920 261 987 6120 3109 2986 4338 7774 5122 1364 8969 6712 8161 7595 5940 1566 6419 4432 6047 4749 6076 1161 8217 674 8494 3688 2447 4704 969 7477 1160 3243 4979 9288 6860 1662 6171 225 5143 313 8327 3385 7626 3103 4401 6794 5600 5043 7664 6830 4452 3980 5875 4635 5756 3329 1751 8108 4817 1989 1237 1893 2848 8875 4981 5417 4134 877 6688 3545 4943 5615 2476 1684 7396 1171 3415 3644 340 6630 8284 3256 7240 5371 3405 2108 6360 1734 5612 8638 2343 1103 6809 3055 188 8031 3124 3683 4537 988 2297 4893 839 4467 5195 4041 6457 4441 6472 4912 6884 5922 7014 1660 1595 6752 4554 1292 2709 3800 1980 8775 6392 6263 7214 5219 282 309 6685 6311 4092 18 7570 5543 4081 2515 6278 8690 5294 6184 5215 9130 6720 250 7250 639 3567 7841 2636 4067 8446 5703 8609 2586 7695 1253 6701 7930 6317 5921 7719 8501 7312 4110 6219 4552 5059 4088 7975 9132 6054 692 3412 4079 6950 5281 8321 3877 7614 4188 2223 239 4745 6875 7096 5571 4403 2640 1845 6690 1825 4157 314 4682 8825 8093 7215 6465 99 8077 4206 366 1208 6043 4640 5475 4985 1351 3090 5625 7307 8466 2003 8854 218 1500 2293 1847 5032 2147 866 3710 2552 1749 6692 3926 4112 6458 735 9171 60 9304 6726 2630 2882 1178 1151 4922 4662 173 7233 1776 4113 2423 2425 4343 970 6372 1009 6607 3068 8435 6423 3126 4813 1709 1201 7104 5620 3932 3366 5023 5079 627 290 779 5572 5233 1392 4975 8534 8210 2269 2475 2562 905 4546 267 3536 8538 449 101 7367 2722 4605 7356 6781 8537 8697 6820 8340 8926 2349 2259 6545 8100 8395 2258 2911 3946 1406 8683 8296 5579 2177 8264 1425 957 3647 515 5342 8363 2449 1001 2937 3452 5574 4319 9184 8381 945 6876 600 5714 4871 8532 8856 392 2018 369 5711 9230 5304 7266 1681 7829 2309 4683 8938 2255 6159 3207 4651 2029 4341 5106 5794 9024 4712 2434 7151 7359 6431 1290 5918 8705 5554 8876 7415 6290 5373 3805 2950 2331 6772 8997 6576 2307 8515 4033 3428 6487 6595 45 
5792 333 2383 3388 666 460 943 364 8223 8221 637 6218 4108 5381 4649 5096 1614 8768 5095 3809 5030 984 3538 5120 2498 5222 5613 5486 241 5707 9227 4109 7771 728 3671 9327 1230 9270 1070 8565 4769 7056 5654 1793 5956 7883 1362 5479 8769 8821 8320 1901 1994 2461 5552 389 2839 6467 2762 4763 3499 1487 7599 4488 3241 8272 1131 4496 7006 7265 4897 2747 6618 5291 4563 1939 6369 8548 5526 9030 5349 8433 1477 4265 9200 3878 462 6846 4806 3519 6798 5464 5179 546 6044 8114 7216 6276 1495 494 8146 5434 856 8403 8071 5544 3337 1546 2824 1718 6009 2042 251 3330 192 3797 394 7814 7699 4659 4689 4156 7903 9054 7332 7811 1119 5531 6782 5210 8412 2633 7924 4624 8314 5666 3240 2310 4262 8160 4553 8196 2661 7213 7455 7399 870 1227 1226 781 937 6343 2578 2892 2792 5696 6865 6455 8312 5193 6026 5251 3787 4460 4687 7923 1140 9106 796 2482 9170 8695 2749 6734 4825 114 827 390 7611 7484 1249 7727 955 579 3629 8915 2958 885 7227 1424 4810 4604 1535 774 7518 5428 8233 2645 2167 6484 3855 1502 4861 2333 2973 4829 1906 3966 476 9023 6960 3483 2748 5891 8174 7702 8948 5324 4396 1605 2823 7348 7347 5933 310 9082 916 203 4239 5976 6200 6435 4425 787 1121 6034 39 3104 5961 5507 5785 1463 7339 1575 7801 5445 8283 5951 6995 999 5163 6023 6536 5850 3524 3528 4508 6674 2939 8227 4598 7550 8495 8622 1152 4538 1318 739 8202 1552 5236 3576 4699 9238 1879 433 5587 1678 8552 6445 7971 6880 7476 7282 7271 6489 8091 9287 7351 1765 5286 6921 542 1762 8553 4987 894 3622 7855 92 3131 4811 6517 4510 733 4954 1360 5669 2842 8107 5646 5968 1827 7709 8521 5807 5321 9239 5501 3745 4437 1586 5265 7917 1607 6074 7061 1580 8694 8461 4573 618 9173 5243 435 8770 2421 7450 3870 8308 2605 2934 9240 6887 4512 1198 7585 7691 7738 2843 8423 6971 7854 86 9128 4298 622 6579 2203 7716 1265 1174 7380 623 8936 4306 8082 4312 8661 5753 7243 2768 8155 85 4143 3047 8479 7809 2833 5555 7578 1637 1936 8130 5549 8062 7143 5522 8966 5614 8105 8719 7655 7502 8268 5760 6695 5565 7615 9226 4870 4507 3160 4835 1598 4422 5248 7867 1078 5015 
6660 1676 6391 5351 7184 6280 5936 6124 1327 2906 269 8292 8809 5167 8142 8204 2713 1910 2930 2494 5592 7384 7726 5727 1735 5710 5518 2491 1410 4989 5183 8777 6562 4947 3692 384 1097 5209 3723 7272 6895 2459 543 8621 5394 6211 2074 1511 2524 7776 5055 7191 6207 7922 281 8436 2918 3141 4800 6323 7631 8903 3735 5301 3975 2800 7963 105 1920 7391 4909 1754 4816 5145 5139 5268 9317 8631 4346 7318 136 3993 1220 2151 308 7483 3071 1339 3777 8191 5378 7087 1056 7465 5608 6564 2754 2687 1596 5376 1512 566 6382 1757 8035 2296 4264 1053 4716 8518 254 6253 7132 8557 3490 9267 5473 2412 7539 7136 6670 891 1323 1217 2879 9118 1259 2317 7033 2467 6665 6244 2180 2140 7098 4150 547 4307 1725 2737 8549 8195 1245 6286 935 1756 1701 1626 7379 3492 3717 5802 2817 1234 1005 4101 21 2576 4650 3381 1030 2844 1641 936 2729 6469 8913 5994 341 4083 5152 3380 8739 6615 3829 164 7927 4779 4216 8528 3641 4606 2769 6970 8850 4971 5489 2008 4564 8682 7784 5768 9252 901 438 3577 2765 5904 664 3348 6298 3602 2502 8617 7684 5805 4126 2451 6906 7234 9243 3778 1087 9053 5026 2504 5283 2820 4242 797 3925 1383 8750 7861 1403 6973 7617 3065 5395 4347 8144 2688 6527 8597 8673 7327 6331 1422 7115 244 7013 2092 54 7970 5742 4823 8588 2938 3060 4149 2375 6616 8803 1555 4369 1380 3011 6144 3367 7370 1995 2602 985 8785 8480 9125 1927 3269 3771 1032 7378 5726 2731 2020 6727 8793 523 6036 58 7993 5512 5049 2721 8482 673 7937 1168 4472 8247 7287 9017 6421 9190 3584 1819 1792 2810 6033 6749 7677 981 7160 4726 1886 7845 6975 7422 4613 4501 2569 4263 3206 4133 2420 3706 8894 2263 5774 4925 9180 8888 2945 2091 1873 6303 729 2156 3267 1860 6597 4930 5253 938 580 5825 166 8198 6892 8701 74 7094 8954 3156 6140 4279 2229 5466 8413 7105 8192 2632 7638 9308 8530 832 4643 2201 3268 4322 6510 2967 262 403 1258 8828 5838 8529 2788 237 3838 1291 4056 5628 7281 6476 7935 2850 6041 2013 4016 4576 5312 6827 6321 8669 830 1519 2750 6106 6993 6235 5899 7313 5331 4371 7086 8600 2660 5409 3465 5499 6231 5745 1801 5337 4468 1451 4192 
1275 1114 4960 8860 3900 6468 1505 8868 5588 3858 1947 2565 1472 243 6583 7085 5374 4291 4426 492 2311 8305 3662 8780 7488 3890 5005 4680 7358 9116 4397 5999 7902 83 3566 2134 8942 4767 6601 1745 5736 5254 8017 4015 7690 3798 8947 1067 7945 590 2547 2535 64 2053 5359 2493 6669 7473 6147 7175 6983 5196 745 2657 3497 697 3161 7528 2239 5991 3201 7681 5189 2959 2044 8917 2046 6313 6333 5318 4301 2213 2933 4121 3903 4392 7889 5323 1055 707 3857 518 6078 5134 6645 9138 1592 680 4446 7943 3461 3887 5601 2321 6621 558 4914 913 5637 6453 8511 4531 1218 5508 2603 6802 8426 8297 2947 5971 6552 5262 5935 782 7435 8357 6139 1136 5008 3585 3627 5356 2997 2347 881 4849 8808 8351 4017 2010 6836 4391 3630 3712 2969 5238 4333 2301 4406 1236 1050 1864 8408 8251 8795 5879 3365 7481 8206 2452 1767 8859 124 3948 4444 8962 4438 5003 8428 3105 5117 1095 8755 7881 3097 4877 155 1917 2455 6042 337 6724 6045 8483 7135 2242 4566 1679 834 1746 795 3548 2314 2036 4046 9129 7084 5091 2413 8170 5775 1817 529 813 2916 5130 126 1243 2370 4831 9122 3010 5104 2613 6761 5340 3512 6283 2346 653 6121 2615 7421 1869 1002 8834 2991 8992 632 1093 4543 645 2352 4115 373 1483 6966 8598 3896 3434 5987 8318 1815 1223 1548 6885 5073 6330 2573 1369 4095 1431 2185 5766 1301 7258 8048 7598 2847 1996 2378 8561 743 6381 271 1956 7439 7134 6636 5804 1858 6214 4730 8536 1203 3118 9202 1875 5885 168 5898 4014 4186 3346 3041 5558 9296 8157 4339 3234 2604 6803 5387 5590 125 2173 8012 8005 4858 651 372 378 8366 6299 1449 7793 8541 3235 8043 3086 3983 6949 4690 6494 8406 7408 350 7021 8224 7044 7662 6697 7679 169 528 7029 2790 7432 7602 8333 1582 1378 482 9279 8015 4514 3542 628 5053 6699 6227 2094 1621 847 3598 2728 7276 6620 8345 4278 4059 9058 4173 8134 1997 3182 3224 8129 5109 4494 189 7640 180 2963 1123 5593 3263 4185 7140 8990 6320 9275 4601 4854 5907 1135 8083 5964 7788 1992 8069 9174 6160 35 8572 2865 46 3952 6418 2510 5783 3816 2715 3930 2548 5204 708 7756 3825 777 3550 3929 5440 6751 7764 4070 7331 3743 9131 
9206 3828 23 41 4197 234 5723 7622 8832 2169 5599 2976 5266 1967 90 822 2538 3169 6771 7442 498 4967 5580 7581 7680 4728 1115 1064 3106 6266 4415 9294 5597 7059 197 7218 6948 5690 1653 4485 4019 3370 919 1330 6085 2078 5427 4545 2435 8862 3633 8145 5221 1388 5913 8140 7471 7156 6989 1190 6832 2830 4387 3454 7469 2910 4526 5187 2410 9223 4681 1300 7407 6523 3616 6894 7253 4515 5874 5448 7137 7957 1130 3092 7054 3516 5797 1000 4336 9090 6403 7255 8919 6522 6760 8898 4803 374 8686 3985 7045 3475 6065 7991 1409 7851 6671 6090 5826 7857 1155 8964 1117 7072 6064 2497 4899 2397 3189 2369 5027 5754 8950 5617 8391 914 6264 279 6174 5184 3733 5278 2924 567 7994 352 8084 2148 2723 3359 70 1870 7708 220 3994 9013 3191 9220 4155 5717 1110 2198 785 5325 4770 4250 52 4634 9037 601 8036 7996 2483 7232 8675 8836 1279 5346 7676 6104 1515 4603 5607 5144 2628 68 440 3586 3083 4830 4378 7762 1134 4542 7850 6296 4011 8751 4776 7954 7102 5697 2032 5729 5017 6962 2051 1092 9019 2759 8581 8618 912 2382 4892 8447 8176 5491 5695 5504 1060 578 4320 2379 7649 8416 1613 5344 7512 7865 3037 6689 6557 1569 5955 3707 9168 8566 1775 5950 6943 7804 434 6179 1142 7947 6456 6291 5789 6538 9134 3049 5075 5161 1623 948 6302 6063 7516 117 506 3302 7146 355 1081 2827 1496 2574 6167 3183 4287 5482 7319 7277 3860 3443 3298 8364 3826 7254 2360 5093 7039 6325 2567 4443 559 2625 4228 8967 6405 1674 3936 4475 8556 8585 896 3713 6259 4297 6718 2392 2279 4927 1283 2860 7665 663 596 6293 6805 2811 7383 8306 8330 3153 2153 2618 2441 3615 8092 552 5285 8124 9247 5530 8175 6242 5660 3433 1610 1832 3892 3862 640 2127 4196 3495 7217 5206 4836 7759 800 4227 3699 9055 5665 6826 7463 9065 4720 5069 3453 3358 6532 5970 7921 4087 1547 3424 8040 7995 6787 9069 8716 2561 8199 1479 2767 7818 7145 604 7597 4896 9281 4666 185 7978 3059 9221 2135 1800 2974 1529 5948 446 4436 8672 3508 6208 5673 6998 5203 278 7041 9110 5853 8121 1764 3046 6575 4738 2228 7761 9322 7019 6931 6383 6762 283 3935 6785 471 8214 231 3844 5746 2011 7209 
336 6433 756 9167 6741 3345 7685 4018 6682 9147 4790 5836 5906 676 3964 6362 3510 7510 2308 1806 5917 3387 5423 8900 147 3780 1696 9111 6783 6497 4104 3987 260 4616 2121 9283 1400 4670 2735 2096 6521 1423 4523 2243 6667 6990 3944 6915 6763 404 2691 1015 7092 7562 8624 2291 5934 5503 2326 2960 842 1963 5568 9050 3806 439 9154 6055 6451 7633 688 4354 8890 2813 2872 8102 6609 1497 8389 6449 1682 3594 5103 5812 863 3054 8079 2260 2027 3091 7687 6703 3557 2019 8427 2799 8182 6641 3168 2284 1934 6507 1658 3811 1774 7897 2238 2943 191 3869 3188 414 8072 7838 1382 4962 5363 4042 1983 4077 7429 4044 1109 1295 386 5481 3927 311 ================================================ FILE: lib/train/data_specs/got10k_vot_val_split.txt ================================================ 1349 5878 562 2202 8904 1501 8654 2975 2689 3680 5180 1900 7707 4723 8912 4029 3579 869 2888 8657 6599 741 4288 2244 7357 5704 8791 208 4805 8526 4887 8871 7468 3343 886 7794 2646 6454 6101 7885 7744 1297 4119 4856 122 2286 2925 5131 5843 5320 5626 540 1862 7335 699 7760 9198 3259 7345 8698 1280 6479 3100 3988 1322 5737 1268 3257 6791 3326 4815 7644 1082 2826 6821 8984 2553 5290 5909 4762 8096 8066 4325 6666 7193 7114 8060 7872 6788 3544 5460 3507 2509 6626 3429 5542 4220 2968 5271 3863 1868 5581 2012 6270 8038 4050 121 2845 1565 1998 2275 5524 6068 7624 4913 9277 1506 803 8848 5925 2450 2072 8190 4753 9162 825 7303 9028 2088 8516 1556 5937 7847 2367 7549 1049 1521 4739 3931 8958 4130 7876 897 5985 7346 7537 111 3700 1126 7896 3419 1051 5720 1068 3458 146 291 6256 5514 2857 4580 6239 6525 8717 391 4841 6676 4360 4211 73 1675 1987 4025 1321 662 8265 6424 2758 7765 7656 3209 7497 7600 9039 7697 5177 2983 5622 9295 3284 964 2024 1269 4551 8088 5659 2212 5199 5551 8607 5573 5200 7951 8429 7720 5919 1273 3529 6707 9176 7552 3255 5649 6110 1137 9272 788 5786 5186 2667 7630 3953 1828 8827 6471 7815 467 6387 3195 6238 6508 2373 5983 4931 2948 921 2438 517 3949 2137 3216 5683 3695 1719 4837 9159 6981 860 7410 
5497 1770 5557 8810 5194 4857 9100 6329 2609 1925 3686 9041 4924 349 9187 3393 3661 7120 6858 4587 3831 3130 5060 6486 8023 824 1354 8861 5534 7292 4389 6029 6226 3505 4326 7445 581 6089 3450 7324 6516 6775 1207 4575 5135 3918 9020 3473 3898 7812 6571 6757 6639 2557 1206 6148 7325 8790 4938 7026 4383 8041 1250 7267 1952 7561 8811 4941 8373 4848 6602 8355 8104 5214 4330 3181 3422 456 1782 3408 6530 719 7587 3058 740 4207 5336 2798 2473 4221 1493 3281 171 9157 9139 7766 3324 5308 3708 2431 8080 2093 2585 406 7040 5064 5247 4758 6512 4257 4935 2705 2572 3436 8513 1385 2637 7091 2761 6007 6694 2422 4917 2186 6898 1390 6965 7698 2002 2692 7365 7373 4091 947 3962 8692 1788 6862 6856 1950 1914 5658 3635 1620 4780 2580 1454 2786 687 7238 3648 6452 1197 3190 5900 9043 4958 1821 1187 1153 7169 7350 5674 6254 3025 6680 1690 2899 3893 1577 5728 9189 5077 3560 2179 5462 1402 3654 1376 5506 1179 5647 4686 8644 1352 2855 6079 2254 2668 2287 2457 3418 7264 677 3074 2655 1042 2210 4504 8309 4209 4280 3258 2977 84 4705 1244 3511 6355 8813 3228 9266 1122 613 732 5202 8425 2638 6470 3541 8132 2063 5129 2818 7949 8090 4465 7295 5239 7009 9271 8563 2832 952 8136 6776 3565 5188 7288 6999 285 5487 7608 8584 2071 7868 2804 3655 6847 3276 4272 3910 1574 4559 7580 5014 8183 6386 7574 356 4937 2487 9315 7572 3040 671 2682 8626 3868 387 8679 4074 1481 3527 3595 4754 2453 1579 4638 9123 1829 3009 3691 763 4875 3572 4273 2777 6032 4793 233 7147 996 3199 8835 3517 7210 6125 6037 3684 3915 3180 7043 4458 2889 57 7667 8375 1434 7493 4733 5827 2111 1313 7986 3075 2614 7547 4977 8527 3212 7300 5842 5244 3291 597 1007 2030 227 3830 5540 247 5643 9333 1958 1371 5220 7926 2927 1516 7130 193 1522 6165 6923 3794 4223 5535 2472 8630 3971 9101 2946 4609 7291 8542 6501 7548 4557 6274 5226 7309 1317 6275 1099 4191 7270 5392 2316 3819 1670 8045 4807 8864 2391 5908 8338 8218 6400 9193 3165 843 6613 6941 5629 7557 4321 3702 681 1159 4665 5959 1697 5509 8774 7389 3832 3751 8637 1680 6841 703 684 8293 3682 5733 
4818 3231 5562 9001 3889 7024 2519 1713 3287 219 8776 2289 7212 4832 4684 4617 4237 2649 8185 6326 3568 551 1426 8869 312 2905 4165 8248 2558 900 1044 8613 7743 5437 7604 3122 5708 8649 2878 4695 4491 7533 5223 7711 1844 5751 3008 8055 4636 61 198 2271 5698 4596 4500 5709 5819 7972 2992 1643 1048 6281 8886 360 4198 6814 3960 2606 7001 5888 450 7133 7015 7034 5153 8920 5066 469 1302 8816 463 8651 5869 6582 5578 1231 9274 7260 7751 8052 6799 2089 2342 8451 3260 5550 7795 2288 1205 40 496 8367 7836 5973 3908 5242 5062 2706 997 5419 9201 1965 6062 3050 5302 8735 358 2398 7470 1644 8179 7047 1549 5414 2539 7381 589 8166 8505 6035 3956 4540 6721 8074 1062 2384 2531 7159 3902 4584 2554 264 8720 2849 4916 5218 7202 883 4560 1677 4317 7863 4509 6577 2903 1452 1416 5369 473 6233 6359 5992 4934 8059 6834 4907 3320 8267 8280 2066 2402 1485 3772 3732 4764 9126 3575 5564 5641 1884 2330 1804 344 698 3089 1532 4454 761 8094 3432 6811 8722 8826 3222 8614 2901 7003 652 8663 4266 413 810 75 3334 4905 6438 4756 5137 6528 6534 6988 6177 8533 889 5384 7201 5132 7802 6864 3973 873 4840 1482 8376 3769 5858 6675 4286 2593 5863 4353 7817 7540 4999 4838 2303 7913 1508 7755 2784 4964 3431 6209 3755 6399 3954 455 5416 7591 245 140 9210 4084 967 7798 6795 7095 6733 3861 9264 1045 755 8042 7074 7778 6415 4724 6450 2049 1307 3485 1790 7869 3282 6907 3920 2868 5801 5632 5009 3955 7517 5128 3417 3019 1784 2312 2753 6976 342 8266 1849 2273 5037 7880 3793 7401 5412 8279 1257 3670 9049 3266 8955 6519 8916 2858 694 5650 4669 1785 3533 2704 8603 3726 6668 497 6815 6157 6646 6964 8097 5645 8481 8215 3775 2542 7514 5699 3518 3740 1404 8981 4086 6397 4204 6899 682 6589 4340 7424 9208 6504 4409 1 145 1882 4620 2634 4992 5453 3377 7875 530 1235 7605 504 1771 8489 345 7353 7797 7174 5914 2871 5721 6067 3582 5467 6234 691 8758 2122 1213 1492 1437 2187 1266 2395 7278 8491 5256 1554 8163 5966 7128 7904 1691 6272 3996 1706 1334 1316 6478 6935 1518 6700 8703 8744 8152 8778 5367 4218 9007 6312 606 7565 5293 2891 
675 2120 826 7008 5705 7748 8010 1498 5330 5472 2215 7627 3016 6588 1850 4128 8569 6987 148 8151 8789 7907 8596 715 9060 3872 1750 5889 4047 5960 3120 3449 1421 1102 3333 9197 8796 8123 8007 2028 8404 1945 1985 8109 5380 3504 6739 4180 5835 4243 25 4002 1976 158 5181 4885 8985 11 6425 5926 7062 5083 8394 4259 5844 1990 3942 5532 2220 28 5957 149 6748 3559 7647 2566 1359 5259 7010 554 6005 8172 8125 1350 9051 1973 1386 159 7007 3220 1846 3093 4445 2056 8370 3211 4384 2231 273 642 5311 265 226 9012 7879 118 7109 7251 1760 8667 2876 7162 3552 6901 6779 5021 6524 4957 3114 4544 441 1848 2136 2458 8662 1127 5541 3026 1080 6780 2224 8259 1073 9000 7244 7977 500 4435 7376 7979 1435 9291 7704 3521 210 6269 8570 3285 8039 3546 6203 1183 6107 4147 2234 7185 3192 7155 2001 7777 876 944 908 7791 6784 65 9172 5675 3886 7891 2978 1008 5630 591 5067 1139 577 9015 574 8137 7786 5765 4900 4090 7842 ================================================ FILE: lib/train/data_specs/lasot_train_split.txt ================================================ airplane-10 airplane-11 airplane-12 airplane-14 airplane-16 airplane-17 airplane-18 airplane-19 airplane-2 airplane-20 airplane-3 airplane-4 airplane-5 airplane-6 airplane-7 airplane-8 basketball-10 basketball-12 basketball-13 basketball-14 basketball-15 basketball-16 basketball-17 basketball-18 basketball-19 basketball-2 basketball-20 basketball-3 basketball-4 basketball-5 basketball-8 basketball-9 bear-1 bear-10 bear-11 bear-12 bear-13 bear-14 bear-15 bear-16 bear-18 bear-19 bear-20 bear-3 bear-5 bear-7 bear-8 bear-9 bicycle-1 bicycle-10 bicycle-11 bicycle-12 bicycle-13 bicycle-14 bicycle-15 bicycle-16 bicycle-17 bicycle-19 bicycle-20 bicycle-3 bicycle-4 bicycle-5 bicycle-6 bicycle-8 bird-1 bird-10 bird-11 bird-12 bird-13 bird-14 bird-16 bird-18 bird-19 bird-20 bird-4 bird-5 bird-6 bird-7 bird-8 bird-9 boat-1 boat-10 boat-11 boat-13 boat-14 boat-15 boat-16 boat-18 boat-19 boat-2 boat-20 boat-5 boat-6 boat-7 boat-8 boat-9 book-1 book-12 
book-13 book-14 book-15 book-16 book-17 book-18 book-2 book-20 book-4 book-5 book-6 book-7 book-8 book-9 bottle-10 bottle-11 bottle-13 bottle-15 bottle-16 bottle-17 bottle-19 bottle-2 bottle-20 bottle-3 bottle-4 bottle-5 bottle-6 bottle-7 bottle-8 bottle-9 bus-1 bus-10 bus-11 bus-12 bus-13 bus-14 bus-15 bus-16 bus-18 bus-20 bus-3 bus-4 bus-6 bus-7 bus-8 bus-9 car-1 car-10 car-11 car-12 car-13 car-14 car-15 car-16 car-18 car-19 car-20 car-3 car-4 car-5 car-7 car-8 cat-10 cat-11 cat-12 cat-13 cat-14 cat-15 cat-16 cat-17 cat-19 cat-2 cat-4 cat-5 cat-6 cat-7 cat-8 cat-9 cattle-1 cattle-10 cattle-11 cattle-14 cattle-15 cattle-16 cattle-17 cattle-18 cattle-19 cattle-20 cattle-3 cattle-4 cattle-5 cattle-6 cattle-8 cattle-9 chameleon-1 chameleon-10 chameleon-12 chameleon-13 chameleon-14 chameleon-15 chameleon-16 chameleon-17 chameleon-18 chameleon-19 chameleon-2 chameleon-4 chameleon-5 chameleon-7 chameleon-8 chameleon-9 coin-1 coin-10 coin-11 coin-12 coin-13 coin-14 coin-15 coin-16 coin-17 coin-19 coin-2 coin-20 coin-4 coin-5 coin-8 coin-9 crab-1 crab-10 crab-11 crab-13 crab-14 crab-15 crab-16 crab-17 crab-19 crab-2 crab-20 crab-4 crab-5 crab-7 crab-8 crab-9 crocodile-1 crocodile-11 crocodile-12 crocodile-13 crocodile-15 crocodile-16 crocodile-17 crocodile-18 crocodile-19 crocodile-2 crocodile-20 crocodile-5 crocodile-6 crocodile-7 crocodile-8 crocodile-9 cup-10 cup-11 cup-12 cup-13 cup-14 cup-15 cup-16 cup-18 cup-19 cup-2 cup-20 cup-3 cup-5 cup-6 cup-8 cup-9 deer-1 deer-11 deer-12 deer-13 deer-15 deer-16 deer-17 deer-18 deer-19 deer-2 deer-20 deer-3 deer-5 deer-6 deer-7 deer-9 dog-10 dog-11 dog-12 dog-13 dog-14 dog-16 dog-17 dog-18 dog-2 dog-20 dog-3 dog-4 dog-5 dog-6 dog-8 dog-9 drone-1 drone-10 drone-11 drone-12 drone-14 drone-16 drone-17 drone-18 drone-19 drone-20 drone-3 drone-4 drone-5 drone-6 drone-8 drone-9 electricfan-11 electricfan-12 electricfan-13 electricfan-14 electricfan-15 electricfan-16 electricfan-17 electricfan-19 electricfan-2 electricfan-3 
electricfan-4 electricfan-5 electricfan-6 electricfan-7 electricfan-8 electricfan-9 elephant-10 elephant-11 elephant-13 elephant-14 elephant-15 elephant-17 elephant-19 elephant-2 elephant-20 elephant-3 elephant-4 elephant-5 elephant-6 elephant-7 elephant-8 elephant-9 flag-1 flag-10 flag-11 flag-12 flag-13 flag-14 flag-15 flag-16 flag-17 flag-18 flag-19 flag-20 flag-4 flag-6 flag-7 flag-8 fox-1 fox-10 fox-11 fox-12 fox-13 fox-14 fox-15 fox-16 fox-17 fox-18 fox-19 fox-4 fox-6 fox-7 fox-8 fox-9 frog-1 frog-10 frog-11 frog-12 frog-13 frog-14 frog-15 frog-16 frog-17 frog-18 frog-19 frog-2 frog-5 frog-6 frog-7 frog-8 gametarget-10 gametarget-11 gametarget-12 gametarget-14 gametarget-15 gametarget-16 gametarget-17 gametarget-18 gametarget-19 gametarget-20 gametarget-3 gametarget-4 gametarget-5 gametarget-6 gametarget-8 gametarget-9 gecko-10 gecko-11 gecko-12 gecko-13 gecko-14 gecko-15 gecko-17 gecko-18 gecko-2 gecko-20 gecko-3 gecko-4 gecko-6 gecko-7 gecko-8 gecko-9 giraffe-1 giraffe-11 giraffe-12 giraffe-14 giraffe-16 giraffe-17 giraffe-18 giraffe-19 giraffe-20 giraffe-3 giraffe-4 giraffe-5 giraffe-6 giraffe-7 giraffe-8 giraffe-9 goldfish-1 goldfish-11 goldfish-12 goldfish-13 goldfish-14 goldfish-15 goldfish-16 goldfish-17 goldfish-18 goldfish-19 goldfish-2 goldfish-20 goldfish-4 goldfish-5 goldfish-6 goldfish-9 gorilla-1 gorilla-10 gorilla-11 gorilla-12 gorilla-14 gorilla-15 gorilla-16 gorilla-17 gorilla-18 gorilla-19 gorilla-2 gorilla-20 gorilla-3 gorilla-5 gorilla-7 gorilla-8 guitar-1 guitar-11 guitar-12 guitar-13 guitar-14 guitar-15 guitar-17 guitar-18 guitar-19 guitar-2 guitar-20 guitar-4 guitar-5 guitar-6 guitar-7 guitar-9 hand-1 hand-10 hand-11 hand-12 hand-13 hand-14 hand-15 hand-17 hand-18 hand-19 hand-20 hand-4 hand-5 hand-6 hand-7 hand-8 hat-10 hat-11 hat-12 hat-13 hat-14 hat-15 hat-16 hat-17 hat-19 hat-20 hat-3 hat-4 hat-6 hat-7 hat-8 hat-9 helmet-1 helmet-10 helmet-12 helmet-14 helmet-15 helmet-16 helmet-17 helmet-18 helmet-2 helmet-20 helmet-3 helmet-4 
helmet-6 helmet-7 helmet-8 helmet-9 hippo-10 hippo-11 hippo-12 hippo-13 hippo-14 hippo-15 hippo-16 hippo-17 hippo-18 hippo-19 hippo-2 hippo-3 hippo-4 hippo-5 hippo-6 hippo-8 horse-10 horse-11 horse-13 horse-14 horse-16 horse-17 horse-18 horse-19 horse-2 horse-20 horse-3 horse-5 horse-6 horse-7 horse-8 horse-9 kangaroo-1 kangaroo-10 kangaroo-12 kangaroo-13 kangaroo-15 kangaroo-16 kangaroo-17 kangaroo-18 kangaroo-19 kangaroo-20 kangaroo-3 kangaroo-4 kangaroo-6 kangaroo-7 kangaroo-8 kangaroo-9 kite-1 kite-11 kite-12 kite-13 kite-14 kite-16 kite-17 kite-18 kite-19 kite-2 kite-20 kite-3 kite-5 kite-7 kite-8 kite-9 leopard-10 leopard-11 leopard-12 leopard-13 leopard-14 leopard-15 leopard-17 leopard-18 leopard-19 leopard-2 leopard-3 leopard-4 leopard-5 leopard-6 leopard-8 leopard-9 licenseplate-1 licenseplate-10 licenseplate-11 licenseplate-14 licenseplate-16 licenseplate-17 licenseplate-18 licenseplate-19 licenseplate-2 licenseplate-20 licenseplate-3 licenseplate-4 licenseplate-5 licenseplate-7 licenseplate-8 licenseplate-9 lion-10 lion-11 lion-13 lion-14 lion-15 lion-16 lion-17 lion-18 lion-19 lion-2 lion-3 lion-4 lion-6 lion-7 lion-8 lion-9 lizard-10 lizard-11 lizard-12 lizard-14 lizard-15 lizard-16 lizard-17 lizard-18 lizard-19 lizard-2 lizard-20 lizard-4 lizard-5 lizard-7 lizard-8 lizard-9 microphone-1 microphone-10 microphone-11 microphone-12 microphone-13 microphone-15 microphone-17 microphone-18 microphone-19 microphone-20 microphone-3 microphone-4 microphone-5 microphone-7 microphone-8 microphone-9 monkey-1 monkey-10 monkey-11 monkey-12 monkey-13 monkey-14 monkey-15 monkey-16 monkey-18 monkey-19 monkey-2 monkey-20 monkey-5 monkey-6 monkey-7 monkey-8 motorcycle-10 motorcycle-11 motorcycle-12 motorcycle-13 motorcycle-14 motorcycle-15 motorcycle-16 motorcycle-17 motorcycle-19 motorcycle-2 motorcycle-20 motorcycle-4 motorcycle-5 motorcycle-6 motorcycle-7 motorcycle-8 mouse-10 mouse-11 mouse-12 mouse-13 mouse-14 mouse-15 mouse-16 mouse-18 mouse-19 mouse-2 mouse-20 
mouse-3 mouse-4 mouse-5 mouse-6 mouse-7 person-11 person-13 person-14 person-15 person-16 person-17 person-18 person-19 person-2 person-20 person-3 person-4 person-6 person-7 person-8 person-9 pig-1 pig-11 pig-12 pig-14 pig-15 pig-16 pig-17 pig-19 pig-20 pig-3 pig-4 pig-5 pig-6 pig-7 pig-8 pig-9 pool-1 pool-10 pool-11 pool-13 pool-14 pool-16 pool-17 pool-18 pool-19 pool-2 pool-20 pool-4 pool-5 pool-6 pool-8 pool-9 rabbit-1 rabbit-11 rabbit-12 rabbit-14 rabbit-15 rabbit-16 rabbit-18 rabbit-2 rabbit-20 rabbit-3 rabbit-4 rabbit-5 rabbit-6 rabbit-7 rabbit-8 rabbit-9 racing-1 racing-11 racing-12 racing-13 racing-14 racing-17 racing-18 racing-19 racing-2 racing-3 racing-4 racing-5 racing-6 racing-7 racing-8 racing-9 robot-10 robot-11 robot-12 robot-13 robot-14 robot-15 robot-16 robot-17 robot-18 robot-2 robot-20 robot-3 robot-4 robot-6 robot-7 robot-9 rubicCube-10 rubicCube-11 rubicCube-12 rubicCube-13 rubicCube-15 rubicCube-16 rubicCube-17 rubicCube-18 rubicCube-2 rubicCube-20 rubicCube-3 rubicCube-4 rubicCube-5 rubicCube-7 rubicCube-8 rubicCube-9 sepia-1 sepia-10 sepia-11 sepia-12 sepia-14 sepia-15 sepia-17 sepia-18 sepia-19 sepia-2 sepia-20 sepia-3 sepia-4 sepia-5 sepia-7 sepia-9 shark-1 shark-10 shark-11 shark-12 shark-13 shark-14 shark-15 shark-16 shark-17 shark-18 shark-19 shark-20 shark-4 shark-7 shark-8 shark-9 sheep-1 sheep-10 sheep-11 sheep-12 sheep-13 sheep-14 sheep-15 sheep-16 sheep-17 sheep-18 sheep-19 sheep-2 sheep-20 sheep-4 sheep-6 sheep-8 skateboard-1 skateboard-10 skateboard-11 skateboard-12 skateboard-13 skateboard-14 skateboard-15 skateboard-17 skateboard-18 skateboard-2 skateboard-20 skateboard-4 skateboard-5 skateboard-6 skateboard-7 skateboard-9 spider-1 spider-10 spider-11 spider-12 spider-13 spider-15 spider-17 spider-19 spider-2 spider-3 spider-4 spider-5 spider-6 spider-7 spider-8 spider-9 squirrel-1 squirrel-10 squirrel-12 squirrel-14 squirrel-15 squirrel-16 squirrel-17 squirrel-18 squirrel-2 squirrel-20 squirrel-3 squirrel-4 squirrel-5 
squirrel-6 squirrel-7 squirrel-9 surfboard-1 surfboard-10 surfboard-11 surfboard-13 surfboard-14 surfboard-15 surfboard-16 surfboard-17 surfboard-18 surfboard-19 surfboard-2 surfboard-20 surfboard-3 surfboard-6 surfboard-7 surfboard-9 swing-1 swing-11 swing-12 swing-13 swing-15 swing-16 swing-18 swing-19 swing-2 swing-3 swing-4 swing-5 swing-6 swing-7 swing-8 swing-9 tank-1 tank-10 tank-11 tank-12 tank-13 tank-15 tank-17 tank-18 tank-19 tank-2 tank-20 tank-3 tank-4 tank-5 tank-7 tank-8 tiger-1 tiger-10 tiger-11 tiger-13 tiger-14 tiger-15 tiger-16 tiger-17 tiger-19 tiger-2 tiger-20 tiger-3 tiger-5 tiger-7 tiger-8 tiger-9 train-10 train-12 train-13 train-14 train-15 train-16 train-17 train-18 train-19 train-2 train-3 train-4 train-5 train-6 train-8 train-9 truck-1 truck-10 truck-11 truck-12 truck-13 truck-14 truck-15 truck-17 truck-18 truck-19 truck-2 truck-20 truck-4 truck-5 truck-8 truck-9 turtle-1 turtle-10 turtle-11 turtle-12 turtle-13 turtle-14 turtle-15 turtle-17 turtle-18 turtle-19 turtle-2 turtle-20 turtle-3 turtle-4 turtle-6 turtle-7 umbrella-1 umbrella-10 umbrella-11 umbrella-12 umbrella-13 umbrella-14 umbrella-15 umbrella-16 umbrella-18 umbrella-20 umbrella-3 umbrella-4 umbrella-5 umbrella-6 umbrella-7 umbrella-8 volleyball-10 volleyball-11 volleyball-12 volleyball-14 volleyball-15 volleyball-16 volleyball-17 volleyball-2 volleyball-20 volleyball-3 volleyball-4 volleyball-5 volleyball-6 volleyball-7 volleyball-8 volleyball-9 yoyo-1 yoyo-10 yoyo-11 yoyo-12 yoyo-13 yoyo-14 yoyo-16 yoyo-18 yoyo-2 yoyo-20 yoyo-3 yoyo-4 yoyo-5 yoyo-6 yoyo-8 yoyo-9 zebra-1 zebra-11 zebra-12 zebra-13 zebra-15 zebra-18 zebra-19 zebra-2 zebra-20 zebra-3 zebra-4 zebra-5 zebra-6 zebra-7 zebra-8 zebra-9 ================================================ FILE: lib/train/data_specs/trackingnet_classmap.txt ================================================ Nf1aqv5Fg5o_0 airplane AAB6lO-XiKE_0 person AACM71csS-Q_0 person AACM71csS-Q_1 person AARNQeeGCeM_1 person AARldOxX9Qc_0 bird 
AATSbTthMRo_1 person AAVQ--F7Bk8_7 bird AAVQ--F7Bk8_2 bird AAVQ--F7Bk8_8 bird AAWK6esRYaE_0 person AAWK6esRYaE_1 person AAjY2Ci68z8_0 person AA19zjGEPvg_1 bear AA28Bcp5cJ4_0 train ABBGULxaufw_0 person ABF8Qzi1y6k_1 bear ABIlEiPfEC4_0 bird ABJ_agLToOw_0 bird ABZMoeeFyek_0 bicycle ABny-jw1_S0_0 elephant ABrhnT3LRWs_2 cat ABxlnMGfo5c_0 umbrella AByCCGnybVU_1 person AB2MjrpRiEQ_0 horse AB-q-hxh9XQ_4 bus AB-q-hxh9XQ_1 bus AB-q-hxh9XQ_3 bus ACDuy9fWQCs_1 umbrella ACFxVnoXE2k_1 horse ACMvGMt8Neo_0 person ACM6PJWHfcM_0 person ACOGOPL4ZH0_1 person ACOGOPL4ZH0_0 person ACS5TtaAdG8_0 truck ACarEC5tuT8_0 truck ACiNZsAvVTE_0 person ACkYaVC9f9M_1 umbrella ACnQKLobnGE_4 airplane ACnQKLobnGE_5 airplane AC0Z4yw1hf0_0 person AC0Z4yw1hf0_1 person AC-10OYYnLM_1 person AC-10OYYnLM_0 person ADHNPU5iB_4_0 cat ADWpC6kDWFU_0 person ADiIG2D8pds_2 motorcycle ADiIG2D8pds_0 motorcycle ADi674XOuRY_0 dog ADn8ZdVYOcc_0 train ADn8ZdVYOcc_2 train AD1cVG81mpA_0 person AD4EACfWAIM_0 horse AD4EACfWAIM_1 horse AD531xkux4k_0 person AD7A6_o0Las_0 horse AEQT6XxEeT0_0 person AEQT6XxEeT0_1 person AESfphazWKA_0 person AESfphazWKA_1 person AEokTVMPd4A_0 person AEtwwIR9UkI_0 dog AE2TrzJHr2s_1 motorcycle AE3t_VNk3eo_0 person AE6G6W2CL9M_1 person AE7tEK8S9pk_0 bird AE7tEK8S9pk_3 bird AE-k9jcdaJk_1 giraffe AFLrK88FzTI_0 motorcycle AFOjy-9Kf-8_0 person AFSTw_O6inE_0 person AFSTw_O6inE_1 person AFT64SYoPTo_1 person AFeRUltwvNE_0 knife AFeRUltwvNE_2 knife AFf9I30fB6U_0 person AFkSCsJ_jeg_0 person AFkSCsJ_jeg_1 person AFnPp9mvoJs_0 horse AFpVfranYCA_1 knife AFrLubifeb4_0 airplane AFrLubifeb4_2 airplane AFsmSsZBS6I_1 person AFsmSsZBS6I_0 person AF0FDnfdpro_0 train AF0-2lDeBME_1 bird AF2bYjH_Q8c_0 person AF4nO1MeUis_1 train AGV9gZ6ePKk_0 airplane AGXVFK896Os_0 cow AGYehDNUqx0_1 airplane AGYehDNUqx0_0 airplane AGdqwMVGRoU_0 horse AGfcGfMXHPM_3 elephant AGsg2IV8FME_1 skateboard ZBPURFcpqDM_0 motorcycle ZBXAMWkamQk_2 knife ZBXAMWkamQk_1 knife ZBcCcSynS3Y_1 car ZBcTSnaCcqE_1 person ZBcTSnaCcqE_0 person ZBcjhADZaUk_0 bear 
ZBdz7fg01uE_0 umbrella ZBp5ICCzoK8_0 person ZBriZpPQR6Q_0 cat ZBvEIHeKcKg_2 zebra ZBvEIHeKcKg_9 zebra ZBvEIHeKcKg_0 zebra ZBvEIHeKcKg_1 zebra ZBvEIHeKcKg_3 zebra ZBvEIHeKcKg_4 zebra ZBvEIHeKcKg_5 zebra ZBvEIHeKcKg_6 zebra ZBvEIHeKcKg_7 zebra ZBvEIHeKcKg_8 zebra ZB0EfmbWfng_0 horse ZB0kV8Ni0e8_0 person ZB_pe6v1lVI_0 person ZB_pe6v1lVI_2 person ZCAOpABRfTI_10 elephant ZCAOpABRfTI_0 elephant ZCAOpABRfTI_3 elephant ZCAOpABRfTI_4 elephant ZCAOpABRfTI_6 elephant ZCAOpABRfTI_7 elephant ZCAOpABRfTI_8 elephant ZCFCltdIjeg_1 person ZCFCltdIjeg_0 person ZCGB4r_lWmY_0 horse ZCS_eyAufDo_0 person ZCTwXcewINc_0 cow ZCfqT4CDOYA_1 bird ZCgDbEHLsIg_0 person ZClABNZVqqw_1 person ZCmoG6WgVO4_1 person ZCmoG6WgVO4_0 person ZCnJ6weWtz8_1 person ZCnJ6weWtz8_0 person ZCnJ6weWtz8_2 person ZCzrSOZhkx8_1 person ZCzrSOZhkx8_2 person ZC3Y42jSG_0_0 person ZC5Jtr93Fc0_0 cat ZDDtjYsFrzY_0 motorcycle ZDMLHna_uZU_1 skateboard ZDMSLfnIpw0_0 person ZDS-TQTDheA_0 person ZDWUEeCoa0c_0 person ZDfRsMjEWrU_0 person ZDucdx9SldA_0 bicycle ZDwG7VWIZ2E_0 motorcycle ZDw-tgE8yQw_0 person ZEA5lDwY3hY_0 person ZERPmLuCNr0_1 skateboard ZEYyXBrvcIU_0 person ZEbxfeAOLec_1 motorcycle ZEdGptkowmk_2 cow ZEdsROg2ZAk_2 horse ZEgcTqeZxOk_1 person ZEiW5hvCQyM_0 bird ZE16Mis16oE_0 bus ZE3Vro7d4pA_0 cat ZE415SbIjYI_7 bird ZE5h8vmL_Vw_0 boat ZE6oeN8ZzDA_1 person ZE6oeN8ZzDA_0 person ZFKQ9r76HHU_1 elephant ZFKYTz9Jkhw_0 umbrella ZFSspVdQ_1M_0 person ZFSspVdQ_1M_1 person ZFe5vGzmYgY_0 bear ZFe5vGzmYgY_4 bear ZFfH8M8dMH8_5 bird ZFk9b7tQz1g_0 person ZFn422HSENU_2 airplane ZFw7fJO3h3U_0 motorcycle ZF2yE0Tm8D0_0 cow ZF5yV-qvHfg_0 bicycle ZF8rySXBivY_0 person ZF_u1UFqAvg_0 person ZGHtP6pLosk_0 person ZGT9Ky1jJ0E_0 horse ZGWqLNy2PDM_2 bird ZGeWYNFOH7U_0 person ZGhdqsb3kNA_0 car ZGhdqsb3kNA_3 car ZGhdqsb3kNA_1 car ZGkmBkelEBU_0 person ZGpMZT1HUiw_0 horse ZGsHiz0oPuw_0 bus ZGvfU-Fgk40_1 person ZGyWFwMmdbs_0 person ZG9dVnPGocw_0 person ZHDkDNgRSz0_0 train ZHFPykjdFAY_1 person ZHPeB20mRyI_0 cow ZHPeB20mRyI_1 cow ZHX1xXuU_Jw_0 person 
ZHlb-NoDPiE_1 elephant ZHlb-NoDPiE_2 elephant ZHlb-NoDPiE_4 elephant ZHl7b8RItn0_0 horse ZHnW6ge8wBc_0 cat ZHodaPFcFYU_0 person ZHovXJVH8xk_0 truck ZHpZ3CGHl44_0 person ZHrrW673jzQ_1 person ZHrrW673jzQ_0 person ZHrsTuxP7aI_1 horse ZHu6CNOlw3g_0 cow ZHu6CNOlw3g_1 cow ZHxx4jT0QY8_0 person ZH1tP4KBq4c_0 giraffe ZH5HXdNA_Vg_0 person ZH-X6nu5grI_33 horse ZH-X6nu5grI_2 horse ZH-X6nu5grI_3 horse ZH-X6nu5grI_6 horse ZH-X6nu5grI_7 horse ZH-X6nu5grI_8 horse ZH_6GNzE7AE_0 person ZIAnd6kIMac_0 bird ZIAnd6kIMac_1 bird ZICz-o8kLz0_0 skateboard AGx9YQ6C-6o_7 car AG1KXUn4YG0_0 person AG_bCNeWGbQ_0 elephant AHARpIfT490_0 dog AHIF--VOeQs_0 person AHJcPNPqKmI_0 horse AHKFqtjfRZA_2 bear AHLL47_EdEA_1 person AHLL47_EdEA_0 person AHNC2jifaeA_1 airplane AHQLEaBATbw_0 person AHQW1ru8IzY_0 airplane AHQrFFp5yq4_0 airplane AHiwgwMi8HU_0 dog AHjEWaIP4Us_0 cow AHkvSb7kMDQ_0 person AHn7KxEbpSw_0 person AHvhccaU6e0_0 bus AHx-m9m2WSM_0 person AIAtwCnT8D0_1 person AIBVp_3pm4U_1 person AIBVp_3pm4U_0 person AIFwUvUUIAU_1 person AIPKb-NMVjk_0 airplane AIPKb-NMVjk_3 airplane AIVpT8BRXaQ_1 horse AIYDjtWzamM_0 bear AIYDjtWzamM_1 bear AIZGolX95Do_0 person AIbvvs9Mppk_0 person AIduTWoo-tY_0 skateboard AIeFzUH7L38_1 train AIkHZuaZGZc_1 elephant AIkHZuaZGZc_2 elephant AIpwAHaTBsI_0 train AI00Hva5A8g_0 person AI38cuNcfsE_0 knife AI73dwp8OlI_1 train AJAy74dPvNA_0 person AJCXZxF7mEU_1 skateboard AJDMiWpRbdY_0 person AJILdTCo1mA_0 dog AJKXpUsj3I0_0 bird AJRdbCnFyVo_0 elephant AJTfeXepoNQ_0 bus AJZ65x_ashE_0 airplane AJaOK6nLWLU_0 person AJaOK6nLWLU_1 person AJaOK6nLWLU_2 person AJh6EhObuEU_0 person AJiQZJH_ZsU_0 bird AJiYw7-oCvA_1 knife AJiYw7-oCvA_2 knife AJiYw7-oCvA_0 knife AJkWw2b2Qjg_0 horse AJor90pfjM8_0 cow AJtuQLfNvSs_0 cat AKBoEjrtQwE_1 train AKDi2KVrR1Q_0 skateboard AKIcyYzL9C0_0 cat AKMl62ZFICw_3 bus AKMl62ZFICw_1 bus AKN6nvHB7P0_2 airplane AKN6nvHB7P0_3 airplane AKPDvaUNx94_1 horse AKPDvaUNx94_2 horse AKVUSpeg9Jk_0 knife AKxpzCrmsi8_0 bus AK4AJfDZfEo_0 cat AK64udGI1BA_0 umbrella AK8imx-InYk_1 horse 
AK8imx-InYk_2 horse AK_J57sNeeo_1 elephant AK_0-KHw9wc_1 horse ALCj6V-0pU8_0 person ALKBlOms7sk_0 truck ALLYkPepYRc_0 train ALRR_HHP500_0 person ALRzJ2FzEoY_0 person ALYKJChPG6k_0 knife ALjxXEqJFTg_0 train ALpnjTPWIN4_0 bird AL73oE_aovA_2 bicycle AL73oE_aovA_3 bicycle AMDjY36EpsU_0 truck AMEZhZVe7hk_0 person AMEZhZVe7hk_1 person AMI4Xu1mmNw_0 elephant AMZeyszxY78_0 knife AMn7aithVV8_0 car AMz8PhUkmpM_0 horse AMz8PhUkmpM_3 horse AMz8PhUkmpM_7 horse AMz8PhUkmpM_2 horse AMz8PhUkmpM_5 horse AM5_HQ705r4_1 giraffe AM6sweCILPU_0 airplane ANHdxFi36CM_1 bird ANNbcEcj8Do_0 person ANQZ1MB6gI4_0 skateboard ANVkluf6XZA_0 cat ANWtZTJoYYc_0 dog ANZDRJnX_Os_0 person ANlhuKqnObE_1 person ANlhuKqnObE_0 person ANmJ_3l01rw_2 horse ANmJ_3l01rw_3 horse ANmkxc2V7qQ_0 person ANufFQ7Fqao_0 car ANufFQ7Fqao_1 car ANvWNG7bZj0_0 person ANwXehjlmOU_0 giraffe ANwXehjlmOU_2 giraffe ANwXehjlmOU_6 giraffe ANwXehjlmOU_7 giraffe AOFbvqQZz1M_0 person AOJiO3o1Pgw_0 person AONi1Rhl0VI_2 person AONi1Rhl0VI_1 person AOmvm3OOZZQ_0 person AOn9I3GEHoU_0 person AOo1qXfZWsc_0 bus AOq0zSQhX1E_0 person AOq0zSQhX1E_1 person AO9zthhr-og_0 person AO9zthhr-og_1 person APAgxsDsZqs_0 person APCppiM1SL4_0 person APEd6F66jXU_1 airplane APHhGoshqFo_0 umbrella APIrIPchQwg_1 person APIrIPchQwg_0 person APJ4_CEV8HQ_0 bus APLJsXaOe1c_0 person APQ99QCF6pA_0 person APRuUBgcBZc_1 person APYAGnOjUQQ_0 person APa_Xoa9qgg_1 motorcycle APcliMIvBe4_2 person APcliMIvBe4_0 person APcliMIvBe4_1 person APp-0CsKxpY_1 person APp-0CsKxpY_0 person APqdtMhtWlU_0 motorcycle APtqUIS_Hyo_0 person APwqoNNZyaA_0 person APyVeEcEt1U_0 airplane APyxRCm1XlY_0 person AP5QrGcnGoU_0 cow AP_vNEBzhqM_0 person AQALHMjkeh0_1 giraffe AQKHDJ9HKck_0 dog AQNEkyvgbeA_1 cow AQRKvHpsUk8_0 person AQTk87BXkxk_0 person AQVhyDD8GEk_0 person AQVthZjIETQ_0 truck AQcg3TVkW1s_0 person AQcg3TVkW1s_1 person AQi0YSJ74cw_0 person AQj3enGQQeE_0 boat AQminPRA2W8_0 person AQtIgG8RHRY_0 person AQvltP0EarU_0 person AQy7gL42wfo_0 airplane AQzJp7Qi_yA_2 elephant AQzJp7Qi_yA_13 
elephant AQ2bfY90nuU_0 person AQ7YDkmwB4M_0 dog ARAX6-JmsNQ_0 zebra ARAX6-JmsNQ_2 zebra ARFd2qxDhpQ_0 airplane ARNkmINZamQ_0 cow ARNkmINZamQ_1 cow AROrQJq2sWY_0 person ARRADkl3-30_0 person ARW5DipSrBo_0 dog ARmfFWE2ruc_0 person ARmsnBnMyPc_0 person ARnGZQm8zOM_0 truck ARqQUEVhu24_0 person ARrbFDLoy0Q_1 person ARtGNhHj2NU_0 cat ARyGQdkbuyM_0 person ARyGQdkbuyM_1 person ASBgE1svBKQ_0 person ASD516fNs3g_0 person ASExrIzixaM_0 truck ASc0m6oxXVI_0 person ASc0m6oxXVI_1 person ASm_mkHCybA_0 cat AS1xCm7MYs8_0 person AS1xCm7MYs8_1 person AS2tsNB9LBI_1 knife AS5hg_3pOXM_0 person AS9kBpj7qvE_0 person ATKytgCulZM_0 umbrella ATakdxmz3qU_0 car ATkJNKtd8yo_0 person ATk9e0fbxBk_0 horse ATk9e0fbxBk_1 horse ATk9e0fbxBk_2 horse AT1zSxV6stw_0 cat AT5urL0Fr0c_0 bird AUGQ4XFEkGY_3 knife AUI-RsDtk4s_0 person AUMHV6JiwU0_0 bird AUZevw68t_s_0 bear AUcOQ1L4Nj0_0 train AUfaVvy5QxU_0 train AUguk_8JO_U_0 skateboard AUgw-t2MrtU_0 person AUzge-cBHfM_0 bear AU0RtWdAXcU_0 person AU114x-Qif4_0 person AU3mKa0Npq4_0 person AU8GXMxyP9U_0 person AVHVVt5Srow_0 bear ZIGThAlQuUU_1 truck ZIGkCx4o3G0_0 person ZIMLdoIIFbg_0 person ZIWkcVTlaRU_1 person ZIamYwe-hJ8_0 car ZIawXDt6JH4_0 cat ZIlyoSrDQQ8_0 person ZImLYekhFBQ_3 bus ZI6J2WSiZy0_0 giraffe ZI7DX2OSzzQ_0 airplane ZJCSQFa1W3M_0 person ZJDAzZZQ38k_1 knife ZJDAzZZQ38k_0 knife ZJEQHkA9NLw_1 truck ZJHeFXEtwNE_0 knife ZJJoit687Tc_0 person ZJJpIPciUts_2 skateboard ZJL9WONxDB8_0 person ZJMJBrWq8-o_0 person ZJOVhmSGVMM_0 person ZJXuyIEaSc4_0 horse ZJYXcUOxNRc_1 person ZJdKrkzHR94_0 person ZJdKrkzHR94_1 motorcycle ZJe2QoJwNa0_0 horse ZJimYyH6VUI_0 car ZJoQRLyRs8o_0 person ZJpozi2Piqc_0 motorcycle ZJwWllfPFjo_0 person ZJyDrvmQwY8_0 elephant ZJyDrvmQwY8_1 elephant ZJ5n1Y-yXqM_0 person ZKF4kfqyu6U_0 person ZKIuqz6GDSA_0 horse ZKJuI7-4560_0 cat ZKKalWR8MBM_0 boat ZKSF-y6kC1I_0 elephant ZKSF-y6kC1I_1 elephant ZKTseP8JqIw_0 person ZKk703iOFmY_0 horse ZKrJdHuvvR8_0 person ZKy67yESvjM_0 person ZK1zKp1iJY4_5 elephant ZK1zKp1iJY4_2 elephant ZK3-Em8w4HE_0 horse 
ZK6pkPtSd_4_0 cow ZK_BL_TGwo0_1 train ZLFXKnOp0LM_1 knife ZLH6HbQ5Miw_0 person ZLSqYLLWQLc_1 cow ZLSqYLLWQLc_2 cow ZLcGyr4ZfJU_1 airplane ZLdb8-YkoiY_0 person ZLm8Hen6OFM_1 bicycle ZLm8Hen6OFM_2 bicycle ZLnf4vSxfgo_1 umbrella ZLqSGXI7FdM_3 knife ZLuY9hS-wd4_0 bus ZLuY9hS-wd4_1 bus ZLuY9hS-wd4_2 bus ZLupIiWNPOY_0 person ZL18xmfIKH4_1 motorcycle ZL18xmfIKH4_3 motorcycle ZL18xmfIKH4_2 motorcycle ZL3DgidLXjw_0 person ZL5SCZpZWtA_1 horse ZL-60We4drw_0 dog ZMDe7QMaLa8_0 person ZMD2tP69gaU_1 person ZMKFhrS_QnY_0 cow ZML6VoRZ_Tk_0 person ZMMDA6nYXZs_0 bird ZMPdl-1FCMQ_0 person ZMZU_V7d3-I_1 umbrella ZMa0bYeg_NE_0 dog ZMdAlm9Zx_A_1 car ZMeQ1Vc3HZk_0 person ZMuwZKOfK1s_0 motorcycle ZMvdpTH-1Ug_9 airplane ZMxu4wRDuqU_1 person ZMyEEXdgJeA_0 person ZM1xadWQqKQ_0 bus ZM2SMTrxUr0_0 train ZM3QVkm1izg_0 person ZM5-iyB8rFk_1 dog ZM_TO-0UDp4_0 person ZNJ8aytwo1E_0 person ZNP23sy27W0_0 person ZNTqZ3wERJE_0 person ZNUBh1ppeyo_0 skateboard ZNXCWGzmxK8_0 person ZNZx7hTxCQE_0 airplane ZNaTV3nGl6M_0 person ZNcUW5m7eRw_0 giraffe ZNg9OZgsMqc_0 bear ZNoQrAOf3Ns_0 truck ZNqpyPcacjY_0 motorcycle ZNv_LrEIljc_0 umbrella ZNxw9kVCouU_0 bus ZNzeI_r7GT4_0 truck ZN2bt7wkvH0_1 bear ZN5ukEMKLY4_0 cow ZN_gFe4IzxE_0 truck ZODUj9lsCzk_0 horse ZOEa1JGwnwE_0 person ZOEa1JGwnwE_1 person ZOGP8-XsFYc_0 person ZOIuTsiGyRY_0 bird ZOJSvR5KOsE_0 dog ZOMPRnYycak_2 cow ZOMnEZ4dWMk_0 elephant ZOStUYUIEdA_0 skateboard ZOTSBcRwdRA_0 person ZOX1xH7rOus_0 train ZOthVGHUcjo_3 cow ZOwhFlp5EiA_0 person ZOxDsYnvl0M_0 person ZOymkqw58fw_0 person ZOzQfVh1LN8_1 motorcycle ZO_5hZ2ex6Y_0 person ZPKaBLqoKvQ_0 person ZPNr3zZg6jk_1 person ZPNr3zZg6jk_0 person ZPNr3zZg6jk_2 person ZPQ0lqiH9uw_0 train ZPQ0lqiH9uw_1 train ZPQ3tbJp33I_0 train ZPVOrRypdRM_0 horse ZPZjgecd6OQ_1 boat ZPaWYb_4S8Y_0 person ZPeRU9CLLew_0 person ZPgUlFmZyP4_0 person ZPjN0Rp_1ZA_0 horse ZPkO4x8HPaI_1 person ZPqs3xJ8sMY_0 person ZPqs3xJ8sMY_1 person ZPq9qgTZ4XI_0 truck ZPyxQD17Fq4_2 person ZPyxQD17Fq4_4 person ZP7SN9kW5kg_0 person ZP7sET2Y9dU_0 person 
ZP8YaHDM_qE_0 horse ZQCFPzE41bg_0 cow ZQDoAEWZCQk_0 person ZQG5CpZ3fLM_0 person ZQRzkpfy378_0 bus ZQZRNVrE9hk_0 person ZQarE1lLDl4_0 person ZQdhjMVGJrk_0 person ZQdhjMVGJrk_1 person ZQmTc5C-h8w_0 person ZQrMMWQidx0_0 person ZQuVUoqiT_I_0 giraffe ZQuVUoqiT_I_1 giraffe ZQ3LAYCIDf8_3 bear ZQ8X2cqYANs_0 train ZQ9G0UkTR1c_1 person ZQ_vGl5xbKY_0 cat ZRFMzM7kxuI_3 cow ZRFMzM7kxuI_0 cow ZRFMzM7kxuI_1 cow ZRFMzM7kxuI_2 cow ZRLkkoSR8o8_0 knife ZRMOgw0VYRI_0 person ZRNQrzQlVwA_0 person ZRNgdckx504_0 person ZRQug2qT1tc_0 person ZRSRBBpyBG8_0 person ZRXjiNMKvis_0 airplane ZRc8GDK_9hc_1 umbrella ZRkHgC0EAz8_0 person ZRmkeBogj-U_0 person ZRoz_bGkPaE_0 person ZRuQ3ipcK3o_0 bus ZRzOWgIAwe8_0 bird ZRzOWgIAwe8_3 bird ZR0Qj5P8snw_1 bear ZR4yO1ASDwo_2 person ZR_VWPjxLTU_0 dog ZSDCxbSs-Hs_0 person ZSFzv92w5z4_0 motorcycle ZSGJwERlcvM_0 person ZSXoUfKY7t8_0 person ZSdzUC2BB8Q_0 train ZSdzUC2BB8Q_1 train ZSkkNWgXm6E_0 skateboard ZSkkNWgXm6E_1 skateboard ZSn4gRAJToo_0 cat ZSoJT194AtI_1 skateboard ZSoJT194AtI_0 skateboard ZSruK26cGuI_0 dog ZSs6Knma-Q0_0 cow ZSs6Knma-Q0_1 cow ZSu3GocMJzI_0 car ZS29l3t9vK8_0 person ZS6NQXztroI_0 person ZS_wuZnVzbw_0 person ZTLDJDjvSuQ_0 truck ZTPTnzEs_Lc_0 person ZTcRmNM1n8M_0 person ZTjOZ-dZDEg_1 car ZTmHHCmX7aw_0 skateboard ZTnEKCqMNHs_0 person ZTo33r_63Wg_0 knife ZTw6Dkp-LPU_7 elephant ZTw6Dkp-LPU_0 elephant ZTw6Dkp-LPU_4 elephant ZTw6Dkp-LPU_5 elephant ZTw6Dkp-LPU_6 elephant ZT5iwG3vEhM_0 umbrella ZUCf2cVBY08_0 person ZUWSpLaJj4M_0 bird ZUYtIKrcaKo_0 person ZUaHjAaQqF0_0 bus ZUdCQl7WU_U_1 person ZUdCQl7WU_U_0 person ZUd0IAbilBA_0 elephant ZUoFqGf_ijs_0 elephant ZUoJFmQ6ro4_0 person ZUwniKcHERQ_0 horse ZU0WSpOWSak_1 bear ZU0_sT3EbVY_0 zebra ZU9LGiLzKJg_0 motorcycle ZU-ZhVyhBpA_1 bicycle ZVAHreexSa0_0 person ZVBjo5HM0Do_0 knife ZVD-ea5SjMg_0 person ZVJpmiue5IA_0 truck ZVKyUsgomW4_0 person ZVOMkt8TORM_0 train ZVQo_9tFZGY_0 bus ZVY_873YYQY_0 skateboard ZVZJRbJ2h1A_0 cat ZViLnbCdjZM_1 person ZVlOetMc3m4_0 person ZVl8So4V1Ss_0 cat ZVnaHf8vAhA_0 zebra 
ZVtPRAs8Za0_0 person ZV8NIO3XuLQ_0 person ZV9eJe2grq4_1 bear ZWIPlBvd1DI_0 person ZWIPlBvd1DI_1 person ZWJv_-wAdws_1 skateboard ZWKHlq-W7_8_9 train ZWKHlq-W7_8_14 train ZWKHlq-W7_8_0 train ZWKHlq-W7_8_1 train ZWKHlq-W7_8_4 train ZWKHlq-W7_8_7 train ZWKHlq-W7_8_10 train ZWKHlq-W7_8_11 train ZWKHlq-W7_8_12 train ZWKHlq-W7_8_13 train ZWNe-zcl-IY_0 boat ZWNjUm5Uzh0_1 bicycle ZWNjUm5Uzh0_5 bicycle ZWXE7IAaWrg_0 person ZWXSnELtawA_1 knife ZWXSnELtawA_3 knife ZWX1cGhJG98_0 bicycle ZWlTD6EbOTo_0 person ZWqzdCz6UvY_0 bird ZWr6RECjqV0_1 horse ZWr6ZU_-ir4_1 person ZWthtO1iGtQ_0 person ZWwlzozPAk8_0 person ZWxn8yT0bXo_0 cow ZW0HC4IRa64_0 person ZW3CWoXzrn4_0 bicycle ZW3CWoXzrn4_1 bicycle ZW5VkDNSfWA_0 cat ZXMqiFE6KOE_0 airplane ZXRcWIcok2I_0 person ZXgYAh2AWyk_0 horse ZXp6jOe8DUE_0 person ZXyJafbGcBM_0 horse ZXzno8CjUyM_0 elephant ZYB9yzoJ6jc_0 person ZYG83auB9Lk_0 train ZYIgTdUmOWk_0 elephant ZYKlgXftesk_0 cow ZYM0_4YzeeQ_0 person ZYRgw5rNhE4_0 person ZYS7WVlJbuU_0 person ZYX53PWsBdk_0 person ZYY8vkvB1zU_0 person ZYkIkq9kfLc_0 dog ZYlANECCXnI_0 person ZYocOIOyuqs_0 person ZYsifQxv94s_1 motorcycle ZYs7rbZt8Zw_0 airplane ZYs7rbZt8Zw_1 airplane ZYs7rbZt8Zw_2 airplane ZYtk2iVNC90_2 airplane ZYtk2iVNC90_0 airplane ZYxn9wmzRI4_0 bicycle ZYxn9wmzRI4_1 bicycle ZYzeKMdP2SE_0 person ZYz6B5dwXcE_0 person ZY_urkqeQLM_0 bicycle ZZANjG2Z5Jk_0 person ZZFzCaL48sE_0 cow ZZNRG-ux4fw_0 person ZZQDFjbEcHQ_1 bird ZZQDFjbEcHQ_2 bird ZZQSDwoLZ00_4 knife ZZSFKq4WH78_0 cat ZZVPKuh-2v8_0 person ZZVx_IT4voA_0 person ZZlf3LtDpH8_1 bear ZZpLkBcXUgs_1 person ZZpLkBcXUgs_2 person ZZxMtMlV-MM_0 cow ZZyW-2jZcIo_0 horse ZZyW-2jZcIo_1 horse ZZ20JXRExdg_0 person ZZ8OuI39UTM_1 person ZZ85EAvnAGU_0 person ZZ85EAvnAGU_1 person ZaDVUoq6h5o_1 person ZaD5V9_Vw2w_0 person ZaJb3JTan7Q_0 person ZaLqPrH_aVo_0 train ZaLqPrH_aVo_1 train ZaNZV-lM-3o_0 person ZaNZV-lM-3o_1 person ZaPC288yVBg_1 bicycle ZaPC288yVBg_5 bicycle ZaPC288yVBg_7 bicycle ZaPltFe0S_o_1 truck Zabt7ElK3jM_0 person ZacHdhX9F9M_2 dog ZadGgAG3PzE_0 person 
Zaew_bHz-PQ_11 umbrella Zaflj5gSZEw_0 person ZanT0hXyJhk_0 bird ZavCWamLatc_2 person ZavCWamLatc_1 person Za4BYhhaFFQ_1 zebra Za6oX4aQR34_0 airplane ZbB-tdDvITQ_0 motorcycle ZbDu8V7ppZE_0 motorcycle ZbHt1sn7oTI_0 person ZbJvtTVTTV8_0 knife ZbQXzueqj4Y_0 horse Zbgfg8usx-k_0 person Zbgfg8usx-k_1 person Zbm5_qB8fEs_0 person ZbrJHC_mHlo_1 person ZbrJHC_mHlo_2 person ZbrJHC_mHlo_0 person ZbrqZYGiMvE_1 cow Zb2Vz655gh4_2 horse Zb755JeGMpU_2 person Zb-JKfQ5emU_1 person Zb-JKfQ5emU_2 person Zb-JKfQ5emU_0 person ZcJPap_gVyo_0 person ZcXA6CyQBi8_0 cat ZchU4DxP5A8_0 person Zcw7wSfd2JM_0 person Zcw7wSfd2JM_1 person ZdElKzM-US0_0 umbrella ZdKO1sC4o60_0 person ZdMbx0IXDzs_0 person ZdMm6j__cQM_8 bicycle ZdTZrRX0dv4_0 truck ZdXrQlOU7iw_1 bicycle ZdaFXJzLLUs_0 person ZdaFXJzLLUs_1 person ZdeTj7nyN-s_0 boat Zdevf1MbY8U_0 train Zdevf1MbY8U_1 train Zdevf1MbY8U_2 train ZdirtQF_sjE_0 person ZdlnVpHrDcg_0 giraffe ZdlnVpHrDcg_2 giraffe Zdq2csZeJr8_2 person Zdrk4yHmMXA_0 person ZdtUPHscS-s_0 person ZdxD4gqVioQ_0 cat ZdxHWwaivLc_0 cow ZdyBZtlMq-M_2 bear Zd3j0bQV6NI_0 person ZeHLf0q4Z1Q_0 person ZeZAZbMg1zY_0 person ZeaoaXZDhPw_0 person ZemOY1F1bVo_0 truck ZemOY1F1bVo_3 truck ZemOY1F1bVo_1 truck ZerHfx3SLxU_0 person ZerYXYTyhoc_0 person ZetcbIDyydg_1 car ZetcbIDyydg_0 car ZeuqVhpsVu0_0 horse Ze6GIOUVxZU_0 person Ze8W47hBrrE_2 skateboard ZfAFALQjUwI_2 person ZfAFALQjUwI_1 person ZfAM39o5Cbc_0 bird ZfDkxwMowSk_4 elephant ZfF5Z0hrOQw_0 person ZfHSyDaLaw0_0 airplane ZfHSyDaLaw0_2 airplane ZfHSyDaLaw0_1 airplane ZfJvZeaN7Ro_1 person ZfTTW39iHJQ_0 person Zflcz9EKz4g_4 elephant Zflcz9EKz4g_1 elephant Zflcz9EKz4g_2 elephant Zfmwrq2aghI_0 person Zf86HoPHmBs_1 bird Zf86HoPHmBs_0 bird Zf-rSx5ZNB8_0 person ZgK0Y4PgWSM_0 person ZgOr7facaIw_0 skateboard ZgP7q-rIhs0_1 person ZgTDthFY-aI_0 bird ZgZ18HIfCGc_1 motorcycle ZggirLBvHSw_0 dog ZgjspuwgTAc_0 person ZgtG8Zy63UQ_0 person Zg18GZ5OFWw_1 person Zg2YrzGNuZs_0 person Zg4f2iY8_zo_1 cat Zg4f2iY8_zo_0 cat Zg5MdsCXRWM_1 cow Zg5MdsCXRWM_0 cow ZhLB-laOg_g_9 
bicycle ZhLB-laOg_g_3 bicycle ZhLB-laOg_g_5 bicycle ZhLB-laOg_g_6 bicycle ZhLB-laOg_g_10 bicycle ZhLB-laOg_g_12 bicycle ZhPafr5WTEs_0 person ZhtgT8q5Gm4_0 person Zhtr_XhO6_4_0 train Zhtr_XhO6_4_1 train Zh6QWGGQ9dU_0 person ZiJFOBVGah4_0 horse ZiPO1UcM3IY_0 dog ZiP2ydBHuPs_2 person ZiSl_Dy1ZB4_0 person Zibk3bXvHCY_0 cat Zig1VrVbQc0_0 horse ZimvCFcji0A_0 person ZisoM7y_CS4_0 person ZitUYI22J54_1 knife ZitUYI22J54_0 knife Zi1etYbSUmQ_1 person ZjCbmE2jLo4_0 person ZjFb1VLHvyg_1 horse ZjPmZ4grIFA_0 person ZjPmZ4grIFA_1 person ZjQqfJ1Docg_0 person ZjQ9lIlCehk_0 skateboard ZjSloqSrfWU_1 airplane ZjSloqSrfWU_3 airplane ZjWBw4tZUO4_0 train ZjWBw4tZUO4_1 train ZjWBw4tZUO4_2 train ZjWBw4tZUO4_3 train ZjWBw4tZUO4_4 train ZjWBw4tZUO4_5 train ZjWBw4tZUO4_6 train ZjbhM1ZiKW8_0 person ZjbhM1ZiKW8_2 person ZjcEfOHRyLQ_0 truck ZjcevqmMJvY_0 person ZjgTSjb7Vh4_1 car ZjnaerD1MHM_0 elephant Zjn6uD43ewg_4 airplane Zjn6uD43ewg_5 airplane Zjn6uD43ewg_1 airplane Zjn6uD43ewg_2 airplane ZjpmS5k09Ug_1 person Zjpzw1n9Lvc_0 skateboard ZjsEX7nNYdQ_0 person ZjxiHzcXOAs_0 person ZjxiHzcXOAs_1 person Zj2HBun9kBY_0 person AVW26zY72Ns_0 person AVXWb0s5LZw_0 person AVqCe7X9Pp4_0 cow AVragVmWr8M_0 motorcycle AVvnZ-Ky-ew_0 person AV9y4LnUV84_0 dog AWAQTemnBJc_0 person AWCUoghX20A_0 cow AWD_KAfvb0U_0 skateboard AWOhJ9RZReg_0 person AWOhJ9RZReg_1 person AWPNd7zPJzg_0 person AWPNd7zPJzg_1 person AWZt9EdU3BU_3 zebra AWdKXFitdJI_0 boat AWh2S4rI6kc_0 person AW1SjuoheU8_0 cat AW2cvkaExG4_0 cow AW8munaOGqw_0 person AW--f4fsLFY_0 train AXB4hYQKqUw_0 person AXB4hYQKqUw_2 person AXQlwoC_K0g_1 truck AXX66Oq_RkU_0 person AXhx8hncZvA_0 boat AXm0KvcIchQ_0 train AXtXzxTXTqI_0 elephant AX2rS0bpAmM_0 horse AX4Hsfdm-Fo_0 elephant AX8WoOXfJDA_0 person AX-xVtjP42Q_0 person AYLoR7L3CMs_3 bird AYLoR7L3CMs_1 bird AYUGoWokN_0_0 person AYYdBxTI_54_1 train AYakvLR8aVM_0 person AYe6Wf0URgo_0 truck AYgbgSVClN4_0 person AYg1V2ol96s_0 dog AYj70IRvvwI_2 airplane AYj70IRvvwI_3 airplane AYn-qtOy_nc_0 person AY7foLy1uok_0 elephant 
AY7foLy1uok_1 elephant AY-AbrJPyY0_0 train AZHYXkv5rMk_0 bird AZJsII37MPY_0 bird AZMW1TyN6Z4_0 person AZQjsUm-CXk_1 person AZhH2ej_x_g_0 person AZjZ1ZSyCeE_0 person AZk4MAu-j90_0 person AZleWF5zAxc_1 bear AZl3Emy9K3A_0 horse AZouBTtQrtM_0 person AZpAuvQryZo_0 person AZpAuvQryZo_1 person AZ9SW8bxD3E_0 bicycle AaGwVQ6UjOE_0 person AaRVwgGBmWU_0 person AaTW4oc5bBU_0 person AaZsdPwg9qg_3 bus Aac18k-eLZI_0 person Aac18k-eLZI_1 person Aac18k-eLZI_2 person AakpjcyvFSo_0 person AalaqaXsEbs_3 umbrella AalaqaXsEbs_0 umbrella AalaqaXsEbs_1 umbrella AalaqaXsEbs_2 umbrella AaoK6DPQKII_0 bus AaotWWHg4eU_0 truck AaotWWHg4eU_1 truck AaotWWHg4eU_2 truck AasksRmCk1g_0 person AatNkWo2ryE_0 person Aa0FU2EIMZ4_0 bird Aa-wzDtjCGc_0 person Aa_biYfYp08_0 person AbEsU9EX9XQ_0 elephant AbEsU9EX9XQ_2 elephant AbO_VrlyQ8I_0 umbrella AbTxhwSueZw_0 person Abd7Vn-Nyt8_1 truck AbeOAFhMXBY_1 bird AbeOAFhMXBY_2 bird AblKd4XIjqk_0 person AbmnNkzkXFg_0 elephant AbmnNkzkXFg_1 elephant AbuMVYzS0mw_0 skateboard AbvoOuTpLtA_0 dog AbwI4m0H9Hk_2 train Abx126RTs10_1 elephant Ab9zgKJnr9Y_1 person Ab-vGS2mqFQ_0 cow Ab-vGS2mqFQ_1 cow AcCU5YAWXlw_0 dog AcReGpoHOZI_0 person AcSmnBYhEsg_0 person AcTgPRNars0_1 truck AcUEWZRPoGA_0 umbrella AcZNiBe0Fgo_0 person AcZukbBG7tI_0 boat Acc1yTFpH2c_0 dog AcpBKywfL4o_1 cat AcpOxyI_YPI_0 person AcprJcYvkbY_0 person AdDiiRHwZ2E_0 cat AdEH-oHs1Qo_3 train AdEiQT7Nm0o_1 motorcycle AdE2jnpk6AM_0 boat AdbsyVjq_Xs_0 cow AddL-M622TI_0 knife AdgTVbi_kus_0 person AdsPsjswSGQ_0 motorcycle Ad044xbRhE8_0 person Ad2TSmaLvX8_0 person AeDfdgrccVw_0 person AeHbZ3U8S8U_2 train AeWBkNuJmEA_0 truck AeWBkNuJmEA_3 truck AeWBkNuJmEA_4 truck AeWBkNuJmEA_5 truck AeakbNNwcW0_0 train Aec4uweTSes_2 skateboard AeflYi3Sxss_0 person AegDGWXkWNw_0 person AenVUPH1ils_0 bird AendE1XHSps_0 bicycle AerUXP3Mmks_0 person Ae5qWkNt6RU_2 car Ae7ucKj40mw_0 dog Ae9Zd3lP7bg_0 person AfHkdkvxhNs_0 elephant AfNCSPijpao_0 person AfNGR5iEpvU_0 cat AfNtKiB_rD8_1 motorcycle AfWHElsVCyM_0 cow AfWfexnwsHg_0 person 
AfWfexnwsHg_1 person AfkKO6j4jWc_0 person AfmMpft13ZU_0 person AfnQoNimSjc_0 person AfynslRqwxI_0 car Afz2VDV4UHg_1 person Afz2VDV4UHg_0 person Af2MGhdZAn8_0 person Af2VyQEZtfk_0 person Af6Ve26JUOg_0 person AgBaUhTbzxA_0 airplane AgBaUhTbzxA_4 airplane AgBaUhTbzxA_5 airplane AgBaUhTbzxA_3 airplane AgBaZRmz8IY_0 skateboard AgJCf77qxsY_0 person AgP2HoU83S4_4 knife AgYhFemsFag_0 person AgZ2iflIKWc_1 person AgaetfTOzc8_0 person AgdrEW8jmw4_0 truck AgqmhFD0R94_2 elephant AgqmhFD0R94_3 elephant AgqmhFD0R94_1 elephant AgrKeQXSU2M_0 elephant AgrKeQXSU2M_1 elephant AgrKeQXSU2M_2 elephant AgtCW50wfig_0 person AgvxdVNj5Oc_0 skateboard Agw5t7YSQbE_0 skateboard AhAW4UKPzz0_0 giraffe AhE2vDF6Gbc_0 horse AhE2vDF6Gbc_1 horse AhjsDq9fEzQ_0 person Ahv2jhPqRPg_0 person AhwGPZWtf3E_0 person Ahxq6Rtu3lc_0 person Ahx3IZujXDw_0 bus Ah0AGjta1qg_5 bird Ah04VeRs2hg_0 truck Ah4x4EfR3BY_0 motorcycle Ah4x4EfR3BY_1 motorcycle AiIc8FW3q98_0 car AiL_iCJ8HZI_1 person AiNLvzwt3_w_1 bird AiNLvzwt3_w_2 bird AiP7EOvTpK4_0 motorcycle AiP7EOvTpK4_2 motorcycle AiU_T3DZI2w_1 bus AiU_T3DZI2w_2 bus AieRY99VkmE_0 person AieVzbENJv0_3 bicycle AiieCerOKpc_0 person Aik2hirrxEo_3 airplane Aik2hirrxEo_0 airplane Aik2hirrxEo_1 airplane Aim6_lZQi4g_0 person AiqqXxqnPPM_1 cow AiqqXxqnPPM_0 cow AittR1dd2SI_0 train AittR1dd2SI_1 train Aiv3XHMuVq8_0 train Aiyfw0Zh38k_0 person Ai29fDmklxM_1 person Ai29fDmklxM_0 person Ai3S7n1Aofs_0 elephant Ai-487iZv0E_0 person AjFhyF1XZw4_0 person AjJHvamHoMU_0 horse AjPBAy1xgrY_0 person AjVe8d0vc1E_0 person AjamPk2Geuw_1 bus Ajg7q9zxJUo_0 person AjroIzI2OW8_1 truck AjroIzI2OW8_2 truck Ajsu2bGngDw_1 person Ajs4qdBK7Jk_0 elephant ZkD_WAxZB3o_0 cow ZkHPsjy-YUQ_1 knife Zkbav-Qoxds_0 horse Zkbav-Qoxds_2 horse Zkbav-Qoxds_1 horse ZkidaaVx2VU_1 bus ZknqgRL504A_4 bear ZkqA2kLudwE_4 train ZkqA2kLudwE_0 train ZkqA2kLudwE_3 train Zku9JAotBZ0_0 boat ZkzM2jvV2AY_0 person ZlBfF2yK2vg_1 person ZlBfF2yK2vg_2 person ZlBfF2yK2vg_0 person ZlDsSDEHEzY_1 cow ZlDsSDEHEzY_0 cow ZlDsSDEHEzY_2 cow 
ZlFElBglnHA_0 cat ZlP8tmFYeyY_5 bird ZlfyrRfHDoc_0 cow Zljx0icnRa8_0 person Zljx0icnRa8_1 person Zlmsqen0qZo_0 person Zln667JkWo8_0 person ZmHKBIsSjQA_0 horse ZmHKBIsSjQA_1 horse ZmVLw9-fLDo_0 car ZmbXlevaX2U_1 boat ZmgJjFt3JU4_0 skateboard ZmhKe4_d5Ag_0 person ZmiCqFxUJSw_1 airplane ZmkKOYN1dRw_0 person ZmrCaB8p3IM_0 bear ZmuzvhzN6EI_0 cow Zm3AU4TEpEw_0 person Zm5VvBaQUwU_0 bird ZnRgQ1VBIGE_1 person ZnWAM5ju8NM_0 person Zne4XpVG2YQ_1 person Zne4XpVG2YQ_0 person Znr-Uiobo-k_0 person ZntDSf8cCPI_0 person ZnvLWU_PCZ0_0 motorcycle Zn-r14oEJwM_0 airplane ZoC1knYO0Tg_0 cow ZoJIup20AGU_0 person ZoKfc3OL0JY_0 person ZoK4wKRoZjY_0 person ZoN4k6UNw6I_1 horse ZoOvu218D6M_0 person ZoR1yoQzsbM_0 person ZouHgocvjDI_0 bird Zo-8G7N2DXU_0 person ZpAlbL-YE0E_0 bus ZpCrRb_a9QI_0 person ZpCuVDLXQSw_1 horse ZpCuVDLXQSw_0 horse ZpSzmFLEm0c_1 car ZpURI0wRgws_0 person ZpXJ-0dv6Us_1 cat ZppFK22HdIk_0 person ZpqXtZfe-3w_0 cat Zp1nQXN7dyg_0 horse Zp2CuvTAZLw_1 person Zp740cgCPPE_0 person Zp8GHxi_5l0_0 knife Zp8GHxi_5l0_1 knife ZqM9VL5DJ28_1 person ZqOcOhiAI6k_0 cow ZqS1PqS3iT0_0 truck ZqW027iDkCI_0 person ZqXFvdeNrYI_1 person Zqa0-AUnl9s_0 person Zqm8A3wpeJQ_0 person ZqtVs5joekw_0 cow Zq018zZzx1c_0 person Zq1u84GLCHI_0 motorcycle Zq5nK49UZ_o_2 elephant Zq5nK49UZ_o_3 elephant Zq5r3BwLg_c_0 skateboard Zq-RNCVoZFs_0 person ZrA0NE09ipc_0 dog ZrDoGqu-A5A_0 train ZrI4ruv6B3o_0 bird ZrKpKmp29_o_1 bird ZrKpKmp29_o_3 bird ZrKpKmp29_o_6 bird ZrK5JKg83qU_0 person ZrUx83OGIOk_0 person ZrW7Si0hJKI_0 person ZrbVa__ne-0_0 person ZrfPtqkS_MY_0 airplane ZrfPtqkS_MY_1 airplane ZrfPtqkS_MY_5 airplane ZrfPtqkS_MY_6 airplane ZrfPtqkS_MY_7 airplane ZrgMnk8f_TA_0 person ZrgMnk8f_TA_1 person ZruJ2hhn9z0_1 person ZrvWeRZ_dyU_1 cow ZrvWeRZ_dyU_0 cow ZrwXUWAxjIM_0 giraffe ZrzdqF_ePkM_0 horse ZrzdqF_ePkM_2 horse Zr5eAtkuxQ0_0 bear Zr_AAxouNfg_0 cow ZsCaDsfPNec_0 cow ZsDDOO-bpFA_0 person ZsDDOO-bpFA_1 person ZsESx0nIYqI_0 elephant ZsESx0nIYqI_6 elephant ZsESx0nIYqI_7 elephant ZsJCwiPEvkI_0 person ZsLDBiZ0o14_0 
skateboard ZsPVRik6m_c_1 bear ZsSkZhL-HOM_2 bicycle Zsb2ucv_mAg_0 person Zsdv_3EWODM_0 person ZsyMk67bjIM_0 dog Zs0j_1tuTDo_0 person Zs1ltKMvRec_0 person Zs1ltKMvRec_1 person Zs79wUXMpx8_0 bear ZtA8n6dsH-w_4 car ZtA8n6dsH-w_1 car ZtA8n6dsH-w_2 car ZtA8n6dsH-w_3 car ZtDUifuLGrM_2 bird ZtEDTuHcM9U_0 person ZtM6JRtVtpU_0 motorcycle ZtToUMIMdYE_0 person ZtlDJ70ap8Q_1 bear ZtlJcLPPjsg_0 person ZtsGzhfZg9g_0 person ZttTri7sEK4_0 train Ztyep9o6CLE_4 bus Ztyep9o6CLE_6 bus Ztyep9o6CLE_7 bus Zt9qKAA_xyA_0 person ZuC0Jr3Y3s8_0 car ZuGpcHtPLLA_0 person ZuWlzE4F84c_0 truck ZuhmoYvtP40_1 person Zuicm6_fX9I_1 bicycle Zunjyc7DIP4_2 train ZuoBIQ-Kq74_0 person ZuqXxaMAufU_1 person ZuuL_Yi4FZQ_1 dog ZuuL_Yi4FZQ_0 dog Zuy59kV2M-0_1 person Zu-vh46IwiU_0 cow Zu_dXJvDHdo_0 person Zu_f8xuOweg_3 elephant Zu_f8xuOweg_1 elephant Zu_f8xuOweg_2 elephant ZvDo2WbWL4g_1 person ZvDo2WbWL4g_0 person ZvJItzBdO04_1 person ZvJrqHsPVL0_0 bus ZvSN_Y6vK3c_0 person ZvV5mqJgbcQ_0 cow ZvfCrJvE1Tg_0 horse ZvfIYK-AWCw_0 person Zvlx8vSlAPs_0 bicycle ZvtGPgtfhE8_0 person ZvtuffxB5EY_0 person ZvyOzgxu-4Y_0 truck ZvzVi9irgvw_0 bear Zv6DWiKAux4_1 person Zv9e9Vm6Vis_0 motorcycle ZwDqCxCFpF4_0 bicycle ZwDqCxCFpF4_3 bicycle ZwH5xnh6Thw_0 person ZwW6ybIP8ys_0 bus ZwdSYMz9ioo_0 person ZwmRodW5wgg_0 horse ZwrtmR7ewc4_0 person Zw7a69yU7f0_0 motorcycle ZxAlVbDwlCc_2 bird ZxAuwcxhXxc_0 person ZxE5MjV6i4w_0 skateboard ZxOVw-Lc-NI_0 person ZxStkYy-wgo_0 motorcycle ZxUKijmOWJc_0 person ZxitXAY6Xsc_1 knife ZxqbwwO81Xc_0 train Zxv2BRQIWm0_4 airplane Zxv2BRQIWm0_5 airplane Zxv2BRQIWm0_7 airplane Zxv2BRQIWm0_8 airplane Aj7HWiU0iQg_0 skateboard Aj_E-ObfzoE_1 person AkGYKkcRyPM_0 dog AkHT5Oo22rQ_0 person AkMpnm9JrLU_0 person AkWcVIeIx34_0 boat AkaR-XgClv0_0 person AkaR-XgClv0_1 person AkeAdeJpbpg_0 train AkeAdeJpbpg_3 train AkeAdeJpbpg_1 train Akh0VNTS6G4_0 person Akh0VNTS6G4_1 person Akh0VNTS6G4_2 person Akh0VNTS6G4_3 person AkkNBGH82Ic_0 horse AknHhsIpRqc_0 airplane AkxKeaxEnvQ_0 dog Ak3XQg9z8XQ_0 person Ak8ygMb5ykk_0 person 
Ak8y7dALcJI_0 person AlAUJSBL-e4_0 dog AlNCPdpo1gg_2 bicycle AlNCPdpo1gg_5 bicycle AlNCPdpo1gg_6 bicycle AlNCPdpo1gg_0 bicycle AlNCPdpo1gg_3 bicycle AlNCPdpo1gg_4 bicycle AlPZeADzCKc_0 person AlPZeADzCKc_1 person AlXlVnkucyU_3 train AlXlVnkucyU_1 train AldX05MqOs0_0 person AleuxLN7VcU_1 bird AlfbdsgKBAc_1 person AlhjN5qz_WI_0 train AlikgfDMckk_0 person AlnIWAFamHE_0 bear AltA5vQ7Icw_0 bus AlzB8mXDcYc_0 horse Al2hm71ia6E_0 person Al9l6-4QDz0_0 horse Al9wCTPpSWM_0 skateboard AmPe5gTOCTo_2 person AmPe5gTOCTo_0 person AmPe5gTOCTo_1 person AmQ_UrwLf3g_0 person AmRyW4hmSjw_0 person AmcAzvpvDRg_0 bear AmeaTbvmKvo_0 car Amt8BGudD0w_0 skateboard Amt8BGudD0w_2 skateboard AmuX-Lv7OeM_2 cow AmwvLxALyCw_0 person Am2wElVETcw_0 cat AnD6ijSktyM_0 person AnEC6v3fXrE_0 cow AnOwuTW7DKk_0 cow AnOwuTW7DKk_1 cow AnQ2ZY1JxAY_2 person AnWClR8yyu8_0 person AnZKri0xn-c_1 cow AnZKri0xn-c_2 cow Anb2IyxcJbk_0 horse Anevw4PbqTo_0 person AnkgvW70F5E_0 person AnkgvW70F5E_1 person An342tYqi5g_0 person AoI1hSI0PSI_2 car AoKs5jwMuHc_0 person AoP-So0vjIc_0 cat AoSwFyY0f_A_0 person AoXHZgatpco_1 horse AoXHZgatpco_2 horse AoXHZgatpco_3 horse AoXHZgatpco_4 horse Aof87CGS8NQ_1 skateboard AoiCmKM8xz0_1 truck AojgueRMVCY_0 person AolLjcEFv5o_0 person AopGnIjKuEk_0 motorcycle Ao0EDmBMIQk_0 person Ao0EDmBMIQk_1 person Ao7Iys-_lZs_0 skateboard Ao_b43xexzA_0 person ApJMiJjCxCY_1 car ApJMiJjCxCY_4 car ApJMiJjCxCY_5 car ApP4eoyM72g_1 skateboard ApWIa9pt-vk_0 person ApilCZCROGI_0 motorcycle ApjCOCv29N8_0 person AppgdYQTII8_0 truck Ap1gZJZynL4_0 person Ap-iaHj5SLk_4 elephant Ap-iaHj5SLk_5 elephant Ap-3HonA5go_0 person AqBYSr4wmpQ_0 person AqKP0V3Xj7E_0 cow AqOxDunFl08_0 airplane AqOxDunFl08_1 airplane AqSP11-eje8_0 boat AqUxRBRS-n0_0 skateboard AqZhKjLLG70_2 boat AqdAnSsQLI8_1 person AqdAnSsQLI8_0 person AqlHHwyJypE_0 bird AqmXAZYmPJc_0 person AqmXAZYmPJc_1 person Aqo5yZkzz8I_4 truck AqpinwPH8gM_1 person AqpinwPH8gM_0 person Aqqs8XxA8gM_1 horse Aqqs8XxA8gM_0 horse Aqqs8XxA8gM_2 horse AqqvZzLy3IE_0 motorcycle 
AqsuBaW1L0Q_0 person AqxTv7XRAH0_0 person Aq_n86sub5o_2 bicycle Aq_n86sub5o_3 bicycle ArJNEsuLzDc_0 person ArJaHKwfOEo_0 person ArM6GXi6YnI_1 dog ArbpF1NIm-s_0 car ArbpF1NIm-s_1 car ArfeHbvYvKY_0 motorcycle AriIdq0ZPfE_1 elephant AroxRXjr3po_3 bear ArrB-hbOgf8_1 elephant ArvYqb1hJSk_0 person AryOE3od43M_0 person Ar7WaiToztg_0 person Ar8Wk3m0uZ0_1 person Ar8Wk3m0uZ0_0 person Ar-vOeN30bM_0 cat AsJt3MHLGiM_0 person AsKUm364aHg_0 person AsNy8gmdVec_0 person AsWWfQtZSHA_0 person AsY1dt4QojM_0 person AsZa3il8cZQ_0 person AsfAcK_laZA_2 horse Asix5lGmXlg_0 airplane AskNHLhn1t0_0 cow As_a3CyN-kQ_0 bicycle As_a3CyN-kQ_2 bicycle As_a3CyN-kQ_7 bicycle As_a3CyN-kQ_8 bicycle As_a3CyN-kQ_10 bicycle AtFOIFqxLKs_0 person AtG98YoPQyg_0 bird AtKUkiMSzfs_2 elephant AtKieG766oI_0 person AtawrCflbrM_0 person AtfXsIpaSgQ_0 person AtmVV-8Pjsg_0 person AtmVV-8Pjsg_1 person At0-VpJyfBY_0 skateboard At81P33v_z8_0 person AuA4_FjCMvo_0 person AuJLIGyAoj4_1 horse AuJalbdpJP8_0 train AuLw9iNhPvw_0 bird AuQYS5w13co_0 bus AucK5ZDM060_1 airplane AuchGbKLdmk_0 person Aucxkj3w3nc_0 person AugnPC3tdso_0 motorcycle AunfkfLwN1w_0 bear AunfkfLwN1w_3 bear AunfkfLwN1w_2 bear AutsbWiMLoY_0 person AuuZLhOpxcI_1 elephant AuuZLhOpxcI_6 elephant AvGLANxpJ-Y_1 person AvJexx39uCE_0 person AvOpMSLKXTM_1 person AvOpMSLKXTM_0 person AvP_DY8SuU4_0 person AvQgdEmyoFA_0 airplane AvVBLLWgeWo_0 horse AvdUsPyX5lE_0 person AvdgweWTeeg_0 cat AvgusAC7DUU_0 bird Avlg_B60Z0E_0 bear Avlg_B60Z0E_4 bear Avp80BzoG9Y_1 person Avp80BzoG9Y_0 person Avr6FKguO2o_4 skateboard Avr6FKguO2o_1 skateboard AvvWfbj5x88_0 person Av78r-lWmCs_0 horse Av8Hkyi1fdc_1 knife Av8k98IyQhs_0 person AwAX85eLJH4_0 cow AwDIxdZSWKQ_0 person AwECiro8_h4_1 elephant AwEtKHnfKJ8_1 cow AwEtKHnfKJ8_2 cow AwFA2LuUWN8_0 person AwM3QWX5Jsc_0 person AwOJkAFe8Xs_0 bicycle AwZ6nHwMMuA_0 dog AwqZ_9G0pWg_0 person AwsAA0Xk1J8_0 person Aw-D6USSthk_0 bear AxAIZDsViZw_0 person AxAIZDsViZw_1 person AxAkf4tRXbI_0 person AxLiwCy5umU_0 person AxUFYNgnIq4_0 person Axg0nab1SDc_0 
person AxvrCidcYqM_1 person Ax2iIXU4Gyc_0 person Ax5dd2_2sFA_1 car Ax5dd2_2sFA_0 car AyAAL3Rd_Rg_3 bicycle AyAAL3Rd_Rg_5 bicycle AyAA5q5B-84_0 person AyAA5q5B-84_1 person AyH0zvW0ndQ_1 bird AyKf0Ufaa_o_0 person Ayfmwf4oW_k_0 person AyhXfIgl4Kk_0 knife Ayo9w6aKSY0_0 person AyqiYJuONPs_0 airplane AyqvDNKC1CQ_0 person Ay2VXLYZW50_1 person AzFaa7gRy0k_0 person AzMHek-Oow0_0 cat AzNf4dneWFU_1 person AzVMbaXM_QM_1 boat AzVoOWc-ueY_0 person AzaUz9OpHMI_0 truck AzeA4K-S0CI_1 person Azew3w3WZfI_5 skateboard Azew3w3WZfI_1 skateboard Azew3w3WZfI_3 skateboard Aze0ijK2t2M_0 person Aze_lfqL6mw_0 cow AzhTPVtwJVk_0 person Azh82KkzMVs_0 bird Azh82KkzMVs_1 bird Az0Hr5pa_Pw_0 person Az5vE5ssYxk_0 person Az5vE5ssYxk_1 person Az7glF28oOw_0 person Az_5XR0RSv0_1 person Az_5XR0RSv0_2 person A0JB0OdZ2NE_1 knife A0L6M_8fDyM_0 person A0Nx4JbdXO0_0 person A0PQ6Si3nOU_0 airplane A0XGvY-NO00_5 airplane A0jhzA4HvrY_0 umbrella A0n7dLEgCjo_0 cow A02wb1V5W0A_1 person A02wb1V5W0A_0 person A08TTc4NLik_0 person A1Hvxm2NCpk_1 airplane A1H8wrYSPlQ_0 bicycle A1NBheOGWNE_0 bird A1fdw6WBO_w_0 cat A1oQZf9EXPg_0 person A1oQZf9EXPg_1 person A1oQZf9EXPg_2 person A1r3FpgoeP0_0 elephant A1unjHSiYuk_0 skateboard A1w5Z9ryeJI_0 elephant A1w5Z9ryeJI_2 elephant A1w5Z9ryeJI_1 elephant A11L_7hymDI_0 train A2ODL8T477o_0 umbrella A2UiM17u3Ao_0 bear A2Vhzr_2AAY_0 person A2WfZtUfAy4_1 person A2gisYdnTi0_0 bird A2iD7VC-A9g_1 cow A2p7Z_Ia9Ak_0 person A2p7Z_Ia9Ak_1 person A2rOJWkWoRo_0 person A23nZy9maYk_1 person A23nZy9maYk_0 person A29DgqMHeEQ_0 person A3EcM1p8r14_0 person A3FTEFw2Bo0_3 horse A3JmvJSIxeU_0 person A3Lmb8E3Ovw_0 person A3L2pdrSYdE_0 person A3MpR785VH8_0 person A3MpR785VH8_1 person A3UoQh4P1_o_0 person A3ZIKfh-QPo_0 person A3b1bCXjWWE_1 knife A3eocVVFaX8_0 person A3vXSLx3blY_0 person A4BVLpu2EQI_1 cow A4CYcvyDGec_0 person A4P_7hjid7Q_0 person A4gw9TbmL54_0 train A4ijVvmthCQ_0 person A4oNmb9PiYQ_0 person A4t4imYj0tA_1 dog A4u61iOuzr0_0 person A4u61iOuzr0_1 person A4u61iOuzr0_2 person A4wLmZZODQU_1 person 
A4zzoIg6-W4_0 skateboard A42uEePHr8c_0 person A438LRj4MN0_0 horse A5Ho_qla_bQ_0 skateboard A5Kii0lU4h4_0 person A5ZAKa7xw_I_0 person A5ciZloGW2o_2 horse A5nuZ-mKcBE_4 airplane A5nuZ-mKcBE_7 airplane A5-RNkQ5yzU_0 person A5-yfb7-1NM_1 person A6DfgaqbLDM_0 person A6GND629_dg_0 person A6IIHamstQo_0 person A6KXKalaC7M_0 train A6KXKalaC7M_1 train A6LmIR6_mtk_1 truck A6L7XcS8oF4_0 person A6MkQdxLBSI_1 bicycle A6MkQdxLBSI_6 bicycle A6SipDli3dE_0 person A6Tx9smTdyo_0 boat A6Zbpn5hd6Q_0 person A6jEv9bIawA_1 bus A6rxrML8vyk_0 horse A66pUkVBt_M_0 person A7GxuMCyr50_0 cat A7KLi_xOQFc_0 person A7SDQoaalEY_0 person A7SIvy9srFU_0 person A7Zz2ESO-PM_2 bear A7aEqy5QRJ4_0 cat A7cjjAkLjfQ_1 person A7cjjAkLjfQ_0 person A7coVhNQrSs_0 cow A7c_1Wcr5hM_0 cow A7ltojA7WTk_0 person A729VkZvy_s_0 person A7_WDIFj23s_0 cow A7_hPlvWyGc_0 cow A8F5UnJOU5A_0 boat A8MGPGEOAWk_0 train A8PGaHrBO-g_0 bus A8PlfHNTHVQ_0 person A8RztgyPvCE_1 horse A8U5HWirVCk_0 person A8gL-e9dRa8_2 bear A8oMFSrcteU_0 bicycle A80V1BVUvf4_0 airplane A89eQvkZ4go_1 car A89eQvkZ4go_0 car A89tFE_-szI_0 person A9ACfqLHRIM_0 person A9ACfqLHRIM_1 person A9LEZHrMOh8_0 person A9Mw5uHZ7WM_0 dog A9UlOqoTO3A_0 car A9WAS-oLC8Q_1 train A9WAS-oLC8Q_2 train A9etwHCHkQM_0 person A9fblLjEn7E_1 person A9fblLjEn7E_0 person A9f0bktW-uM_0 train A9sznaQipiM_1 person A9sznaQipiM_3 person A9tOXINxUeA_2 person A-BcgCHWiLE_1 knife A-JRl34Jmok_0 elephant A-JRl34Jmok_1 elephant A-JRl34Jmok_2 elephant A-JRl34Jmok_3 elephant A-MMqq_FLXo_0 person A-R5A0HMT3w_0 boat A-SdlQGGdZg_1 person A-Vo3GQZrd8_0 skateboard A-gQnulNzVo_0 person A-gZpG3OWNM_0 person A-jGPkEGCdo_0 person A-qT3DcitzM_0 skateboard A-0o6fFroLk_3 bird A-1_sR8c39g_0 skateboard A-1_sR8c39g_3 skateboard A-37XpNHfQw_0 cow A_AbA6K8Ouc_0 person A_AbA6K8Ouc_1 person A_B83i3dvWQ_0 person A_CDsn7za4c_1 person A_CDsn7za4c_0 person A_DqzmxTyPQ_0 dog A_Eaoo5O71M_0 skateboard A_Eaoo5O71M_3 skateboard A_Nb1jSK7vY_0 person A_RHSgWC24U_0 elephant A_R7iK_MLgM_0 elephant A_Z7Cj10nKA_0 truck A_aN9LUuMY8_0 
person A_g6G7vBr8I_1 person A_qnLTG_VBg_0 person A_uC3UuAVQE_0 cow A_uxGLJDf9I_0 person A_xtvYH_7vg_0 person A__fHCZfwtM_0 person BACWpC6GdxY_5 airplane BACWpC6GdxY_3 airplane BANdhsMHpw0_0 person BANdhsMHpw0_1 person BANdhsMHpw0_2 person BAOR6YBIb8U_1 skateboard BAO0Uce3vXA_0 cat BARELTt_9Ko_0 elephant BAWN6Xpw7sg_0 person Zx3x1-cBu7I_0 person Zx3x1-cBu7I_1 person Zx8LkdyJzG8_0 person ZyDqefuyQfU_1 cat ZyDqefuyQfU_2 cat ZyNwfXl7s2w_0 motorcycle ZyQL8Ugiq4Y_0 person ZyQxolWsw2o_0 cat ZyQ_gFztNXU_0 train ZyQ_gFztNXU_2 train ZyqvHk5Ugjk_0 bird ZyrTKvb3Uq4_0 person ZyuoNtTPexE_0 person ZywGdneFaWs_0 dog Zyw6pIArS1g_0 train Zy04v73t_oU_0 person Zy4s6kQgRAs_0 person Zy7a1FYT_2I_0 person Zy9BXzUqORk_0 horse ZzAgbPU4qoA_0 person ZzBP5IPOX7Q_0 person ZzBP5IPOX7Q_1 person ZzFvfG2mfRU_0 cow ZzIeftZXBMw_0 person ZzPUlKXnUgE_0 person ZzRMRSyCzzU_0 person ZzS_a0D4AhE_1 skateboard ZzWMnTc1LBY_0 person Zzdl60FMu48_0 person ZzeCPtqruzg_0 person ZzgU7APbNfs_0 person Zzgoobk2eIA_0 person Zzgoobk2eIA_1 motorcycle ZzhCWdZJAQY_0 person Zzic21J3Ea8_0 person ZznEoJsdkVI_0 person ZzpccfyFyL0_0 person ZzpccfyFyL0_1 person Zzq_S3HujTo_0 person ZztD-tmxwyc_0 person ZzwlUbCfscM_1 dog ZzxRC2pLBVA_0 person Zz2oIdSVB6Q_0 person Zz5GwCMuMj0_0 person Z0D6uKz7v5Q_0 person Z0m37r4St5Q_3 truck Z0pLWU6Wg-o_0 dog Z0stjlmfTpU_0 cat Z0xYA5PwrjI_0 person Z02r-T2hINk_0 elephant Z04k6LBSuRk_1 person Z1G9pYdQwCY_0 person Z1HK6zDIJhg_0 person Z1MvNM4bmxs_0 person Z1SML4zVPik_0 person Z1U7Wnf_WiA_0 cat Z1XafO8l8gs_0 person Z1aU1CigISE_0 person Z1a8Tqg-yjE_0 person Z1e-5FLWf6I_0 cat Z1gxFkBk4EY_0 horse Z1j81keSb9Q_0 motorcycle Z1j81keSb9Q_1 motorcycle Z1nr46t7EVk_0 airplane Z1pv5a0as9c_0 train Z1rB_fu2lKY_0 dog Z1x8sEeQIuI_1 motorcycle Z13O2uGP1nE_0 car Z14p6heAJRc_2 person Z14p6heAJRc_0 person Z14p6heAJRc_1 person Z15QqHX1Z6M_1 train Z2HF5_tyxR4_0 bus Z2K03YbfcGg_0 elephant Z2QWOKCHkM8_0 cow Z2QWOKCHkM8_2 cow Z2QWOKCHkM8_1 cow Z2SljfwK58g_0 skateboard Z2SljfwK58g_1 skateboard Z2VI7eM7BB0_0 bear Z2acpS-e_cg_0 
person Z2cvYI55Dps_0 skateboard Z2dab1zmqv8_0 horse Z2gvlPrX5HA_5 elephant Z2gvlPrX5HA_6 elephant Z2kcVxTMZtM_0 person Z2n2a39MxJQ_7 bicycle Z2n2a39MxJQ_1 bicycle Z2n2a39MxJQ_2 bicycle Z2n2a39MxJQ_3 bicycle Z2n2a39MxJQ_4 bicycle Z2n2a39MxJQ_6 bicycle Z21DONVXY1Q_2 zebra Z23Gg06mNj8_0 person Z236ql8Tpvg_0 person Z23_3K28VSI_1 giraffe Z3AHrAB9qhw_0 cat Z3AplkSO6kA_1 car Z3KMX_N6WSg_0 person Z3KMX_N6WSg_1 person Z3KMX_N6WSg_2 person Z3PzgfwbjLk_0 truck Z3i5sys0boU_0 person Z3i5sys0boU_1 person Z3sRLCOCxMY_0 cat Z37dIpwPIqI_3 bicycle Z4DQoYcs5mM_2 person Z4DQoYcs5mM_0 person Z4DQoYcs5mM_1 person Z4XLmQjbg7Y_0 person Z4XLmQjbg7Y_1 person Z4ZKg0KbSm4_0 bicycle Z4ZPyzSGdRU_0 dog Z4bO8cpjQZI_0 person Z4bO8cpjQZI_1 person Z4bW8HHeYP8_0 car Z4mYWGPFVkw_0 person Z4n5ieSA6cM_0 cow Z4tOSluXWnE_1 umbrella Z4u3PPkCYOs_0 person Z4u4zasFeAw_1 bird Z4u4zasFeAw_0 bird Z4vRtZE1WjQ_0 dog Z4voZ3h_Dyk_1 person Z4xVMaYAqJ4_1 bicycle Z446P08C8vE_0 person Z5KGx49qaAE_3 bird Z5KGx49qaAE_5 bird Z5KGx49qaAE_6 bird Z5Qo8xdb8os_0 elephant Z5RKMhlNHEE_0 person Z5ZBRI0sc4Q_0 bicycle Z5iJRTvm-Kw_1 person Z5iV683VDk0_0 person Z5ls93B1bBk_0 person Z5mQ_0ttu74_1 elephant Z5mQ_0ttu74_2 elephant Z5yNMm-TIjI_0 bus Z5zGHZ82r9A_0 person Z53B8-gR640_0 person Z6BVtmEMfkI_0 person Z6FikDWrKkA_0 person Z6MfvYa9hCs_2 car Z6MfvYa9hCs_3 car Z6PyYboRq5c_0 dog Z6Q3LdMwgi4_0 cat Z6WrlM4ZZKA_0 person Z6j-7La25S4_0 person Z6j-7La25S4_1 person Z6j-7La25S4_2 person Z6k1unwmsfA_1 person Z6sd800eFC4_0 person Z6tGpP8q53A_9 elephant Z6tGpP8q53A_2 elephant Z6tGpP8q53A_4 elephant Z6vCDHs6NrM_0 person Z6yNyxXPPOw_0 elephant Z60iXtKpGMQ_0 bus Z61B0fShfbs_1 cow Z7AqkWEBwV8_0 person Z7DGMMQP79U_0 cat Z7I8r1AqMhU_0 person Z7JHCdt48hA_0 airplane Z7KEzuE_7hQ_0 person Z7LfnFm4OHs_0 person Z7WaJYiX_1o_0 person Z7WaJYiX_1o_1 person Z7bMdjLGiAo_0 person Z7eGCBjkKrU_0 dog Z7gxE6ZSQXI_0 airplane Z7iq45DtCTM_4 horse Z7iq45DtCTM_5 horse Z7zeXJ5lJRY_1 person Z7zeXJ5lJRY_0 person Z72sIqrQAF4_0 skateboard Z74EGXvFjFM_0 person Z76Y_PNOgK4_1 
person Z76Y_PNOgK4_0 person Z78P87kjtu4_0 person Z8CXvEObu4c_0 dog Z8NfZN7WDKw_0 person Z8Oi5HJEyS4_0 skateboard Z8k0TTq5BC8_0 horse Z8s-Kg1PuSg_0 horse Z86E7eIS9t8_1 airplane Z89mG68LE2k_0 person Z8942_IPiTo_0 bicycle Z8942_IPiTo_2 bicycle Z9SwanypLJM_0 bear Z9SwanypLJM_1 bear Z9XS4cvVVy4_2 person Z9awHnw5J4o_0 truck Z9bt3xT5dCc_0 cat Z9f--QLEQqI_1 motorcycle Z9jDpr533Cg_0 cat Z9o5BEm1UeI_0 person Z9pHCguAO5c_0 person Z9wO9tftNG0_0 bus Z9x_cPvKErA_0 person Z98EscJ1IG8_0 person Z98GFnZo-LA_0 person Z-I0S45eRT0_0 person Z-J0UQfvb5M_0 person Z-MvTXpMdm4_0 truck Z-PMnTjqAS8_0 person Z-QO3lrbh7c_1 skateboard Z-VVWO3Ovgs_0 person Z-djkrj-5Cs_0 horse Z-glDeBd2xA_0 boat Z-lrIzXr9ck_0 train Z-mTl_ipVa4_0 umbrella Z-mXYrvubn8_0 dog Z-zy-BzjLT0_0 motorcycle Z-zy-BzjLT0_1 motorcycle Z-7W_lh96xg_1 airplane Z_JXyC6v_-s_0 person Z_KItWz0mTI_0 elephant Z_PViIzihe8_0 person Z_QVuM8wEmQ_0 person Z_QVuM8wEmQ_1 person Z_kPrUEqYXE_0 bird Z_p4gYNjwG0_0 person Z_85vV3FHUg_0 person Z_85vV3FHUg_1 person aACqXYewohQ_0 person aAI7SN5_3CY_4 bus BAhHrnCKvcM_2 boat BAhHrnCKvcM_3 boat BAhHrnCKvcM_5 boat BAmy5TQke7w_0 person BAnfbsB8rIY_0 bear BAnn4L-iNLE_0 person BAq_fnyQ6z4_0 person BA4ZGv8flRA_0 person BBCBbdz3Qvs_0 dog BBCBbdz3Qvs_1 dog BBLAyHVLHh8_0 person BBOd-YBAUgw_0 bicycle BBPlqTbAphY_1 person BBQ2xu9OehQ_1 dog BBS5owVJaTU_1 skateboard BBS5owVJaTU_0 person BBVPb5z0x7k_0 cat BBXs1J4j2mA_0 skateboard BBdA1qc9H-g_0 skateboard BBk7ZnOEjMA_0 person BBopEl_n3Fc_0 person BBpFu8j2fBc_0 bus BBpFu8j2fBc_1 bus BBqTHwpYeEc_0 train BBrfgTTduuI_0 person BB9l_znmPls_0 umbrella BCBCK2k2Bdw_0 person BCBgjRWuOcA_0 person BCGB6zaBDpg_1 person BCGB6zaBDpg_0 person BCI91i3aEek_0 motorcycle BCJbf6um28s_1 airplane BCKVauIBDFM_2 bear BCin0MjzM8Y_0 cow BCoTKGNhMVw_0 dog BCoTKGNhMVw_1 dog BCo8e6n2dYQ_1 dog BCqYnyGIols_1 bicycle BCsmPvRqaNk_0 person BCuzA73UTl4_0 person BCwAdqAouFU_0 boat BCwyoTwckSE_0 truck BDFBV8JbIF8_0 person BDFVkc87amI_0 person BDHUAJn9nnc_0 person BDHsXkbkS-w_0 skateboard BDOemJGz04I_1 
person BDcTOMebCHs_0 person BDcTOMebCHs_2 person BDcTOMebCHs_1 person BDdIKtFwnjA_1 train BDdbk3ZQrP0_0 cat BDdhenNSY9o_0 person BDk-BklqSdI_0 person BDroGke9Ogg_0 horse BDroGke9Ogg_2 horse BDtGFVFexaU_0 person BDzXi4ukhN0_1 person BDzXi4ukhN0_0 elephant BD30MTvTuYU_0 person BD7TQWBytfQ_0 knife BEArUGKSB-Y_0 train BEArUGKSB-Y_1 train BEKMcritl6M_1 person BEMcwkY2beQ_0 person BERvmKL4Glc_0 person BESdHwoIDsA_0 dog BEUB64a3AIY_0 elephant BEUB64a3AIY_1 elephant BEYy-ZRSWSk_0 skateboard BEa_8wp0528_0 cow BEqG56tHTEI_2 bus BEqPniAgjaY_0 cat BErty5GnulU_0 person BEuXjB1zLeE_1 car BExSp8l17GY_0 person BExlFv0scM0_0 person BE10HJUHUHw_1 person BE8KS4PZH54_0 elephant BE-crlUXSSE_0 dog BFC3DWxOces_2 airplane BFC3DWxOces_1 airplane BFC3DWxOces_3 airplane BFC3DWxOces_4 airplane BFC3DWxOces_5 airplane BFJ4v-XlKAg_0 skateboard BFPQCoJqTRk_0 person BFeIwErwdS8_0 person BFeIwErwdS8_1 person BFggPKKt6wk_0 person BFggPKKt6wk_1 person BFhh8z0Fmk0_0 person BFponHgVsdA_0 person BFs239KuGa8_1 person BFxUyTrqZhU_2 horse BFxUyTrqZhU_4 horse BF4YTMGtDs8_1 skateboard BGAQlsAiJ_0_0 airplane BGAQlsAiJ_0_1 airplane BGAQlsAiJ_0_2 airplane BGAQlsAiJ_0_3 airplane BGAQlsAiJ_0_4 airplane BGAQlsAiJ_0_5 airplane BGAQlsAiJ_0_6 airplane BGLM4yl_Ka4_2 horse BGO3DBbNozc_0 skateboard BGR1gMrCTpA_0 person BGT-p0CgoFg_1 person BGW9SDHTWKY_1 person BGW9SDHTWKY_0 person BGee3Ar-Fbg_0 airplane BGpx9Xow9Ew_0 cat BGqNnzNtWkc_0 person BGq6TeZHkLU_0 elephant BGshZfVDb5w_0 person BG4QyYPKYvg_0 person BG4QyYPKYvg_1 person BG_x-4YUtFE_0 dog BHA5UUg4lCw_2 train BHH2sTfHwks_0 person BHH2sTfHwks_1 person BHPSyq8L5S8_1 person BHQkdwmXrtI_1 skateboard BHQkdwmXrtI_2 skateboard BHYrJ1yaM-w_0 car BHdbqcxv3Vw_0 truck BHfXgxJCcrw_0 boat BH5fxWFpHvE_0 airplane BH5npOcPlY0_0 car BH6nqU68dWo_0 person BH74QV_0vtc_0 bird BH9Ob6Uiw1w_1 person BH_SlBCiQ_8_0 person BIETPRRGGgY_4 elephant BIETPRRGGgY_5 elephant BIIU36E15Vo_0 person BIMggdk7AHQ_0 cat BIQeL2o_Ogg_0 person BIUQ935UkDo_0 cow BIVLmUTNYbk_0 person BIV-1bNQ7pI_0 skateboard 
BIfqcruNiic_0 person BIkDAHYmcFw_0 person BIkDAHYmcFw_1 person BInC--gFqHM_0 person BIvTK9qvP1w_0 skateboard BIxCP9ck4-8_0 cat BI5i3aDb_FQ_1 person BI-kr0tFSDg_0 person BJIZYdOZHzg_0 umbrella BJK_SXpLtnI_0 bird BJMP05du3Eg_0 person BJQstPOa8Wk_0 person BJS2YLbErJg_1 person BJfRrRcfmF4_0 skateboard BJf9nFjqLvg_1 bird BJlcWhfsg_g_0 person BJriJT6zJl8_1 skateboard BJwoZcHbBK0_0 umbrella BJ05o1_UKzw_0 dog BJ44CIPaDf8_0 person BKAo6GZ_kNs_0 train BKTCaKgjiag_2 person BKUKi0vTt0A_0 person BKdSO_PNJ4U_1 person BKdSO_PNJ4U_2 person BKdSO_PNJ4U_0 person BKl0wLRzoD8_0 person BKw9UQxZ3a8_1 horse BK-rIrwen6U_1 motorcycle BLB0F-XD8IA_1 person BLB0F-XD8IA_0 person BLEdcnrUmEo_0 cat BLE9cZ8L3a0_1 skateboard BLFYe-dU9ZU_0 airplane BLO7KJUu8t4_0 elephant BLSwwE9mtTQ_1 knife BLcOGv-0-dc_1 dog BLfmgLou27o_0 cat BLvowRU6z7s_0 bird BLxsg2_sjDM_1 person BLy6RcifNl0_0 bus BLy6RcifNl0_1 bus BLy6RcifNl0_3 bus BL6tcorHrT4_0 bicycle BMH2ReDeKuc_0 person BMUnKa8FUGQ_0 person BMUnKa8FUGQ_1 person BMavrQABR1Y_0 person BMa4xJ1U3Zk_0 person BMbZc-jxEfo_0 person BMbZc-jxEfo_1 person BMfsf9tDz8o_0 cow BMfsf9tDz8o_1 cow BMhy1f7EuXM_0 elephant BMptIGI1Il8_0 car BMuO2fjJoOw_0 car BMweJTmvCBg_0 person BMweJTmvCBg_1 person BMypDovEOEE_0 person BMypDovEOEE_1 person BM0QiiStqd8_1 skateboard BM6XrBQQ7NE_0 person BM6609PpfO0_1 person BM6609PpfO0_0 person BNGDM8sFM8Y_0 person BNIVhG5pZh8_1 dog BNJwAx3eUKc_0 person BNK68rC7RdI_0 umbrella BNTS3OPHAP4_0 horse BNXKRPSr66c_0 person BNXKRPSr66c_3 person BNXKRPSr66c_1 person BNXKRPSr66c_2 person BNbPQGMLs2w_0 person BNbPQGMLs2w_1 person BNbSUPI8feg_0 person BNcj3161E9o_0 person BNeWUyqXAC0_1 airplane BNmMB68b1PA_0 person BNnVfaIfBx0_0 airplane BNnVfaIfBx0_1 airplane BNyK_4tt2fg_0 car BNybc47kPjg_0 person BN1HT0FOOhI_0 dog BN7YfmbYuVs_0 elephant BOE82LEqzWw_0 cow BOF3tFvEu0o_0 person BOHE8JNUcQc_0 boat BOMeyjZNH5k_0 bicycle BOQiuL9QlIo_1 person BOUcPea33eY_2 skateboard BOfgzvAgVQw_0 bus aAMhdGuR5DE_0 cat aARa5-CLhG8_0 person aAVaqjgY1m8_1 person aAZ2fVjhcIE_0 person 
aAj0EN1Rnc0_0 bird aAj0EN1Rnc0_1 bird aAlTiBaLr8M_0 person aAmVIu8X7p4_1 person aAma36YlaAo_0 zebra aAsr-Rf6rEE_0 person aAsr-Rf6rEE_1 person aAuz7EfR_fU_0 cow aAyTLM_PmzA_0 skateboard aAzpA1iK_bE_0 person aA0FrWtkjXk_0 person aA3okCsYx6Y_0 bird aA5DYzky6o4_0 cow aA8Tz4nZ99g_0 person aBBtHXQoEtM_2 person aBBtHXQoEtM_1 person aBQm5kN1TfY_0 cat aBexNnNkORk_0 airplane aBq4NF1upak_0 person aBvvXrP1BJs_0 person aB-tGXFmyFU_0 person aCQAel27T4o_2 person aCSzhpU1heQ_0 cow aCXfvvg8CF8_0 airplane aCiDDC9KFS8_0 motorcycle aClye1Ctc9E_3 truck aCl98J6O9Hk_1 person aCuXZ3LmfSo_0 person aDGpg2xtDk8_1 person aDRE08tF2Wc_1 bus aDTQRnSeu_E_0 skateboard aDTTYd0Z5Vk_1 person aDjhOS5Xa9Q_0 boat aDmLwCb_o30_0 dog aDtJSv7XR90_0 car aDte-e70l7U_0 cow aDte-e70l7U_2 cow aDte-e70l7U_3 cow aDt4Puik-kU_0 horse aDwTy9yiOms_0 umbrella aDxRlCI40wo_0 person aD2q00X0-eg_0 person aD2q00X0-eg_1 person aEJy28mvKPk_0 person aEJy28mvKPk_1 person aEMPa2NvIl4_0 horse aERed6pg_h8_0 person aER-VrHLWwY_0 person aER-VrHLWwY_1 person aEZ9vBpXNKU_0 person aEw_vtKlegE_0 elephant aExRtJpfZEs_0 knife aE1veVneq04_0 person aFC2Zy2-0dY_0 person aFFKeUdtPcQ_4 knife aFL2V522q9A_0 person aFZ03eEOZFE_0 bird aFbVlCimys8_0 bird aFdPuo5xB-c_0 person aFhKp8gVZSE_0 person aF86vrld8V4_0 person aF-CmWo8ooM_0 person aF-CmWo8ooM_1 person aGAB6WQFklc_0 person aGE8AphnkNU_0 knife aGGiVuwB1p8_0 bear aGY3LCiYRnQ_0 motorcycle aGgnovv6T3U_0 dog aGgxdwCpAN0_1 horse aGhNzJSHCOU_1 knife aGmxZatPe60_0 person aGmxZatPe60_1 person aGuWVv6XS8Q_0 person aGuWVv6XS8Q_1 person aGwPRbsru-4_0 cat aGxOl5SXjtM_0 person aG1c8x5Dl-w_3 bicycle aG1c8x5Dl-w_2 bicycle aG1c8x5Dl-w_4 bicycle aG20iwkTd_o_0 person aG6D_te6V3s_0 person aHEFx7Zz6E4_0 person aHb4yEpCinw_0 truck aHiGSUMMfBQ_0 person aHnMWEvjLzI_0 car aHrTcxckS-A_0 person aHrTcxckS-A_1 person aHsgQAyd8ss_0 person aH2ZxImdwaU_1 motorcycle aH2ZxImdwaU_2 motorcycle aH5Cd20kdJw_0 elephant aILjXrLJpHw_0 umbrella aIQf8LQ5QPU_0 person aISEbZGZH68_1 car aITryMUZ2b8_0 person aIUYT8pblHs_0 truck 
aIU5E5tHvdc_1 person aIVWVNBI-n0_0 elephant aIcFi8LMv0w_0 airplane aIjLf6T_K3o_1 bear aIoZO3mu_tQ_0 person aI311E3BWwI_0 elephant aI7axTZFW4A_0 truck aI80ysvYFG4_0 person aJChqX9Ki8A_6 airplane aJChqX9Ki8A_1 airplane aJChqX9Ki8A_2 airplane aJChqX9Ki8A_5 airplane aJN9lRsvUv8_0 person aJQ9scZQmz8_0 person aJTABCCQtK4_0 horse aJYmkpuijrk_0 motorcycle aJYurtxV0Og_0 train aJYurtxV0Og_1 train aJcPyWppCcI_0 motorcycle aJgpAyFnpeI_0 cat aJ0dUcEIE_U_0 person aJ1SzcgNcxI_0 cat aJ8w4L7E368_0 person aKLf2yC2diM_0 car aKMqeCkIJSg_0 person aKOMIxz2RsM_0 person aKOMIxz2RsM_1 person aKiwOUy71Lo_1 person aKiwOUy71Lo_0 person aKqrwq-Sigg_0 skateboard aKtBD-3wFMA_2 bear aKtBD-3wFMA_1 bear aKu-1-TFl1g_0 knife aK-rgio7orw_2 bus aLDq7roX-SU_0 cat aLFDqtBMblI_0 cat aLFxGnCM1zs_0 person aLIa7x90hQc_0 person aLUSnANtUlE_0 airplane aLX9cIe12C8_0 skateboard aLZAMgiWcXk_0 bird aLZ0lbLzg8Y_0 person aLZ0wCY2j2s_1 person aLeeoZ1uVcc_0 boat aLjomcNk9fc_0 person aLj4N9Tp6C0_0 skateboard aLj4N9Tp6C0_1 skateboard aLo-gekX9j0_0 person aLo-gekX9j0_1 person aLuNNRUC09A_1 bus aLuNNRUC09A_6 bus aLvCIWJQJbY_0 car aLvg1CWrY0Q_0 truck aLxJ8T4CFuM_0 person aLzL_Gldhzk_1 person aLzhO0EqNcc_3 horse aL6H2Jatw0k_0 cat aL70_drPJtA_0 train aL8hELYDnTc_0 person aMAKznXul5M_2 knife aMAYLrcEnZY_0 bus aMAeSegIdJg_0 person aMAeSegIdJg_1 person aMHtvIvWTBU_0 bear aMNbQ1Cl5GY_0 motorcycle aMRtQFBcLNM_0 person aMX0jhSq6UY_0 person aMb78Ixlbfw_0 skateboard aMqHsdXJ7UU_0 person aMzZxN9uvMc_2 horse aNB5rIhRL7g_0 airplane aNEpBEnAUhw_0 motorcycle aNF18KgxGHA_0 skateboard aNJuTWrnIfo_0 person aNJuTWrnIfo_1 person aNKleFpxS4M_0 person aNKleFpxS4M_1 person aNNWNDoOM_4_0 person aNNWNDoOM_4_1 person aNOXvvKZ3qU_0 person aNZMe4tov6w_0 cow aNdJrRu4imo_0 person aNjs-khPjiU_0 person aNj1xwowXYU_0 person aNqkQnGfWEc_2 skateboard aNqkQnGfWEc_0 skateboard aNwIHwPqFPc_0 car aN4Na3OaY4I_0 bicycle aN4NmH-GafU_0 person aN770kOQCD8_0 person aN82X1hXgEE_0 person aN82X1hXgEE_1 person aN9XAd7-rzE_0 person aN9XAd7-rzE_1 person aN_3Pwk-7oY_0 
person aOHPVt_93RE_0 bicycle aON6RKmi-YQ_2 train aOPbvY62dMQ_0 airplane aOQ-8RoQYEU_0 person aOQ-8RoQYEU_1 person aOQ-8RoQYEU_2 person aOW81s5KlyA_0 person aOcGv3kcyhg_0 bear aOcGv3kcyhg_3 bear aOjjUIWuG6Q_1 elephant aOp2NlwNeoY_0 cat aOz0l6mLHmA_1 dog BOlBcGufEU8_0 person BOlBcGufEU8_1 person BOmgqlRxGlM_1 person BOmgqlRxGlM_0 person BOnvGIZd58M_0 person BOowRuwiNhU_0 person BOowRuwiNhU_1 person BOr7CffDWEU_0 person BOsNz8L3PXI_0 person BOtfIOm5kag_0 dog BO1T_-iFGdM_5 bird BO1T_-iFGdM_2 bird BO1T_-iFGdM_3 bird BO3UKxe7nyo_0 person BO5EdP_PO9M_0 person BO7sWBaaL7g_0 person BO7sWBaaL7g_1 person BO-3uvHhUdI_0 person BO-3uvHhUdI_1 person BPBBMIdFoiE_0 person BPEwUVhfaOk_1 knife BPVpq7UrI-k_0 person BPX5EquoyCU_0 motorcycle BPX5EquoyCU_3 motorcycle BPX5EquoyCU_1 motorcycle BPX5EquoyCU_2 motorcycle BPiWTYUA7eI_0 person BPjkQ-lEqcw_0 person BPrrZpiDdo4_0 cow BPsTDg4C4o0_1 person BPsTDg4C4o0_0 person BPxPfFzwlQA_0 truck BP-GGAbCOhE_1 bus BQDxNNWRtas_0 car BQDxNNWRtas_1 car BQEzj9pP1SU_0 person BQIO94PF6RE_0 person BQIO94PF6RE_1 person BQVcvMWyWpU_1 person BQZGptzIdjE_0 cow BQgPk0vRreM_0 bird BQgPk0vRreM_1 bird BQgPk0vRreM_3 bird BQgPk0vRreM_6 bird BQgPk0vRreM_9 bird BQh5Ib9nynM_0 truck BQtDUi4BxRg_0 person BQwLGv7fgQg_0 person BQxCcefrjSk_0 cat BQyowuIZqFQ_0 person BQzzKQ9ejzw_1 knife BRCb183ELe0_0 person BRHPsi_0nTg_0 motorcycle BRQiSnowTss_0 horse BRVNuDR5WzI_0 cow BRcQS0dQqEU_0 car BRfegSv5VEk_0 person BRfegSv5VEk_1 person BRi_AMaK3kc_0 dog BRjvUtQdukg_0 horse BRlWBt4WHdU_1 horse BRnsmPzoEsM_0 skateboard BRtCCpXG_N8_1 elephant BRt1o8xqxFs_0 person BRt5hLASRMU_0 bird BRxrw0-skYM_0 elephant BR0SGq2ioqU_2 train BR0SGq2ioqU_7 train BR1gOlJPEdk_2 elephant BR8cOV8KYX4_0 person BR-XwELzLV0_1 dog BSDy_dzOSS4_0 cow BSHg9I0V6Yc_2 bus BSJgV2iO0jc_0 person BSOCno_3bfI_0 person BSSyaPq1EoM_0 train BSWNCcyXeR4_1 horse BSWpwtIPQ9U_0 elephant BSWpwtIPQ9U_1 elephant BSWpwtIPQ9U_2 elephant BSWpwtIPQ9U_3 elephant BSqz3i60KPw_4 bicycle BSqz3i60KPw_1 bicycle BSqz3i60KPw_2 bicycle 
BSutEBx3H4A_0 truck BSvCnoryvn4_0 elephant BSyxB7X9SH0_5 truck BSyxB7X9SH0_7 truck BS1lexD0ugY_1 person BS1lexD0ugY_0 person BS5mJ0Y7Rys_0 person BS-S0nYSwkQ_0 person BS-S0nYSwkQ_1 person BTBmlFGHK-8_2 person BTBmlFGHK-8_0 person BTBmlFGHK-8_1 person BTKLizyvgcA_0 person BTR83oP1vpo_0 person BTlwglCdzOk_0 elephant BTpBteZfK7Q_0 cat BTxSuijXVPY_0 person BTywlpNCABw_0 cow BTzWqg8vHQI_0 car BT9sKGDb0Qw_0 train BT9sKGDb0Qw_1 train BUF45g7KGB8_0 motorcycle BUX8raEGFZk_0 dog BUX8raEGFZk_2 dog BUX8raEGFZk_3 dog BUY-_l8_v9s_0 person BUZ7x7JaQ1k_0 person BUrMlyUBryI_0 horse BU4SnrK9UiY_0 horse BU4SnrK9UiY_2 horse BU4yiA6qKAQ_0 bicycle BU5PaU-UTss_0 person BVAi_zqhIeg_1 person BVCe2emxuTQ_0 horse BVFYmsvoNTA_0 cow BVS5Q8eBmRs_0 person BVWEvs3lq0Y_0 person BVWEvs3lq0Y_1 person BVXMpcHTg80_2 motorcycle BVm9KRW0iu8_0 motorcycle BVo3XdFnAJM_0 horse BVxr6TGFsMQ_1 person BV5tXmVwddI_1 person BV-UtDJNS2w_1 motorcycle BWA5eWlt6Lg_0 car BWFYpOE-8yo_0 person BWcaU8lR4rM_0 person BWdhK5cwgt0_0 bus BWjRZ-aKRX4_1 person BWlnPrI8FLk_0 person BWnFU-Li_8E_0 person BWn3QGOyZJc_0 elephant BWn7EPWkJ2I_1 bear BWp2oVJMG1A_0 person BWqYVuIKaNA_0 person BW5r0Kv6h2U_0 boat BW56O_QhBmc_0 person BW7uP0jcst8_0 horse BXA3uMFAA9M_0 cow BXCd65rDsk4_0 dog BXCrD4eGGWw_0 person BXHktSPnW24_0 person BXTGSkuESqU_0 person BXUL3aLVZM4_0 person BXWXLNGacmc_1 motorcycle BXWXLNGacmc_0 motorcycle BXdMv9s3Rtw_0 person BXiQhR0Zj70_0 person BXrwbMjK_ZU_0 train BX8AJD8uL3U_2 person BX-SAZsC6yc_2 knife BX-SAZsC6yc_4 knife BYQfvvAP9rY_0 person BYRNeh3RRZs_0 person BYS-DmtMpWE_0 cat BYVhHLCSZ_M_1 dog BYYakMVK6Ko_0 person BYi8dYVDYak_0 person BYkytpBqzHQ_0 airplane BYq45niURL8_1 truck BYq45niURL8_0 truck BYud6fy8t8A_1 knife BYud6fy8t8A_0 knife BYud6fy8t8A_2 knife BYud6fy8t8A_3 knife BYxg5sQjvQ4_0 person BYyATiWsxZs_2 car BYyATiWsxZs_0 car BYyATiWsxZs_1 car BYyrXwDFF5U_0 person BY0XhpATtuI_0 umbrella BY2Fs4KDDbU_0 motorcycle BY7KYQ_Qf3Y_0 cow BY8mmPl_K_A_0 person BY-5sA1BbFE_0 dog BY-5sA1BbFE_2 dog BZDa7e9EFvI_0 knife 
BZERyxrpvg4_1 person BZIzw3XdAgI_1 person BZI3ovXxotQ_0 knife BZeIe9Nkb1E_0 cat BZgZ1H4t3hQ_0 person BZgxjWSM7Vc_0 bicycle BZhfYzqKuu8_0 person BZkYWI_qxz4_1 bird BZldivEoOo8_0 person BZli_iMMV8k_0 bear BZli_iMMV8k_7 bear BZ94WX4wHn0_0 skateboard BaDQg_CCQpU_0 person BaDQg_CCQpU_2 person BaHS1WcgbbE_0 bird BaHS1WcgbbE_1 bird BaJTQLa-vuU_0 person BaOQYsYuC6A_1 elephant BaRsW_taGVY_0 cat BaWQb_lSjYs_0 train BaYLeM_yk_Q_1 skateboard BafH7BetIyk_0 person BakCr5HeDNE_2 boat BakCr5HeDNE_0 boat BauKE-faLzM_1 person BavQVUFfmBU_1 person BavQVUFfmBU_0 person BavoG7kb0wo_0 car Baxc5TW06FU_1 knife Ba1sC-X1OF8_0 person Ba1sC-X1OF8_1 person Ba2T3joy6BQ_0 person Ba3CWVKFpBE_0 boat Ba5BO-nvDnE_1 horse Ba-SiAqH09k_2 truck BbAdBjyFFEA_0 bird BbAdBjyFFEA_1 bird BbAdBjyFFEA_2 bird BbEfZ9mUKOY_0 cat BbOabnT5V-E_0 person BbQyfmZx-2Y_2 bear BbRarKH6D_Q_0 horse BbYZ7Ee3Ixs_0 person BbYqjT1OzLY_0 person BbYqjT1OzLY_1 person BbfOXQD21Ac_1 motorcycle BbnSU5sRdBs_0 person BbnxzNL5tMk_0 person Bbq8h83cFE8_0 person Bbu_YM_GBG4_3 bird Bbu_YM_GBG4_0 bird Bbv9Y9Goufk_5 elephant Bbv9Y9Goufk_0 elephant Bbv9Y9Goufk_1 elephant Bbv9Y9Goufk_2 elephant Bb4uwSjmtKk_2 bird BcHl4OuJLT4_0 person BcHl4OuJLT4_1 person BcSXX5O_YDw_0 bicycle BcVn38vI_Zk_0 person BcV5QdDIrMg_0 person Bcg-TsdpO-Q_0 person BcjVHV-6WWM_0 person BcjZaclf1m0_3 bird aO4uLNN4Gt0_0 bear aPCEyodWBU4_0 person aPPUf7JUJRo_0 person aPf5SoOgmhQ_0 motorcycle aPheJtUTSps_1 boat aPm89i_7aKs_0 train aPm89i_7aKs_1 train aPswSvCaFDQ_0 elephant aPvqWgeR03U_0 person aQAieL0LKIo_0 horse aQB2gAnqQi0_1 person aQGQKDLwRqM_0 person aQVn7fJi_l4_0 cat aQaKnTZ4hDg_0 person aQfQqr5W5uI_1 truck aQfQqr5W5uI_2 truck aQfQqr5W5uI_4 truck aQlLjT95Hgs_3 horse aQub6VGWKzQ_0 car aQzKS5Sn9u0_0 person aQ1c75hfANo_0 person aQ6larydXgI_4 elephant aQ6larydXgI_0 elephant aRBWB79BIIg_1 umbrella aRHGn50eToQ_0 bear aRQQ75s9Ni4_0 boat aRRUAfurxVU_0 person aRcw_PTSf4o_0 person aRdAN9jVvqQ_1 dog aRnJ4lIPIL4_0 bus aRueDRgWEOs_0 truck aRzwrPXsTRI_0 truck aR6P3PtMIZc_0 person 
aSDuIU0pzYY_0 person aSH88cb0kww_0 person aSMzQpOjAc8_0 train aSUtY_pSN0k_0 bird aSWGbO-Nfcg_0 train aSWGbO-Nfcg_1 train aSb-LY3vBsg_0 giraffe aSkBoJ55w2Y_0 person aSqwAZJaQIk_0 bus aSqwAZJaQIk_2 bus aSsjyvISV94_0 train aSw1yhbXHuA_0 elephant aS2Zw7-j7p4_0 car aTBr31jkThQ_3 bus aTOn74Inw24_0 bird aTR3FylgTkA_1 person aTR3FylgTkA_2 person aTS8hur_yyo_0 person aTcDiEXEhhk_1 horse aTdIOtWasSE_0 person aTeFjqoG9fM_0 person aTeFjqoG9fM_1 person aTj38bNIsQo_0 cow aTvgsqSb5aA_0 person aTvoRXrEvG4_0 bicycle aTvoRXrEvG4_2 bicycle aT3idINTybY_0 umbrella aUFHlj5AVrU_0 person aUNlQPWMFHo_0 car aUQh47P34C0_0 person aUQh47P34C0_1 person aUX-HZraWQs_3 zebra aUh41vv5vdE_3 train aUh41vv5vdE_0 train aUh41vv5vdE_2 train aUv4LjbJxLs_0 bus aU5AZMYHZ2o_0 dog aU5tePXE5qE_1 elephant aVFbcdQrobU_0 person aVGtibXVt40_0 train aVMpwmT7ojA_0 truck aVPIHMyNEw8_0 truck aVZJ8qaxG3s_0 person aVif6Qc9Prw_0 cow aVknWcQimJA_0 bus aVm9jp_ttsk_0 elephant aVm9jp_ttsk_1 elephant aVm9jp_ttsk_4 elephant aVm9jp_ttsk_5 elephant aVm9jp_ttsk_6 elephant aVm9jp_ttsk_7 elephant aVm9jp_ttsk_8 elephant aVo-jvGoUGs_1 boat aVo-jvGoUGs_0 boat aVq4ezzbcTc_0 bird aVvuGEexwy0_1 person aVy9mhLlo5U_0 umbrella aV2_0JBmw8o_1 person aV7mSkydynI_4 bicycle aV7mSkydynI_1 bicycle aV7mSkydynI_2 bicycle aWCNGGW4Qew_0 person aWDtrDYqivs_0 person aWQxqFyyzng_0 cow aWQxqFyyzng_1 cow aWWMT0webCY_0 person aWWtWhgt_V0_0 cow aWYoUCAev64_2 bicycle aWYoUCAev64_0 bicycle aWcaF85RIM8_3 elephant aWgSKxQO5Ps_0 cat aWi51gAEIkY_0 person aWma4eTtHv0_0 person aWqBSBc-XpU_2 knife aWt13fGkYuA_0 cow aW9D5rT3GCo_0 bear aXFFLOGR_yI_0 person aXFgCWZLFj8_0 horse aXFgCWZLFj8_5 horse aXFgCWZLFj8_1 horse aXKbkyjRqkU_8 bear aXKbkyjRqkU_0 bear aXKbkyjRqkU_7 bear aXOPdDTpvxc_0 person aXWkAKNw0Dg_0 bird aXXfrIsIqi0_0 person aXhd5BhT4hs_0 cow aXhd5BhT4hs_1 cow aXml5kCJyDY_0 skateboard aXml5kCJyDY_2 skateboard aXn1cwN8vng_0 airplane aXn1cwN8vng_1 airplane aXxKLf5m61g_1 person aXxKLf5m61g_0 person aXxPxBeZjQI_0 person aX0JOJY-BDc_0 person aX0JOJY-BDc_2 person 
aYCA7dz0nbI_0 person aYJzxhE8-Rs_5 knife aYPCTMucy6A_0 person aYgA8AxT0V4_0 giraffe aY1i2TADX0c_0 person aY1i2TADX0c_1 person aY1i2TADX0c_2 person aY4dOYabpbs_0 cow aY6lI7qO6kI_0 person aZF83PK7HKU_0 person aZF83PK7HKU_1 person aZGZbrCAFl4_0 person aZGZbrCAFl4_1 person aZHznZSD2uE_0 person aZJ_vArnOC0_0 cow aZL_n-gon0U_0 boat aZT_v5WnLio_0 person aZVtxAF_Imw_0 dog aZZcXyRJwyI_0 person aZ4tzgju18s_1 train aZ-3jypmJiY_0 person aaAAXDB7ml4_0 elephant aaAAXDB7ml4_1 elephant aaA_qcyN3eM_1 cow aaBf3fxpR7E_1 person aaQjh2_8aVw_1 motorcycle aaQjh2_8aVw_0 motorcycle aaUXN-xWi1c_0 person aaWV0TEIbhM_0 skateboard aaWV0TEIbhM_2 skateboard aacFWGARp08_0 person aacLCDo8Zus_0 umbrella aacZc8VUtxg_0 bird aaoYsiVAFDY_0 airplane aas39xgvbfg_0 cat aatdoixvb4w_0 dog aazC6OJV2GY_0 person aa0jo00Yxz0_2 boat aa-J6xg9RH4_0 person abCu1bwDisA_0 umbrella abHvXnWduQQ_0 person abQ7YCx3QQM_0 train abbympAEM_k_0 cow ablCJGTLCow_1 elephant ablCJGTLCow_3 elephant ablCJGTLCow_4 elephant ablCJGTLCow_0 elephant able--ZWvkg_1 person abnCzyC9R28_0 person abpyt2p-uMg_1 bird abrKRGgLV0o_0 dog abrKRGgLV0o_1 dog abxcR1X4UIo_1 bird abxuxX4aHFI_1 horse abxuxX4aHFI_2 horse ab1RpuefUA0_3 bicycle ab2b2WA-fQs_1 person ab2b2WA-fQs_0 person ab2b2WA-fQs_2 person acDY2Ono9WA_0 dog acL58vxHnnc_0 person acOdf26jldk_0 person acYxvpS0b7s_2 airplane acZFDZif1ww_0 train aciCzrBQsM0_0 person acnOEnTXwJY_0 cow acnOEnTXwJY_1 cow ac4feYMso4k_0 train ac6NdTBtc6U_1 person adAkRe99CDA_0 truck adE0Nk3CKyI_0 car adKIteGSOIM_1 skateboard adY8EtfOO_w_0 train adcv2A70AoA_0 person adiBUyRiBfo_1 person adiBUyRiBfo_0 person adskAqVAdFQ_1 elephant ad2C17MGAEo_0 bus ad94BZD75ck_1 cow aeAjL4rCjIM_1 truck aeAjL4rCjIM_0 truck aeIzIOSHZek_0 person aeJKW7m42xo_2 airplane aeJKW7m42xo_0 airplane aeKckIdL0io_0 bird aeUVIIEtwdw_1 motorcycle aeUVIIEtwdw_2 motorcycle aeUVIIEtwdw_3 motorcycle aeUVIIEtwdw_4 motorcycle aeboOU_vdjo_0 person aeboOU_vdjo_1 person Bc2pPI9s8bM_2 horse Bc26F0eEyBg_0 person Bc5QvTVd-04_0 person Bc64C5jdZDg_0 person 
Bc7NXuSycR4_0 skateboard Bc-b4WhkWxw_0 person BdBZuvI8oak_0 truck BdBZuvI8oak_8 truck BdBZuvI8oak_1 truck BdBZuvI8oak_2 truck BdBZuvI8oak_3 truck BdBZuvI8oak_4 truck BdBZuvI8oak_7 truck BdB6NgtqioE_1 bear BdCnusBWLuw_0 bicycle BdC5wdGWMCw_0 person BdLMnBBX7rc_0 person BdQ8AC4jpkk_0 person BdR02myBXHY_0 person BdTRTQRbNqI_1 skateboard BdT2u0kYx90_0 bicycle BdT2u0kYx90_1 bicycle BdT2u0kYx90_2 bicycle BdT2u0kYx90_4 bicycle BdZOawocL-c_0 person BddRmrmaI6M_0 person Bd0JDJL6yXk_0 airplane Bd21KrWCyCg_0 cat Bd-WW1Hs9kk_1 train BeAD9m4Yu_U_0 person BeCQkxXRRww_1 person BeCQkxXRRww_0 person BeCmkGB-RCw_0 horse BeQWoctTF5I_0 bear BeQWoctTF5I_2 bear BeQupBkL2y8_0 train BeTu3Ag6XIw_4 bicycle BeTu3Ag6XIw_1 bicycle BeVqWRYzPkY_0 knife Bebzr4dP1Ug_2 person Bebzr4dP1Ug_0 person BedgXkpLAOs_0 person BefMC4f6Z3s_0 person BefMC4f6Z3s_1 person Befq3kL0E7o_0 person Begwn2Da_j8_0 person BepRWdKn0QA_0 cat BetAKo6E3rw_0 person BezlbA5t77I_1 person Be4NCK9GwQU_0 person Be4V9lpSpJw_0 knife BfIBlw1RkXc_1 truck BfJUkGEnxvE_0 person BfOXYUOsSf8_0 airplane BfSxTA9yZak_0 person BfT3bVAeXLU_2 boat BfWpLwfDFbc_0 person BffFognyZOA_1 skateboard BffFognyZOA_0 skateboard BfkXvdTkYF4_0 person BfkXvdTkYF4_1 person BfwHmAlZdKA_0 person Bf1cF3BfY18_0 person BgBDqhuoTr0_0 dog BgHvkS4H7w0_0 person BgamGCKlzTI_0 person BgbxYgCIde8_0 cow BggPqcJz12g_1 elephant BgjdCfaJfsE_0 elephant BglxBESIjlE_0 person BgsTkbznAjI_0 person BgwZN0Ui-Q8_0 person Bg0_DcQLOys_1 knife Bg3Zox43xGI_0 skateboard Bg4NtG5QkwM_0 person Bg_cKljiGGE_2 person Bg_cKljiGGE_0 person BhA7KMeJYAE_0 skateboard BhL184lkUcw_0 person BhPyQcTHRmg_0 boat BhXpOqm8Q5o_0 bird BhZl6ZTtKDo_0 person Bha-PhOr-bU_0 bird BhdcIu_nQYs_0 bus BhqZrCcQpD4_0 elephant Bh4QFujTqIo_0 train Bh5wIL7IE9A_0 person Bh5wIL7IE9A_1 person BiGYFhnDhMI_0 airplane BiQ4cYnaGPo_0 person BiYzQbOwhWY_1 train BiYzQbOwhWY_2 train BipPdxUV2PY_3 boat BirMOPf7k0I_0 knife BizSBnzOzy0_0 person BizSBnzOzy0_1 person Bi1KsDpJT8w_0 person Bi1KsDpJT8w_1 person BjGhd-Eq5ig_1 car BjGhd-Eq5ig_7 
car BjJSECIrsd0_0 dog BjLJqIPSyUM_0 bicycle BjQO2ipch-w_1 dog BjRyA1cPxA4_0 cow BjZ9JRI_WkM_0 person BjbCdEHhCjI_0 person BjfwCDsBoeg_2 bicycle BjhITTFavAk_0 person BjiJ7HAaOj8_0 person Bjj4KdIbDBY_0 person Bjk2IA4thIE_0 bear BjogwheL3BI_0 horse BjpX2nla914_1 car BjqdFABBqxA_0 person BjqdFABBqxA_1 person BjraW0bXW-0_0 person Bj8lO8Jag3Y_0 person Bj9wPwHXNQo_1 horse Bj9wPwHXNQo_2 horse Bj9wPwHXNQo_3 horse Bj_fS2abD9o_1 bird BkFws1J8IM0_0 bird BkMb48QM-zQ_0 person Bkco3wJWvp0_0 person BkdBnU65i7Y_0 person BkdWJT3sWro_3 airplane BkdWJT3sWro_4 airplane BkfKa-zgphc_1 airplane BklBU6Epydc_4 horse BklBU6Epydc_1 horse BkoQ8_W4drM_0 umbrella BkteTGu81tQ_0 bus BkwpJBHM_DM_0 dog Bk3VbRagAwg_0 dog BlXhR1rRct8_0 bicycle BlfVNiQZtko_1 cow BlfVNiQZtko_0 cow BlhT8WFfI54_0 person Blj4FY__L6Y_0 person BllnWV-BIDo_0 bird BlqsGIq2hNg_0 person BlqsGIq2hNg_1 person BlzUBgB6BEc_0 person Bl-1081HLyM_0 motorcycle Bl--N1EQpuA_5 airplane BmCAiO-WNmE_0 skateboard BmG7dEBuS6s_0 cow BmHShiZ1Xus_3 airplane BmNwfiFBeRo_0 person BmNzw5vNQNI_0 skateboard BmRZWeMzQLg_3 bicycle BmRZWeMzQLg_0 bicycle BmSBpZrrEt8_0 cat BmXdIzhVZ0Q_2 bear BmZN0ljGa84_2 motorcycle BmfHrAPEMrk_2 person BmfHrAPEMrk_0 person BmfHrAPEMrk_1 person BmjBM58PfZE_0 cow BmjEEjKDJVI_0 person BmjLZgp38NI_0 cat Bm3l_RLjYpo_0 motorcycle Bm3wZ63Ymvo_2 motorcycle Bm7e-qOAcKQ_0 person Bm8qAGd91Gg_0 train BnADRMlWOsM_0 airplane BnNJUP6xfG8_0 bear BniJFr7IJRo_1 person BniJFr7IJRo_0 person BniJr-iCh9M_1 truck BnkIFwVPh8w_0 horse BnkIFwVPh8w_2 horse BnkIFwVPh8w_4 horse BnkU89Dq2IQ_0 person BoA6CUl4t70_0 cow BoGAxXRzHWs_0 cow BoLSvTrm3d8_3 cow BoNtUpvusGM_3 motorcycle BoNtUpvusGM_4 motorcycle BoNtUpvusGM_0 motorcycle BoNtUpvusGM_1 motorcycle BoNtUpvusGM_2 motorcycle BoOANS5_U9I_0 motorcycle BoPj2W_G2Qg_0 airplane BoYvNfndu60_0 skateboard BoZ3ZvdEZ4o_0 car BoZ3ZvdEZ4o_1 car BoiPpDeQ2mQ_0 airplane BomNEWAGolQ_0 person BomVU8_LL_Y_2 dog Bowyw_fhWZ8_0 person Boy5toMvMwo_0 giraffe Bo2qsQNYATk_3 skateboard Bo5bT8QP_Og_0 person BpDLFqS9EAE_0 person 
BpVyiSvjk4o_1 dog BpdZmCkSHco_0 giraffe BpjdKB7AJ8U_0 skateboard BpkMUQLoJUM_0 person BpoWgamMMro_0 cow Bp1zluIhHzc_0 person Bp4vXfVIVxA_0 skateboard BqBkvlijWKg_1 person BqDnDPIE18k_3 horse BqPcqKW3uAM_0 dog BqoRxXUz7q4_2 truck BqpA7iBOQ_s_0 person BqqPm3F1F_w_0 person Bq4id5zA48c_2 bear Bq_emgXftMI_0 person BrDdbgxB7qI_1 bird BrHDj1biLlA_0 airplane BrHDj1biLlA_1 airplane BrJiBbRF25U_0 person BrKgWUQnUWI_0 cow BrQNhzCKfxs_0 person aelph1Y8yPk_0 skateboard ae161Zq0QBg_0 skateboard afCYMTTgbMw_1 dog afD_y2ZEHn4_0 skateboard afLO-CD48TI_0 motorcycle afLO-CD48TI_1 motorcycle afWl3lTglsw_0 person afbS6cTlE5Q_0 person afu5-raaJEc_1 elephant af9Z_LR-L7M_0 person af-MtTvmPic_0 person agFlIZmS0zU_0 person agF_eyIgF3g_0 person agGuxSx4UdI_0 motorcycle agIme93Q6WA_0 person agMdtESL5kE_2 cow agSpfpV4EsQ_0 person agVHBb-qLAw_1 bus agWS48KnYWk_0 motorcycle agXPzkjMl4c_0 bird agYR35aJ1no_0 person ag1ohTMq9Iw_0 car ag5Gy7ZNbfw_2 knife ag5Gy7ZNbfw_3 knife ag6NY6nrTvw_0 bear ahE37MgcoUs_0 person ahMgOG4Bpcw_0 car ahQD9PpYoqE_1 train ahYD0J4XzC0_0 cat aheVwPx1egw_0 truck ahiO1CwoaY4_0 person ahnbyNWfvpM_1 cow ahsHWgQGPNI_0 person ahv6_xBxvmg_0 person ah03BOnPUqs_0 cow ah-2yN1cKOg_0 bus aiINQVIMx5o_0 person aiNcNIUbY3E_1 dog aiX8ymgR1g0_0 boat aiX8ymgR1g0_3 boat aierZPItkn8_0 bicycle aierZPItkn8_1 bicycle aiiN3X-f5Ss_0 person aiklFoEJX1Q_0 person ainWSZibSIM_1 bicycle aio5SboRXGU_0 person aio5SboRXGU_1 person aizJI68M2SY_2 truck aizJI68M2SY_1 truck ai1CTuarr50_0 bus ai3xYb_xvFA_0 person ai7WTyMnl1g_2 horse ai7WTyMnl1g_3 person ai7WTyMnl1g_0 horse ai7WTyMnl1g_1 horse ai9-_EMwk4U_0 skateboard ai_jmsLJTR0_0 person ajAuKSOFBKQ_2 bus ajAuKSOFBKQ_3 bus ajB-QUVDyXI_0 cat ajO4xx5beuE_1 bicycle ajPP5EY_nAo_0 person ajPP5EY_nAo_1 person ajPY1htweXM_0 person ajPY1htweXM_1 person ajtvjEY9TPA_0 airplane ajxcj5ovYdw_0 skateboard aj0Ll84jtZs_0 person aj0Ll84jtZs_1 person aj3UwQNtZPo_0 train aj6sqeG0k54_0 umbrella akH9ouIrOds_0 skateboard akIlFKpZAtk_0 person akOLIpAsxqc_1 person akQU-s0RCWE_1 bus 
akoVZ50spRM_0 person ak6iAVUNU7c_0 dog ak6iAVUNU7c_2 dog ak6iAVUNU7c_1 dog ak89dpHVmHc_1 person alAFNWeSJts_0 skateboard alDkqPNUFLU_0 person alDkqPNUFLU_1 person alKgZTVxcV4_0 motorcycle alX9MOY80Aw_0 person aluZTs_Ys8I_0 car alvKKzlOBKM_0 person alzWhOivD0E_0 person al2Vh0In4HU_0 bear al2Vh0In4HU_2 bear al2Vh0In4HU_3 bear al8Of2FWy80_0 cat al8vzWgNDbs_2 bicycle al8vzWgNDbs_7 bicycle al8vzWgNDbs_8 bicycle amIvXQ6aZkE_0 cow amL9Dar_hp0_0 person amTcWqrgBBg_3 airplane amjpcHzuYb4_0 person ams9MCDF15I_1 person ams9MCDF15I_0 person amvLPTONS1U_0 cow am-3XKJkCqg_0 train anAXVexurxo_2 dog anJbsuTwShw_0 person anLTttUpag0_0 skateboard anR9cuXRv6Q_0 person anWxwjzPRBA_0 person anYy3XNTTGw_0 person anZ9lxr24eY_0 person angay7OmUwA_0 truck aniCxSPm8Uc_0 car anlydfnmv7g_0 person annQpJsk6NI_0 bus anpsTMr_HIo_0 cat anrBShdHOz4_0 person anvk-OdKLBE_0 person anvngue8Qh8_0 cat anzrRzyYAAc_0 dog an-QcnhNhL4_0 person an-mFuTYuCk_0 person an_FRcZ669c_0 person aoBqV2Guvso_0 person aoDJu0KrrQs_0 motorcycle aoOJR-0sPM0_0 person aoSWWKtf8mU_0 person aohLKKJxjIM_0 person aoizdynEVYU_0 dog aoqMoScEfqE_1 horse aotBl0tvpFs_0 train aotBl0tvpFs_1 train ao9uUinn2WY_1 truck apKAwFA4oP0_0 bird apQKmVEucLQ_0 person apZAEWvk8XY_0 person apcgot45Ql0_0 person apdP6_tCdls_0 person apfZjUpoTy0_0 skateboard apfZjUpoTy0_1 skateboard apprUmnQTcI_2 cow aqGKBg0azPA_0 cow aqGp6tCGLOU_0 motorcycle aqKiwfY3Oqc_6 bus aqKiwfY3Oqc_5 bus aqKiwfY3Oqc_7 bus aqNz8TCica4_0 zebra aqUHuS5ALXE_0 cow aqWN-Q0wDHI_0 person aqWN-Q0wDHI_1 person aqZfqhHJPLo_0 person aqdSuLpYlwQ_0 person aqe_mdIg6k0_0 person aqmie50AFwE_0 dog aq2UMxzwliQ_0 person aq50xKvuSFg_0 skateboard aq59B_-6ilw_0 person aq9Sfxn9vMg_5 knife aq-QzG14KJ4_0 person arFKRc7lAo0_0 person arFKRc7lAo0_1 person arPGoY7uh4E_0 person arS7aqpkAU0_0 motorcycle arT4jZLX8pg_1 knife arW0ZUPkah8_0 person arZ_mIhaJMo_0 cat are5LvOB2nQ_1 skateboard are9NykT9FM_0 truck arn0j0l_IWI_0 person artWKQTC7CQ_0 person artcASpzYrU_0 person arwZ6ZPJuN4_0 cat ar7TRjurXMY_0 person 
ar-fzXT8Juc_0 truck asT-GJNeJok_0 person aseOdDcbIRE_2 person aseOdDcbIRE_0 person aseOdDcbIRE_1 person ashHnkqFz7g_0 bicycle ashHnkqFz7g_3 bicycle asl-XTE0jsE_0 person asrDocOfGQE_0 car asrDocOfGQE_1 car asrDocOfGQE_3 car asrDocOfGQE_4 car asrDocOfGQE_5 car asrDocOfGQE_6 car astLiScyoaQ_0 person asx2CkH0O6I_0 elephant as1twjKe3Cw_0 skateboard as6Y3-EaaCg_0 person as6Y3-EaaCg_1 person BrgRnN_LBGk_1 person BrgRnN_LBGk_0 person BrhMkJ6n-hQ_1 train BrnBTne3NBw_0 bear BrnBTne3NBw_1 bear BroiAN_qtCI_0 person BrpRmX410DU_0 person BrrAlsmwDnk_1 person BrrAlsmwDnk_0 person Brrlyds8g1A_0 person BrwABvccCWs_0 person BrzEfM8nWCw_0 cow Br3M-xsvXFQ_0 person Br9CVteHFEc_0 person BsCH_ABy0WE_0 person BsRC5xbG6uY_0 person BsXphFpnOxE_0 bird BsXwLsR6dm8_0 person Bsv8dNYzPkY_0 bear Bs1rRAtP7bw_1 bear Bs3BPJZMD9E_0 person Bs94h8vMmwg_0 person Bs_9E_Rq524_0 person Bs_9E_Rq524_1 person BtFwcgeJjsY_0 person BtKVAhU1LdI_0 knife BtKl-iqkgoY_0 cat BtN0FlaISuY_0 person Bt19SM8BenY_0 person Bt41QF0ze6E_1 person Bt7B7nkGO_4_0 truck Bt7B7nkGO_4_1 truck BuFYI1vYj1k_1 person BuH65mVX5yM_0 person BuPWtDPEJ-0_0 person BuXvxclES0s_0 bird Buco16wWyFA_1 motorcycle Buco16wWyFA_2 motorcycle Buco16wWyFA_3 motorcycle Buco16wWyFA_0 motorcycle BufY7NdKUlM_2 motorcycle BufY7NdKUlM_4 motorcycle BunvBFXoGPg_0 bus BuqljdjPWWc_0 knife BuqljdjPWWc_1 knife Buumm7rgDPY_0 person Bu0gJwoDkRw_0 cat Bu5Bgr9asUU_0 person Bu_HdLSyLSI_0 person Bu_3ep-qAi0_0 person BvEAIc3hmkk_0 motorcycle BvHzGHjR6rk_0 person BvLCgNWIHfA_0 person BvLJZAhIR3A_1 truck BvTLdUcIH5I_1 person BvTbuvBeunI_0 airplane BvTjf9mG5MU_0 person BvZ8DqslB-U_1 airplane BvZ8DqslB-U_2 airplane BviGbtAujq0_0 truck BvrORC4d2yg_0 train BvrORC4d2yg_1 train Bv4rjfW9RsM_0 dog Bv9IXbrDYLk_0 bird BwDccOS7_vw_0 person BwIoxW7Ee8M_4 train BwUYR-ZnpX8_0 horse BwW4Fs1eTRg_0 airplane BwW4Fs1eTRg_1 airplane BwergWBqOOs_2 train BwgJmjOzlRk_0 person BwoTsoC3hvQ_3 horse Bwo1MaJvxRs_0 person Bwrh4q5KLVg_1 dog BwsHsSpS0dQ_0 bird Bw2RhmesY5g_0 person Bw5iwcbP4eM_0 giraffe 
Bw6f2OXYtSo_0 cow BxHIRvoGZMM_0 person BxMoEE7XwL8_0 person BxNE34BGZ-4_0 person BxQp3-SCUGs_0 person BxQp3-SCUGs_1 person BxWs9aINEEI_0 person BxWs9aINEEI_2 person BxWs9aINEEI_1 person BxYdU6vB2YQ_1 motorcycle BxaEaD7zeX4_0 person BxhktnvjtLA_0 truck BxmeqCev3Kw_2 boat BxmeqCev3Kw_3 boat Bxm3EvRZAI0_0 skateboard BxvlWueS9vA_0 motorcycle BxwmNnxcI7o_1 person BxzVlf9-SLc_14 bicycle BxzVlf9-SLc_4 bicycle BxzVlf9-SLc_6 bicycle BxzVlf9-SLc_8 bicycle Bx2YQSFETcw_1 person Bx4ELKBw9PU_0 cow Bx4ngxnRjvM_0 motorcycle Bx-is-dL1ko_0 person Bx_z_4bt8O4_0 person Bx_z_4bt8O4_1 skateboard ByBWtiJJNqk_0 person ByBWtiJJNqk_1 person ByFCiUvKd4E_0 cow ByFCiUvKd4E_1 cow ByFCiUvKd4E_2 cow ByJNGLp-Q1Q_0 boat ByRne1VtDow_1 person ByfeHjkm0NA_0 bus ByhpLi9sRUs_4 train ByhpLi9sRUs_5 train ByhpLi9sRUs_0 train Byn2Qo7ghaQ_1 person ByvWskJDMGg_0 airplane ByvW2VADH6w_0 motorcycle By1cSo8DcUw_0 bicycle By8jq7bVrkw_0 person BzKADkfj5sM_0 cow BzNlO4ccRRY_0 person BzOo01dGJkw_0 person BzT8xDTB14c_2 truck BzWiQPw-vQc_0 person BzX2DmrGvp0_0 train BzeW7KdQ818_0 skateboard BzeW7KdQ818_1 skateboard Bzehenf5vSI_0 airplane BzgqI8VBlSE_0 person BzpY-JMNW4c_0 person BzrM5QG9q2o_0 train Bzr3gVS8SzI_1 boat Bz5rpBZ1dzs_0 person Bz7A9QxD1nY_0 knife Bz9MqNlU7KM_0 person B0AazXeFQIU_0 person B0BXcxFMgrk_0 knife B0EZ9LIObGc_1 motorcycle B0FupWyYbG8_1 person B0NJSrhuWwA_1 person B0NJSrhuWwA_0 person B0QFrtXczzE_0 person B0SYog80Y78_0 person B0WaLst2GGg_1 person B0YrdZ7s3UY_1 person B0YrdZ7s3UY_2 person B0aFuZP3nYE_0 person B0aFuZP3nYE_1 person B01lwUoyl90_0 person B03gLj0lJrk_0 horse B0-L6VbxLcU_0 cat B0-lAJ4tBN4_0 train B0-lAJ4tBN4_1 train B1IQyTNE7eg_0 skateboard B1Ojfucympw_0 person B1Ojfucympw_1 person B1YzUGPZQWo_0 train B1hkAet1OQI_0 person B1isEeljBFI_0 person B1pC6hfF_Do_0 person B1qSE-7JgXE_0 person B1yiSrv4Ocw_1 horse B1zPD20nhTg_0 person B12C84by_eA_0 person B12C84by_eA_3 elephant B12C84by_eA_1 person B12C84by_eA_2 person B12C84by_eA_4 person B12C84by_eA_5 person B12C84by_eA_7 person B12C84by_eA_10 
person B12C84by_eA_11 person B2EMVGU5pNA_4 train B2VryVb5p54_0 horse B2VryVb5p54_2 cow B2V7kk7fqSc_0 person B2X9JzMNZb0_0 person B2ZpqEJpVX0_0 person B2fTIk9eCNc_1 elephant B2gJVve4I58_0 person B2hKNbDmBtM_0 cat B2lAxi3jIR0_0 person B2lAxi3jIR0_1 person B2lAxi3jIR0_2 person B2xcdU4Qoz8_0 bicycle B2xcdU4Qoz8_12 bicycle B23TpirETNE_0 horse B26AQtx7Xic_0 person B3HZSrALQYc_0 skateboard B3IjPORG3_w_1 bird B3J2umsYK7E_0 person B3QykPv8TnI_0 person B3X5wDENAUw_0 cat B3kTu0B4OjM_0 person B32uNSxqzgs_0 cow B33seWCiea4_1 person B33seWCiea4_0 person B4Q6pRC_mZ8_0 bicycle B4Q6pRC_mZ8_1 bicycle B4Srj2O1AWQ_0 cow B4dFepwxEOU_0 person B4iP6lAoNYo_0 person B4jbThMFW00_0 person B4mWkc8-_6A_0 bird B4oO-miJ6VU_0 umbrella B4vM2iKb8cs_0 person B4_mRuPC7o0_0 person B5BNEoIaQL4_0 person B5GwJoM3aX8_0 person B5NgN9mocgI_0 person B5PHI2HVtuc_0 person B5fv91yB4Gw_0 bicycle B5qSvRpXLS8_0 cat as7rVUFzyzg_0 skateboard as_Rz9F3slw_0 cat atA-Cgv2XHY_0 person atE1O6J4Wls_0 person atLGWZUbEuM_1 train atMjLEIbsBI_0 cow atxnLL4Vjuo_0 person at2dmAEDdmg_1 person at4pXKjEDic_0 person at4pXKjEDic_1 person at5edW3lMVA_0 person auA-q9fWwn4_0 elephant auDJ1xtxFlw_0 person auDJ1xtxFlw_1 person auFLAZb-gD8_4 truck auGyhsy8iLA_0 cow auNciV4eLVo_0 bus auOl1mbGUlk_0 bicycle auOo1Lg_wvU_0 dog aubLDLbxxsk_0 person aueT5WO4e_c_0 giraffe aueT5WO4e_c_1 giraffe augKp60fa5Q_1 car auiPa0HNOEQ_0 person auu_tYb3G1Y_0 person auzy4oPzM5Q_0 motorcycle avCqOSeS7WU_0 person avC67gaD1NM_0 cat avHbY1Q3vyw_1 elephant avLxYBedm_c_1 elephant avT7Q6Wibdg_0 person avl9d-bL57Q_0 airplane avl9d-bL57Q_1 airplane avob12vGzmU_0 horse avonCFmxPyg_0 person avonCFmxPyg_1 person avpWY3czerE_1 car avpf9VVT6CU_0 motorcycle avvQ5wNPiew_1 person av475qBV4QY_0 skateboard awC9zxAeP54_0 person awQ1n9aQEco_0 person awVBieSP5Zw_0 person awVa7pqR9DU_0 horse awfg9NsCVQ0_0 person awjHSQ5uPi4_0 bus awkpYVN-fJw_1 horse awmHGFkxxlw_0 person awwWMuOKe3c_0 person aw059qHbVm0_0 bus aw2lOvXUAPg_0 truck aw5C9nQgLcA_0 person axB1Gk85UtQ_0 person axEK7nZ8W3I_0 
person axJZ92uWnkA_0 person axXs2oUd4ow_0 bear axcDoOd0G0s_0 truck axjSgDsN6t8_0 horse axltu5Qf6ok_0 skateboard axn6QuPBPqA_0 person axulii3UXSQ_1 person axulii3UXSQ_0 person ax4YUE-PcF8_0 airplane ax4YUE-PcF8_2 airplane ayD3RJIjplM_0 dog ayRmnUb2LAI_0 airplane ayax5k3PJMs_0 person aybdlOdul0U_1 person aybdlOdul0U_0 person aydxF0r6n9s_0 person aydxF0r6n9s_1 person ayg0x1glF2s_2 horse ayg0x1glF2s_0 horse ayg0x1glF2s_1 horse aylBB_8cv60_0 umbrella aysqPEtZvsg_0 person ayuF_8chcKM_0 person aywW_Wvo49w_0 person ayzzG8M0fzo_1 person ay1d8NBbrl0_2 bird ay1d8NBbrl0_3 bird ay5RnrQple4_0 train ay5tx1Rovwk_0 cat ay7LLDO9Ecc_0 dog azC7-_wC8N8_0 bus azDn4DU7cGA_0 person azKKcIb4Ufw_1 boat azOInI_CMHM_0 bus azbls7-iaEU_0 person azbls7-iaEU_1 person azbls7-iaEU_2 person azfLb8VvI-4_0 person azfLb8VvI-4_1 person azfLb8VvI-4_2 person azlRI_Jydpw_4 cow azmZDijLihI_0 person azmZDijLihI_1 person a0FDxoXtFyM_1 airplane a0NOwUio_n8_1 person a0NOwUio_n8_2 person a0NdjlW5H_U_0 cow a0N_vetshbg_0 person a0N_vetshbg_1 person a0OjB7xzRx4_0 person a0RusP9ATfw_0 person a0dHPtoBS3U_0 person a0hRgBpppWs_0 person a0jpiOFS7eM_0 bear a0oeBV6-20U_0 person a0uoJdAwobA_0 person a085oeXd0RE_0 person a0-Pmmyi8js_1 person a1ADw1megCI_1 airplane a1Fzn7iUHO8_1 motorcycle a1RVXQl4rlY_1 cat a1RinDI9Hgw_2 knife a1SaKvoO2Og_0 cow a1U6U_pntMo_0 person a1XDxiP1hNA_0 person a1ctjjNUZ-4_0 dog a1kLNA-KACs_1 bicycle a1lQwuhicQI_0 person a1lQwuhicQI_1 person a14VlgxHS3M_0 person a2AT0Xo7uLY_0 person a2Osa5aleJ0_1 bus a2Qp2Grx3_8_0 person a2XMK6mjiZg_0 dog a2XvXs2guuE_1 person a2XvXs2guuE_0 person a2fEq8oS3M8_0 bus a2gYRtJhP1E_0 horse a2gYRtJhP1E_1 horse a2hv4szlq-Q_0 train a2kH2_9zoWU_0 airplane a2o_-GSpXXk_0 cat a2qmS6AhUYk_0 motorcycle a2vx_F1NOas_0 person a26mRIQUPoU_0 dog a26mRIQUPoU_1 dog a26mRIQUPoU_2 dog a27UC8vu1hI_1 truck a29AS00WJrY_0 cow a3AIwQnG0Ek_0 cow a3FLLhQu768_0 person a3THrQYDkqw_1 bird a3UCtF8nZIY_1 skateboard a3dbdHben-o_0 elephant a3dbdHben-o_3 elephant a3dbdHben-o_9 elephant a3dbdHben-o_1 
elephant a3dbdHben-o_2 elephant a3dbdHben-o_4 elephant a3dbdHben-o_6 elephant a3rGEI8MdMs_0 cow a3uvEIsI1no_2 person a32oJ0GsAYw_0 person a35UuVw16Ks_0 person a37D3FoqIJA_1 knife a3-oi7T-Lw0_1 zebra a3-tURw95Xo_2 person a4IU4va7hp0_1 truck a4LYVAPbEwI_0 motorcycle a4LaeeZXIc0_2 skateboard a4Nt5QxFqmY_1 boat a4PwZfJZVPA_2 bear a4arqJgXHDA_0 person a4pR_YBd4yY_1 bicycle a4uNoGpllg4_3 bear a4v1ptMpyi0_0 cow a4v1ptMpyi0_3 cow a41TZwhyyP0_0 cow a46BqT5Mo5I_0 cow a5HZnFcvdyA_1 horse a5P8pVrcSRk_0 motorcycle a5brvs-fct0_0 person a5tSaF5GCKE_1 cat a5ye5BUJFlY_1 person a5znd3aNwLk_3 bicycle a58tMy0mhIk_0 person a6G_DBEFdFA_0 horse a6ZXi7Qqls0_0 person a6fBYYEgBvs_0 dog a6jDeIJbF7Q_0 person a6uyjrBkkXs_0 boat a61piN6ffE4_0 person a67zz0CSEpk_0 person a67zz0CSEpk_1 person a7B81Zeqgfw_2 truck a7HKuyv2qLQ_0 elephant a7HKuyv2qLQ_1 elephant a7Q6eb6feT8_0 person a7S9rFNKVMI_7 motorcycle a7Zr0-1LIPc_1 dog a7hwm4TORvY_0 person a7pC7IjO2ik_0 truck a7peWR4xJwQ_1 cow a7ygZsaDMis_0 person B5_Hyk-p7kE_0 cat B6E15pe4UR8_0 horse B6LGwD1E9SQ_1 person B6P8B8BO-6U_0 giraffe B6SaDYczlDQ_1 person B6U92N9hh6k_2 horse B6V4xqX67OA_0 truck B6bDVhRNw00_0 airplane B6cEdaWTjeU_0 person B6dBkoOhfBU_0 car B6lU93wtaDA_1 boat B6mP9KsnQPc_1 bear B6mngUQtFJ4_0 cow B6nlTJYtmws_0 cow B6pXMjH4geU_3 boat B6qshzfLYzs_0 person B6x2dNbgPjM_0 cow B6y439-imys_0 person B6z7eCsgfM0_0 bear B61Wf8NFvcU_0 airplane B645r0hkdmg_0 person B645r0hkdmg_1 person B67FwwZfIEA_0 person B6_IcyhOHpE_0 person B7BjhnnQ2K4_0 person B7GRNv2opSY_0 bird B7MHQOUO4f8_0 umbrella B7Z9UV6aQuM_0 bird B7a8WkaWmH4_0 person B7a8WkaWmH4_1 person B7cXCz7jJKQ_0 cow B7gX18_mDyQ_0 person B7hmqrwe88o_1 elephant B7hmqrwe88o_2 elephant B7iAvi5riV8_0 motorcycle B7nwfSMbEL8_0 cow B7pEEUJ-J1g_1 motorcycle B7rCxgg3F_s_0 train B8Bp9yKWV9c_0 person B8D4fPARFvo_0 person B8HQglK444U_2 airplane B8HQglK444U_0 airplane B8HQglK444U_4 airplane B8LGL1Tt_wg_0 person B8MxJKDkvkE_0 person B8eeoykmq1E_1 person B8eeoykmq1E_2 person B8f7NnYq5sg_0 person B8f7NnYq5sg_1 person 
B8sWL2syyA8_0 person B8uIyRkm9YA_0 airplane B8zGkBkQw4c_0 person B87W__RIE-E_0 person B8_Z7m50I_E_0 motorcycle B9AXF91pIUs_0 airplane B9Ed_vAN9mc_0 dog B9Y_LrDVbg4_0 person B9aqDsvGy5Q_0 person B9aqDsvGy5Q_1 person B9j233QxEuQ_0 person B9oJSA_NJ2s_0 bicycle B9z17FOPd5A_0 person B99mIPKaChY_3 cow B-CR7vl67W8_0 person B-QiQvJcSVk_0 person B-T1YNe09SU_4 bear B-T1YNe09SU_3 bear B-bDxAN93a4_0 airplane B-dlnlRKA5s_2 airplane B-dlnlRKA5s_7 airplane B-tukWZbXp8_0 person B-wJpt4zl0c_0 person B-x2pu-ux3w_0 horse B-z1uE4iuz4_0 truck B-0WNs2QYPk_1 elephant B-48lEXzIS8_0 umbrella B-7cqxw95Ro_0 person B_BqrY2eeCY_0 motorcycle B_Gjc7J18qg_1 person B_Gjc7J18qg_0 person B_M6X41emhY_0 person B_O8idmfoCQ_0 person B_Tj79jaRXs_1 person B_Tmq51dx1g_0 person B_jGC2tlhRo_0 person B_k6vEEPHK0_0 person B_lEJv31TlI_1 person B_lEJv31TlI_2 person B_nZdcreecE_0 person B_wWPH9kbxM_0 person B_ylVg-TN2Q_0 skateboard B_4Kfa8_9ms_0 person B_4eJYakoRY_0 motorcycle CAEqRvJLY-M_1 motorcycle CAe1SZKZ9T0_1 car CAq4CxCpeQE_0 cat CA4UqnJCs58_0 motorcycle CA9SLI7TOKQ_0 person CBASqWyp4yk_0 person CBJQ5dL6Df8_2 horse CBNqNe7G-QQ_0 person CBnYDFRfYgo_1 bus CBqyVKttAwU_0 cow CBtgGOzZtLQ_0 person CBz3ZOrTAjI_0 elephant CBz3ZOrTAjI_2 elephant CCAsEc2oRAM_0 elephant CCGg17i4vMU_0 person CCHay2RSnJI_0 skateboard CCHay2RSnJI_1 skateboard CCLRdGNDgdc_0 cat CCoGim--jEg_0 train CCp6NLBil8k_0 bicycle CCwovjgEx1k_0 person CCwovjgEx1k_1 person CC0aX78fQFo_0 cat CC-qoxEyocI_0 person CDCLLCkr87I_0 cow CDY4TXCreQ0_0 person CDbWYF89944_0 person CDb6uyrYrZA_0 car CDfjcWI7iBQ_0 boat CDgBHxiVkFw_0 truck CDnrG74PXbI_0 person CDpQZEjohRc_1 cow CDpQZEjohRc_0 cow CDrU-q6QdEs_0 person CD0cWR7d9yI_0 person CD4SGfIdfSg_3 elephant CEDTshbJOaI_0 person CEJoHSbb4gg_0 person CEJoHSbb4gg_1 person CEMCCDAYzQs_0 person CENd4xI4dnY_0 person CETUG_G0I4k_0 cow CETUG_G0I4k_1 cow CETUG_G0I4k_2 cow CEUjuyvgrB0_0 person CEUqqi8y4sg_0 cat CEVHrP5OzJ0_1 knife CEafe_JTk8g_0 knife CEqA0cqMfzg_1 cow CEsjzJHOUBw_0 dog CEzWiyTQOMA_0 truck CE1gHqc8aqU_0 
person CE3KdY0X0QE_1 person CFD0NOl12CA_1 train CFD6d4OweGQ_3 motorcycle CFD6d4OweGQ_1 motorcycle CFD6d4OweGQ_2 motorcycle CFD-UQW1aQU_1 car CFD-UQW1aQU_2 car CFRsGLeMJKc_0 person CFXkKgig7Io_0 person CFee6F2rbjc_1 bird CFxObg2ebKQ_0 airplane CFxObg2ebKQ_1 airplane CF0JmXACTww_0 person CF01UBuV76Q_0 person CF7DZCaSqIg_0 bird CF7DZCaSqIg_1 bird CF7KYbTChlg_0 person CF71f3YLQ9U_1 person CF-cX0etaAw_1 cat CF_NSKkrwjg_0 person CGCNTZsml7Y_0 cow CGQoaYTzfaU_0 train CGQoaYTzfaU_5 train CGQoaYTzfaU_7 train CGgxp3ycSWs_0 elephant CGoqd4n_qJg_0 person CGsUTzKzV4U_1 train CGwrXZ2fUqg_0 person CGy0nn1MCqY_0 person CGy0nn1MCqY_1 person CG1sXlDy2Yg_4 horse CG1sXlDy2Yg_5 horse CHH1SlvOzfI_0 person CHIVYSnFst8_1 bear CHJFpAcH8NM_8 bicycle CHMzSMq0ui4_0 skateboard CHZU6sP-loU_0 person CHZU6sP-loU_1 person CHbhzxurZNM_1 person CHbhzxurZNM_0 person CHnWGkGAnos_0 person CHo3jSv3HIA_0 train CHwNoZ55z6c_0 cat CH6ptLNxppU_0 person CH8zCsamj44_0 person CH-_pvq3am4_0 person CIJ-q_X_y7E_0 person CIKrCLz06-4_0 cat CIQLvytEu6E_0 person CIQz5we_nHI_0 person CITgpk4GyMA_0 bear CITgpk4GyMA_9 bear CIV_VaLTf5c_0 motorcycle CIc1KbOeijU_0 person CIgzZOf3uA0_0 person CIgzZOf3uA0_1 person CIlb5C929mc_0 knife CImmRnndBuo_0 person CItr4F49wO4_0 person CIxs-77bPrM_1 person CI2GrLRwQR4_0 person CI3rFXxUPtI_0 bird CI6fYr7IJJM_0 person CI_9TEXzQE8_0 person CJD7b_dMrVE_0 person CJG8ou9QuY0_0 person CJIpdb7wZEc_0 person CJNAMf-R_J4_0 truck CJNj2wqp8QU_0 bear CJOJBhvHmCE_0 person a79_ETe4ego_0 person a7_ixAbhsRI_0 elephant a8MHgXPiRZU_0 person a8as0DkifS0_0 person a8eQTqlG-6o_0 person a8insUA82jQ_1 dog a8insUA82jQ_2 dog a8insUA82jQ_3 dog a8r9Xss8Es0_0 person a8wT4T21reQ_0 person a8z4RhTT02c_0 horse a82uXl_fE7A_2 cow a82uXl_fE7A_3 cow a892r_pD5PM_0 person a9FI5hfZsG0_0 boat a9GBRb_g82o_1 bicycle a9GBRb_g82o_2 bicycle a9YciDJw4wo_0 dog a9Y2Jm4-FDM_0 person a9ZvcKL6lEg_0 person a9fG2p2YO7k_0 bus a9fG2p2YO7k_2 bus a9g4dt8Lszw_0 person a9g4dt8Lszw_1 person a9riNB4_uhk_0 horse a90AssqciQk_1 elephant a90AssqciQk_2 
elephant a-EIC5v0X4o_0 dog a-EIC5v0X4o_1 dog a-MNXAJ2mZo_0 person a-NocjWzZtY_2 person a-QTXZfMMT4_0 person a-ZWAMyDG3o_0 person a-iJ2J3oI-A_0 person a-lm-MyKchM_0 cow a-s461-Ddxc_0 skateboard a-u5tm8bZnc_0 horse a-yRjCC5TTM_0 horse a-1bMCU5aj8_0 motorcycle a-8RK3OMAOo_0 skateboard a-8RK3OMAOo_1 skateboard a_KVzTF1RIA_0 person a_KZ5mevNfs_0 bear a_OkB8q7LMc_1 person a_SryCna8Rk_0 person a_UjbYab9UM_0 train a_YIQ1VvpcU_0 person a_YIQ1VvpcU_1 person a_gLFD5d04A_0 person a_wdiSqtOK4_0 airplane a_xkGO87GsU_0 skateboard a_1zKb6B-bs_0 person a_6uxh_4kb8_0 person a_-WUUfn_l4_0 person a__R_Y49D54_0 person bALr5X95BQ8_1 person bAMbXytHB7Y_0 person bAMbXytHB7Y_1 person bAdtKFYWQcE_0 person bAfpD53Vjic_0 horse bAinSo2I3HI_0 person bAinSo2I3HI_1 person bAp653-8UZI_0 person bAtWugkhW88_0 bus bAutb-z3rvw_0 cow bAwVg4MVWds_1 elephant bAwVg4MVWds_0 elephant bAwVg4MVWds_5 elephant bAwVg4MVWds_9 elephant bAwVg4MVWds_10 elephant bAwVg4MVWds_11 elephant bA2bnjEnbus_0 person bA4v5gLC700_0 person bA5elX54rTQ_0 cat bA6JRlAu2yE_0 person bA8lz4kTY-0_0 bicycle bA8lz4kTY-0_3 bicycle bA8lz4kTY-0_5 bicycle bA8lz4kTY-0_6 bicycle bA_NwRpP6Tw_0 person bA_6OElyKFo_0 train bBPPJNf59kQ_0 umbrella bBT4o_qtgWU_0 person bBgRYIPlqAQ_0 person bBm9VYnMO9g_0 bird bBt5A6pwnxY_0 person bB1rIuXXQFA_1 bus bB4Xm1LS9CI_0 dog bB6PWM19eMo_0 person bCB5mMgiGnk_0 person bCRN4AZbr6o_0 train bCbqiJ6Ales_0 person bCuWk5NSB0k_0 person bCuWk5NSB0k_1 person bCuuL9wxM7E_0 person bCuuL9wxM7E_1 person bCvbst3iM94_0 motorcycle bCwUgQIL5cE_0 knife bCx54wbopXs_1 horse bDBjT69DcT4_0 cow bDEPo_ZJ8BY_0 truck bDJyFQqK69A_0 person bDOeksOYoHc_0 truck bDOeksOYoHc_1 truck bDOeksOYoHc_2 truck bDOeksOYoHc_3 truck bDO5jSIN9C4_0 person bDZrANNzYZY_0 skateboard bDaTeoyWI4g_0 train bDcapf9qqwU_0 person bDjiXPhFyUA_0 person bDu9DwJEoHs_0 cow bDu9DwJEoHs_1 cow bDu9DwJEoHs_2 cow bDxvHkJLr2M_0 bus bD9LGwYECDw_0 cat bD-NwifgK0w_1 skateboard bEDI6tCMZXU_0 person bEIh6sX-Tl4_0 person bEKdkY9RBEY_0 person bEM1_c0lvzs_0 bear bEOBKFTwR2Q_0 giraffe 
bETxZfOvyHY_3 bear bEUZ0kW5UxE_1 person bEawSJKPt-Q_0 person bEhFibV8au4_0 person bEqXwB3xaWk_0 person bErIbiSkE10_0 skateboard bEwALd1GaT4_0 bicycle bEzk1Y4QUKs_0 bus bE2p5KejqaA_0 person bE54N9ho-us_0 elephant bE9RuKWeuuo_0 person bE--xARlZGI_1 bird bFA9McooYzo_0 car bFCSt5rQdmU_0 person bFEO4MHzBto_0 person bFIAwyZ6uuE_2 person bFIAwyZ6uuE_0 person bFIAwyZ6uuE_1 person bFNUtoXNMlQ_0 bus bFORQXIUbxA_0 person bFXutLP--Cw_0 cow bFXutLP--Cw_1 cow bFXutLP--Cw_2 cow bFXutLP--Cw_3 cow bFYfbtcZvsM_1 horse bFe5fer15nk_1 bus bFm95kiEE_Q_0 bicycle bFnZbMhDMQ8_0 person bFrVmI5XvFw_0 person bF2D0pMJqLQ_1 knife bF65L0Tc9w8_0 person bF8lUYDQNgc_0 person bGCRyP03o54_1 skateboard bGFqTDkSuTA_1 bird bGMKF81Sy6c_0 person bGcugFPOZ98_0 person bGeFOznVAdA_0 elephant bGmggiJ7Hrk_2 boat bGpuuVQyMOY_0 person bGsY4wldptk_1 horse bGsY4wldptk_0 horse bGyLNR-ZWRY_1 cat bG7btkvllWc_0 skateboard bG9Q1zv6YZ4_1 person bG-X3irBEO0_0 person bHALJVsPIWo_0 person bHBuapxTSS0_0 person bHB5zkcU4DY_0 person bHO746jxL2Y_0 skateboard bHP9bh7-qNQ_0 truck bHWmtSkc1qY_0 person bHWmtSkc1qY_1 person bHbgFvCFkb0_0 horse bHcNLuPTrTk_0 person bHcbcNIxs_o_0 knife bHdxB4LnmGY_2 motorcycle bHdxB4LnmGY_0 motorcycle bHdypdEXRYY_0 skateboard bHim6VG9R7E_1 boat bHoVPJGd7EU_1 truck bHoVPJGd7EU_2 truck bHoVPJGd7EU_3 truck bHvVd9-u80E_0 person bH5d5crxmiw_0 cat bIFUXEvQb_4_0 truck bIFUXEvQb_4_1 truck bIV7YZEPqTo_0 person bIiV4e5w280_0 person bInwFKVbP2c_0 person bIqcbjzOQ0Y_0 car bIslKUiw6YQ_4 airplane bIslKUiw6YQ_0 airplane bIyfjvesRuY_0 boat bIzzvd9q2po_0 cow bI19pnS1D7Q_0 motorcycle bI8htXUqQkI_0 cat bJADjJacbIY_1 person bJAxqtGR-MY_0 person bJBnGIqBiuw_0 horse bJDJ5yePi6M_0 person bJITjrxz5Ns_0 person bJI1844s-tU_0 horse bJKrgOW0nMk_0 person bJMS4sT7XRo_5 horse bJMS4sT7XRo_6 horse bJMS4sT7XRo_8 horse bJMS4sT7XRo_9 horse bJMS4sT7XRo_0 horse bJMS4sT7XRo_1 horse bJWTtXkyZHg_0 person bJcrA1AOfI4_2 train bJcrA1AOfI4_3 train bJfHVvueTbo_0 person bJh3iPv6jYc_0 cow bJqhWaDN0hQ_1 dog bJ0SdP6bjnQ_0 person bJ24-WqB1xs_0 person 
bJ6hIJWstDo_0 truck bJ6-RBgHmRU_0 person bJ8k9v22vJA_0 person bKBLXhOMUi8_0 dog bKCfbZIUSZI_0 person bKCjZrT7jIY_0 truck CJfXDO8EqQ4_0 person CJfXDO8EqQ4_1 person CJm40KxFN5E_1 person CJm40KxFN5E_0 person CJqFjtBvN9Y_0 skateboard CJqHpmU9iSk_2 person CJqHpmU9iSk_0 person CJrxPkQa2GE_1 train CJ0sXsga9bM_0 bus CJ35smVDZW0_0 person CJ4qgeMiaOQ_0 airplane CJ6n8mmO1b4_0 cat CKB_--5AbfU_0 train CKC6BopJKyk_0 person CKGpdOkI6P4_0 person CKNmSha1fz0_0 person CKQHLTDcKyk_1 bird CKSN1SlM9ug_0 cat CKZ1xRX4dh8_4 knife CKcBs841bV0_0 person CKhADB_ssaI_0 elephant CKjQxzl__Fw_0 bicycle CKkp1wLGtks_0 person CKmTbQn6J9U_1 person CKsvfQdlYfo_0 person CKuBMM3fZ84_0 airplane CKxmvXSrPIg_0 bicycle CKzh_WuJFng_0 person CK29cIxMNP0_0 person CK39c3vr6gc_0 skateboard CLAjvvAM-K4_0 person CLB6UiAOkP0_1 bus CLMUcOgZdNQ_2 cow CLQOTITDBeo_0 person CLXlbsB7sLY_0 person CLdyznsISW8_2 car CLosaFzMFeI_1 person CLzV3TNXkFo_0 person CL1Bt58elWc_1 person CL1Bt58elWc_0 person CL1z2IBwWkA_0 person CL1z2IBwWkA_1 person CL4fc23TpVo_0 person CL5zmQikk-A_0 person CMBw6j8-QzY_0 person CMBw6j8-QzY_1 person CMIMzbsGXk8_0 bus CMLOYaDEQ9g_0 person CMMGX4SFyIs_2 person CMOEwqoxxwo_0 person CMP-dHylUas_1 person CMlE5HjD19w_0 truck CMlNU8W7Lsk_0 cow CMrJ3Hog9z4_0 elephant CMrJ3Hog9z4_1 elephant CMrJ3Hog9z4_2 elephant CMsMnTwn9o8_1 truck CMwy_JpVNwc_3 bird CMwy_JpVNwc_1 bird CMwy_JpVNwc_2 bird CNDd5De0h98_0 person CNEdjudh1lE_0 person CNID7GMZCtU_1 horse CNiuz-9TxDo_0 person CNqKVUmynPk_0 airplane CNt_itMBqgs_0 person CNua3gOk0oM_0 bus CNwRXN4wSAk_0 knife CN6-VQgDfe4_0 person CN8AktLgwN8_0 giraffe CN8AktLgwN8_6 elephant COAed-b3LTY_0 person COFcQrVSFcc_0 person COTylrR16zU_1 boat COc8fmI9wQ4_0 horse COh7aoqTWjY_0 elephant COj_p56dMLI_0 motorcycle COksm121JZ0_0 train COxq73j4_rY_0 person COyU6vUfxXQ_1 person COyU6vUfxXQ_0 person CO2cK7r8MNQ_0 person CO33VpWw45s_0 skateboard CO_0l5Z12kw_0 cat CPManZ0i9vw_0 truck CPN9sc_XrbM_0 elephant CPOp_zZsQJk_0 cow CPQXOFjv2LM_0 person CPXyJXYL8yY_0 motorcycle CPXyJXYL8yY_4 
motorcycle CPYxpWVVj_M_0 cow CPZSesZALiI_1 cat CPuy90LHgrc_0 bus CP3cZfEx36E_2 bear CP3u7XjYteQ_1 person CP3u7XjYteQ_0 person CQEjDKzTc3Y_2 person CQE_vEzLzMQ_0 person CQPAMu_3qwY_0 bear CQUUCXr0Idg_0 person CQU9LkJ1PlA_0 person CQU9LkJ1PlA_1 person CQbUivUBlJ8_1 bear CQbUivUBlJ8_3 bear CQihoSP1KLM_0 person CQite5jXihw_2 person CQlL5sCIaM4_2 train CQlL5sCIaM4_0 train CQlL5sCIaM4_1 train CQmCFDEszdc_0 cat CQyxRGB9-_o_1 elephant CQzQkumb_iw_0 person CQ0hdku_Mu0_3 elephant CQ0hdku_Mu0_4 elephant CQ0hdku_Mu0_6 elephant CQ0hdku_Mu0_8 elephant CQ0hdku_Mu0_11 elephant CQ2pa82Muc4_0 person CRGhEOLOPLw_0 bus CRHfpplogUY_2 car CRHfpplogUY_1 car CRPfcUOT10Q_0 train CRQ8kzUgpGE_0 cat CRS3P9ePDug_8 train CRS3P9ePDug_0 train CRS3P9ePDug_4 train CRS3P9ePDug_7 train CRS3P9ePDug_9 train CRS3P9ePDug_1 train CRYLa0UnCJY_0 dog CRZQQc-7Cr4_0 person CRZQQc-7Cr4_1 person CRcL9sc8Z_Q_0 person CRihNgUldQg_0 person CRpG5Auclh4_0 train CRscoQhOT24_0 elephant CRteSMMhdfo_1 person CR2Qbth78ug_0 person CR7gNMR7aFk_0 person CSBnYbN-fwQ_0 person CSBnYbN-fwQ_1 person CSCN35ZL4gk_0 person CSCmLaLpgec_1 train CSGkGWkJnIo_0 person CSKOzx-8MRM_0 person CSKhQtYbLiY_0 person CSTEfDaVq_w_3 horse CSgIyZrF2Xw_6 bear CShE1WLp4V4_0 person CSlYtyS3ekI_0 cat CStjlkpuH8I_0 knife CSwiprmAnWk_0 person CS4LhFaTdRc_1 person CS4TVHuh-OI_1 person CS4TVHuh-OI_0 person CTBCSXpoCNw_0 knife CTGjM7vaWkc_0 car CTNN0vCWthk_0 cow CTOTTFDvM9g_0 elephant CTOTTFDvM9g_1 elephant CTpK5Ywqj4E_0 person CTtActqncZs_1 person CTtActqncZs_0 person CTty0Fesx4k_1 elephant CTty0Fesx4k_2 elephant CT6O84zfmoY_0 person CT8VKdB074U_0 dog CUB_Y4U0gNU_0 person CUE1Oj2b7oo_0 person CUIv9zU0_7M_2 dog CUQZtS7SlyM_0 truck CUVQtlpfthI_0 person CUVqn-7LP_k_0 cat CUjEVN0BT58_0 person CUjbAz30mdA_0 person CUvi-gOiEak_0 airplane CUvi-gOiEak_1 airplane CUzrNlKejnA_0 person CU-5HeXnZag_0 person CU_cxu2KrzY_1 cow CU_4MsJSWGw_0 horse CVCPdF3TevY_0 car CVJEcVS63rM_0 person CVJu9kpxa0o_0 skateboard CVQq3Lnsmb8_0 skateboard CVRQkAzvHOI_0 cat CVXbWRarjGI_3 
bicycle CVa-tmxG3G8_0 bus CVfXcK9LvU4_0 cat CVnQzQjIfdo_0 person CVnQzQjIfdo_1 person CVtUo7t1tg4_0 knife CVtdQUWrMFo_0 person CV1gdpxyUvQ_0 umbrella CV7yBA-RY-s_0 person CV9Mv-Z5ywo_1 knife CV9_qaQ3bOc_0 dog CWNPg3hbbCc_0 person CWRUw47fnHQ_0 dog CWcpGIObSb4_0 person CWcpGIObSb4_1 person CWhtecFS3Ps_0 person CWh66yU69HI_1 person CWq2nbpnjkw_0 person CWsgkyp-Wv8_1 person CWsgkyp-Wv8_0 person CWu6nT2qW2Q_0 person CWydCxGJyck_0 cat CW0GVWegie4_0 person CXEi_k33z08_0 person CXF-MNV21Uw_1 person CXF-MNV21Uw_2 person CXF-MNV21Uw_0 person bKIEzYSD9LU_0 bird bKM4LmiXX5k_3 knife bKM4LmiXX5k_0 knife bKQQdBiIraA_0 dog bKT6s25xsS4_0 person bKh8FyKvOq8_0 umbrella bKic74m-XKg_0 horse bKnsY1ytgqc_0 person bK0HzQHKqhg_0 motorcycle bK0HzQHKqhg_1 motorcycle bK0IN2qoSjQ_1 person bK7Wo0UxDyQ_0 person bLBmIVS2T-0_0 person bLLFtAMqoF0_0 person bLOW53I2oWw_0 knife bLU0G55kWgs_0 car bLU0G55kWgs_1 car bLYGpYiiF7Q_0 person bLg0SdwRkKc_0 person bLneVyHWwdk_0 person bLoyRVgQcTk_0 cat bLoyRVgQcTk_1 cat bLoyRVgQcTk_2 cat bLs4dUFZzcQ_0 person bLs4dUFZzcQ_1 person bMEbcFBdRsA_0 airplane bMM1OZMZ_WY_0 person bMNzE6F4WK4_0 truck bMPPnTHvu8c_1 cow bMQlfzj9vCE_0 person bMZPcnVc1K0_0 person bMakr2vwfqQ_0 person bMdfLBSo6jw_0 bicycle bMfQw6tBALo_0 cow bMgWjlwilqA_0 bicycle bMk8JyTyvUo_0 skateboard bMojajeogfY_0 person bMphaUsZuqU_2 elephant bMrDB2JI0QM_0 elephant bMuSXdxvaWY_0 bicycle bMumJTM0f28_0 person bM3OcevX9F4_0 person bM6fRimkPZg_0 cow bM6peJ4lQyU_0 elephant bNGoGllCEj0_0 car bNGoGllCEj0_1 car bNJ5ygVB-GI_0 person bNPtMp-AuhY_5 train bNPtMp-AuhY_4 train bNR89JLsh7Q_0 motorcycle bNZe9vwuE8E_0 car bNcTCIgwqNY_0 boat bNinDD5s0LQ_0 person bNo2RseLYYs_0 person bNqXgNLQX3s_0 person bNtivYIWtQE_0 person bNtivYIWtQE_1 person bNtivYIWtQE_2 person bNyyHqBZnmQ_0 airplane bN4vggzwxWI_0 person bN-epcJfRJ8_0 person bOPvxhSlnZI_0 truck bORQv_d22gA_0 bear bOTYFfq_264_0 person bOXM6ibmbG0_0 truck bOarvmUMdLs_0 person bOarvmUMdLs_1 person bOb4k6pTF-k_0 motorcycle bOeUzXPOIWw_0 motorcycle bOfrPHjROWI_0 dog 
bOm9Qgnl2KI_1 umbrella bOor15z5M5Y_1 truck bOuuxRt7ugE_0 bear bOwOVcqeajs_1 boat bPAO0nyCO8Y_2 cow bPLKx5uJaZY_0 bear bPTTPAsH7v8_0 airplane bPZdC3oRr1c_0 dog bPanGwtU82U_0 airplane bPavgNJxZnI_0 horse bPavgNJxZnI_4 horse bPcXQrlHs60_0 zebra bPddyJH2fm4_0 cow bPeFwxV66_s_1 cow bPfaS8RIHVw_1 train bPjZsDes9ck_0 bird bPvvA8Wm5Ts_0 person bPw91vtx0rY_0 dog bP17881jyH4_0 elephant bP17881jyH4_2 elephant bP17881jyH4_1 elephant bP6QvQUfZSI_0 person bP7ZU4wl_xs_1 person bP7lN2WyBTg_2 bird bP7lN2WyBTg_0 bird bP7pux4nQa4_0 person bQJQKEfdctc_1 person bQKuVB3YmRI_1 knife bQNLK-43XKM_0 person bQNXrSVq4r4_0 person bQQS-amRhxU_0 person bQQr8FzMTHE_0 person bQR6KxB4qjg_1 train bQWO4r5DLWY_7 bicycle bQWO4r5DLWY_8 bicycle bQZ8WQ2mS9o_0 horse bQd1k1RNZZA_0 person bQwDt3XOok0_1 skateboard bQy9W_tIPJg_0 cat bQ7FEMZ309U_0 bicycle bRElYolSzbI_2 horse bRKfUmz_7hE_0 bicycle bRKfUmz_7hE_5 bicycle bRP4TElBetA_0 skateboard bRUtCCY00Yw_0 person bRd_NGjRFpU_0 cow bRgNc063rsk_0 person bRgNc063rsk_1 person bRiVaIWzo4k_0 person bRiVaIWzo4k_1 person bRpbblTb1VU_1 person bRq06zdCv4k_0 dog bRsjD1GTjeE_0 truck bRuSrTOibGY_0 skateboard bRw2PFlL8l8_0 cat bRxyuZTXkWo_0 person bR61bP65wdI_0 person bR_EeaX8Kns_0 cat bSC7MwTZ0Og_0 person bSJbBDA3-rI_0 person bSJbBDA3-rI_1 person bSSSYoS7HhY_2 person bSSX8qJnGak_0 person bSVCTx_L7lU_0 person bSbZuDkimC8_1 cow bScFgdC-DH8_0 motorcycle bSkEsUu7aBI_0 cat bSqX5D_GrEc_0 person bS4mTtP-Ud4_0 person bS4mTtP-Ud4_1 person bTAxiISsPNE_0 cow bTHRXr-yw54_0 person bTOZp15gd24_0 airplane bTOZp15gd24_1 airplane bTOZp15gd24_2 airplane bTO9Pid9808_1 cow bThFysASYJg_0 person bThX-5t7OWM_3 bus bTl-dt761p8_2 bird bTp1hk4dhPE_0 person bTuho6CpJpg_0 horse bT7mzx9P1Yo_6 bird bT7mzx9P1Yo_8 bird bUDYPhSFyyw_0 airplane bUFCsL247kY_1 person bUFCsL247kY_0 person bUIov_O62GU_0 train bUVi7VVygmM_0 person bUa61WY6E38_1 person bUu6iW_nRvM_0 person bU8cBepgoMY_4 elephant bU8cBepgoMY_1 elephant bU8cBepgoMY_3 elephant bU8r7rNDaHQ_0 motorcycle bU8r7rNDaHQ_1 motorcycle bVCLNxl4PQY_0 person 
bVPgCZmg1CY_0 person bVTzUiTPtww_0 person bVZixqlT1AI_0 person bVbT4F3I0s4_0 person bVbdO8rj6TQ_0 person bVbdO8rj6TQ_1 person bVdjQbIzOGc_0 horse bVgKe0-_228_0 bear bVkYqw1YJ6c_0 person bVnmeQsd3xk_1 car bVph6GZ3jLE_0 skateboard bVrck_XYsR8_0 bicycle bVtMukuPx9A_0 motorcycle bVtWuhD1L1s_0 car bVvVMOxHOT4_0 cat bVwWkzYdrvk_0 person bVw9txmBeX0_0 person bVz-pHuWNfc_0 person bV3UXbGCshc_3 elephant bV3UXbGCshc_4 elephant bV3UXbGCshc_0 elephant bV3UXbGCshc_2 elephant bV8k_w0cphI_0 person bV9tUYWi-9o_0 truck bV9tUYWi-9o_1 truck bWCW4QZTIXE_0 person bWCxObc3uVo_0 person bWEnwFThRlA_0 person bWEtMBeQQCA_0 bus bWEw8rNQ-kI_0 person bWJg9jatoBY_0 person bWLcKJauKIs_0 person bWO4NBx37Vk_4 airplane bWdWgIB371Y_0 person bWdWgIB371Y_1 person bWkKy-_YzW8_0 umbrella bWotjBNgmiA_1 motorcycle bWotjBNgmiA_2 motorcycle bWo4CzHWaZ8_0 dog bWqayCqhqVQ_0 person bWtXkAzA6zE_0 person bWtXkAzA6zE_1 person bW1JoZnZpXs_0 bicycle bW1JoZnZpXs_2 bicycle bW2I1hUiWgg_1 bear bW2I1hUiWgg_3 bear bW2I1hUiWgg_2 bear bW6PJACBEFo_0 boat bW6PJACBEFo_1 boat bW7x14tLsxU_0 cow bW7x14tLsxU_1 cow bXGa-FIGViQ_0 truck CXOKkaurfXo_0 person CXVmfrDfalE_0 person CXVyHpmc_fU_1 cat CXXWvUVLBBE_1 train CXXWvUVLBBE_3 train CXaF0E3wEzI_4 boat CXaF0E3wEzI_1 boat CXaF0E3wEzI_2 boat CXdGDPRtlo4_1 cat CXdjIo4q-w4_0 dog CXoeLQPShqU_3 horse CXoeLQPShqU_0 horse CXrwHki5ShI_0 person CXw5HMRQwEk_7 bear CXxPPuZcT2k_0 knife CXyujV2S5aE_0 person CX1US3Y-2jI_0 person CX5Y01eJ_g0_0 knife CX838M4iPkw_1 bear CX_YxpWurRk_0 person CYEtgx1uVTM_0 train CYEtgx1uVTM_1 train CYFtiy8FtgM_0 person CYGBUw8HZ8Q_0 person CYKbj5BgaiI_0 person CYPFpTJXCp8_1 person CYXd3muNlJ8_0 person CYcxxdqG02k_0 person CYcxxdqG02k_1 person CYghFhQySik_1 person CYghFhQySik_2 person CYghFhQySik_0 person CYg8fy66poA_0 train CYjEASXRoys_0 person CYkow-sm2pA_0 person CYmpj4UFFtA_0 cow CYsgb4GhJ_0_1 cat CYtehjvIIIE_0 cat CYw9ONxIi0M_4 bear CY3-VTI7lQU_1 cow CY48729zIgM_0 bus CZAt34OJpoI_0 elephant CZGoAqWEDQM_1 horse CZJz6zZt3cE_0 person CZXHqexGqfI_0 cow CZduQndn_Eg_0 
train CZfMxEFk9hc_0 motorcycle CZfe1GuZxPI_1 person CZws8sfLA8M_0 person CZ8bjG4wdZU_0 person CZ9MT7tZZ2E_0 knife CZ-Kodbg_2A_0 bus CaA-PFuqaXw_0 truck CaFlo5YQHXw_0 train Cag3vCKRh6c_0 bicycle CajF9IxbOvI_0 person CajF9IxbOvI_1 person Cam_wHie6XQ_1 person Ca4_dI-Ii8o_0 person Ca5GzZ-rifE_2 horse Ca5GzZ-rifE_0 horse Ca5GzZ-rifE_3 horse Ca5mOzqFz70_2 bear Ca6g367yxss_3 dog Ca9JsTGifmQ_1 knife Ca-l5zpgIL0_0 horse Ca-wDaXxSn8_0 train Ca_LwXljv5I_2 dog CbBrv9GkBDM_0 person CbKVR2EGoWU_0 cat CbO4r5w5NEM_0 cat CbTbpHHYfGo_1 cow CbYQk8GFQwY_0 person CbYXzAv9G40_0 person CbZA75LYWsk_0 boat CbZA75LYWsk_4 boat CbZA75LYWsk_7 boat CbZA75LYWsk_8 boat CbZA75LYWsk_6 boat CbbsxxHKQBs_1 bicycle CbbsxxHKQBs_3 bicycle CbfML92fBFc_0 person CbrOGI6D5oo_0 dog Cbz0hgvZtyM_0 person Cb0EbSTABAw_0 person Cb31aGVbcGE_0 person CcJ-51mUw00_0 person CcNfpk8tVxA_2 person CcNfpk8tVxA_0 person CcNfpk8tVxA_1 person CcadL-XHA8w_0 person CccC-FK79hM_0 skateboard CceETksmvEc_0 bus CcfAKl1kCRM_0 person Ccl3EZzzNhc_2 bird Ccl3EZzzNhc_3 bird CcmiWGPbuT4_0 car CcyRYeSG3sQ_0 truck Ccyqd4ZzDtQ_0 person Cc5DUip1-eE_0 person Cc9-Kd--ejs_0 car CdA-Gg7O6d4_0 person CdD0W0pS7gk_0 skateboard CdG8sd9UZFM_1 elephant CdG8sd9UZFM_3 elephant CdOwMZqCiMs_0 bird CdRgo9V_e_U_0 person CdTDo40rdz4_3 umbrella CdVnK1TcGcQ_0 knife CdW2qTShGbY_2 person CdW2qTShGbY_1 person CdYkEASWMqQ_0 person CddXUsFqg4Q_10 bicycle CddXUsFqg4Q_12 bicycle CdeUORbvfgs_0 person CdkbBdQwTX0_0 person CdmrCOVxj8c_0 person CdosWRXaOgY_0 person CdtY-oTmACc_0 elephant Cd3qxnZC6s4_0 airplane Cd8dfcT-D9U_0 horse Cd8zY0wsrLc_0 umbrella Cd_ZgXZ7qKw_0 person Cd_ZgXZ7qKw_1 person CeCnRUGvs9Q_1 horse CeEMUoHNeVA_0 person CeICmGeQXOk_0 motorcycle CeICmGeQXOk_1 motorcycle CeVjsWpfoCY_0 person CekBpSMLr08_0 horse CetmVa_LV2A_0 bird Cetw-N1I1bA_0 dog Cew6y9K7ynI_0 cat CezGmkW4sRY_0 person Ce1tW6uV_lw_0 person Ce1tW6uV_lw_1 person Ce_dgPawIkU_0 person CfC--i0DQ-o_0 car CfThv8Vk-oM_0 umbrella CfbzDUZ6PyQ_0 truck CfqtCB_f_Z8_3 skateboard Cfwk3niR9Uc_0 motorcycle 
CfyvbbrxquI_0 cat Cf_GVLLQaTA_0 person CgB0fwUOZd4_2 bus CgDcN1Lk7ag_0 car CgDcN1Lk7ag_1 car CgDcN1Lk7ag_2 car CgDyrbc-LLo_0 person CgHCCqADKys_0 person CgQl21vwrqk_0 person CgQv6o97KqY_0 person CglmlO92nKA_0 person CglmlO92nKA_1 person Cgod2p17L48_0 person CgwHXWDGAak_1 person Cgzt1Kv6Sqg_0 cow Cg9H20lr5Uk_0 person Cg9H20lr5Uk_1 person Cg9H20lr5Uk_2 person ChBKKPEO8N0_0 person ChOKPIVr5XE_2 bicycle ChPBGkSbJ0g_0 elephant ChRNCk9Bq-k_0 cat ChZB3vAX8sk_0 person Chc7poZ9r-k_3 skateboard ChmcE3Lz1Vc_0 person Ch2_CQg4r1o_0 person Ch-PosNzqZ8_4 elephant Ch-PosNzqZ8_0 elephant CiCqdFq_a7U_1 person CiCqdFq_a7U_0 person CiLbnwjSJ9w_0 person CiQOmR8VCzs_0 person CiQOmR8VCzs_1 person CiQS0RMaLZQ_0 truck CiT09gfBJPA_1 person CiVwjoLvdAs_1 horse CiWhBWV1zGM_0 cow CiWhBWV1zGM_1 cow CiYOn9VW1eY_0 horse CihCAad2Duo_0 person CilRWTfS8e4_0 person CiwaaMNfvCo_0 airplane Ci0S27Qp1w4_0 cat Ci2vW1OGHe0_0 cat Ci6mTJ6BqYI_0 person CjJ3l2smqAc_0 person CjMaorKuwf0_1 horse CjRX9J2BM4Y_0 skateboard CjUf3D9IsCQ_0 person Cje7Ip85T1I_0 person Cjm9Wky44TM_0 elephant Cjm9Wky44TM_1 elephant Cjn-mt97y-w_0 person Cjq3dda3PlA_1 person Cjq3dda3PlA_0 person Cjw2f0M_eB8_0 bird Cj1CpXDG_Qw_0 person Cj3PTZcRbd4_0 person Cj3ZEx4SDe4_0 cow Cj-a9t9yiiA_0 person Cj-a9t9yiiA_1 person CkBGaJnF9vo_0 person CkC43WVctnk_0 cat CkKQhDP2FGY_1 person CkKQhDP2FGY_0 person CkKQhDP2FGY_2 person CkLE-s6CsgY_0 cow CkLwgOIBF_I_0 person CkLwgOIBF_I_1 person CkP_70u-2zU_1 boat CkX8laawskQ_2 horse CkZeki9RVDI_0 person CkZhHtevDk8_0 person CknHFY05prw_0 person CkoK8C4Rzj0_0 person CkvEr5T38Wc_0 person CkvEr5T38Wc_1 person CkvEr5T38Wc_2 person CkyU5jU74Js_1 dog CkyU5jU74Js_0 dog Ck8GRgUrpoE_0 person ClBCXl7l2pw_0 skateboard ClH2-R5LeVo_0 cat ClLZcmIHrTw_0 person ClM3Ftm0S7o_0 cow ClRLFlpMUhU_1 horse ClSzHW4AuJ0_0 person ClV1oHNuF9o_0 person ClV6A8WNCvw_0 cow bXcKQNGRBvw_3 airplane bXcKQNGRBvw_0 airplane bXcKQNGRBvw_1 airplane bXcKQNGRBvw_2 airplane bXjVvJ8eOJc_0 skateboard bXkjwotai0Y_0 bicycle bXnvGCFA9Dg_0 person 
bX9TcejzzTM_0 person bYCvd_BTMsk_0 dog bYE-vUOh10s_0 boat bYN8lkupLt4_0 bird bYQiCAwebzs_1 bicycle bYSbuWYiixQ_0 person bYVgzwF1hNw_0 bicycle bYWGnwi8nDQ_0 motorcycle bYm9aUK2zzk_0 person bYpG750b7pE_0 motorcycle bYvzSXZ0w_I_1 person bYwwOO6vMAw_0 person bYwwOO6vMAw_1 person bYyFEbIGMfo_1 dog bY3sDu5BZDI_0 elephant bY3sDu5BZDI_1 elephant bY3sDu5BZDI_2 elephant bY6vPIaJDGA_0 person bY8BdyCsCAw_0 person bZL41d9eFyc_0 cow bZRpdnJtcT4_0 train bZRpdnJtcT4_2 train bZVMygQQgNg_0 person bZVZbn0oTjo_1 giraffe bZdq8Rk75M8_0 knife bZgZihlL0IU_0 person bZsoMlw4CnI_2 bus bZuOWV67gnY_0 cat bZwJl6ye9Cc_0 motorcycle bZwJl6ye9Cc_1 motorcycle bZzzlD0C8Jg_0 train bZ2u1x38Qbg_1 airplane bZ6gk6FLGss_0 person baDesUZ9Pyc_0 bear baRyXrRn_ls_1 motorcycle baWLnj87FOc_0 cat babQ3FBdeqQ_0 cow bagbzsb-tg4_0 person ba1hwKdPRx8_0 cow ba3cGHmc_OA_0 person ba5407XQYAQ_0 cow bbHdRyrdpDA_0 boat bbH4CQx07Go_2 knife bbLW6902ITg_0 person bbLW6902ITg_1 person bbLW6902ITg_3 person bbLW6902ITg_4 person bbM0SbH_pgk_2 bear bbZAdo3awRs_0 car bbZeVbzmLVw_0 elephant bbaUzB0Na2o_0 person bbfDHSIT9ys_0 person bbhyEgEjfvQ_0 cow bbjuucY5QQc_0 person bbkjnF0iGrs_0 horse bbkjnF0iGrs_2 horse bbkjnF0iGrs_3 horse bbkjnF0iGrs_6 horse bbnb-beW0p0_0 horse bb0DRm0ueKk_0 horse bb4sgALviyc_0 bear bb5OO1wMKr8_0 person bcJ1MAj_A_w_1 person bcLW7YqnUGs_0 skateboard bcdQmV1-Z5k_0 motorcycle bcgTPCycRIw_0 skateboard bcksTLjC1fs_0 motorcycle bcrQdxrU_vI_0 person bc1C8HrNVqE_0 horse bc28CjoKODI_0 person bc28CjoKODI_1 person bc3rySF6iao_0 person bc6jeLN-DUo_0 train bdU9JALjnmw_0 person bdYKw4SpkQQ_0 zebra bdZpXHSW4Ps_0 cat bdbVAdua3uI_0 airplane bdbVAdua3uI_1 airplane bdcoNmelRw4_1 dog bdcoNmelRw4_2 dog bdcwT2ufUBg_0 bird bddes6RyfCI_0 skateboard bddes6RyfCI_1 skateboard bdeoe5gmCd4_0 elephant bdeoe5gmCd4_2 elephant bdgSMIY2A8Q_0 horse bdoNsiMM1RY_0 bird bdwlZMpXPJo_8 bird bdwlZMpXPJo_7 bird bd--DVCeT-s_0 cow beE5VOzxibM_0 giraffe beLTv9YiY78_0 dog beLTv9YiY78_1 dog beLTv9YiY78_2 dog beQOHdCA8KM_16 elephant beQOHdCA8KM_3 elephant 
beQOHdCA8KM_6 elephant beQOHdCA8KM_7 elephant beQOHdCA8KM_10 elephant beQOHdCA8KM_12 elephant beSTl1azmTY_1 skateboard beVVM2pBQdA_0 cow beVVM2pBQdA_1 cow becTICXjrg4_0 person beliMXc3JE8_0 train besXR1P9Oew_0 car beu-edT1daM_0 person be9BCy6kHvY_2 person be9CXLatX9I_0 horse be-ggiVD4V0_0 knife be-5ARU_aHA_0 person be_IhYef3hE_0 person bfBZLLwpNWA_0 giraffe bfJaD1qZ2gE_0 bus bfJaD1qZ2gE_3 bus bfJtapJ86Gw_0 person bfRgL9oanEc_1 person bfRgL9oanEc_0 person bfS8FB_HOlY_0 person bfZfMA1mLrQ_0 dog bfZfMA1mLrQ_1 dog bfaMdaYiK90_0 cat bffC89pE6fo_0 person bffC89pE6fo_1 person bfkNVFr6Cwg_0 cow bfkNVFr6Cwg_1 cow bflVgDgAHSo_0 umbrella bfrY2wEePwY_0 person bfrY2wEePwY_2 person bfwWF0XO7bE_0 boat bf9YySHJcdQ_0 person bgAOYaooc18_0 person bgAo5vgwe2M_0 zebra bgBK4sMnLig_0 cow bgBK4sMnLig_1 cow bgC-r6p-XHU_2 elephant bgE_uy3Ml6g_1 umbrella bgHMLwWY4Qo_0 person bgV-FqQ8Tv8_0 umbrella bgXZ3BpIOh8_0 train bgaD7K2iEPI_0 person bgbS11O9lSw_0 bus bgelX1blhpQ_0 truck bglPgA_0LAk_0 motorcycle bgpB-A04RLI_0 person bgyEHsMav4U_0 person bhBMa8wQ5KA_0 bird bhGJ9gZmP90_0 person bhGJ9gZmP90_1 person bhH_pqCQ3Co_0 cow bhJGFbgXlts_1 person bhNfsUPLKDg_1 train bhWmpmnXSlc_0 person bhZZubkX8_o_1 bird bhdtzsUvieg_1 person bhqr680CLr0_0 person bhrOzwB-7qA_0 person bhsCCw1J_JU_0 person bhuOX61sk8M_0 person bhz6HG2KpnI_0 skateboard bh0ZZ4Z76cc_0 person bh3QacG9JYk_0 airplane bh3QacG9JYk_1 airplane bh3QacG9JYk_2 airplane bh8aMNVny8s_1 truck biAdsjypETI_0 knife biFm-y7gSrc_0 horse biGJ8vHOsZM_0 umbrella biLY6NMsqJU_0 cat biUFB3c0Ucc_0 bus biZU5SOHQvc_0 umbrella bibJ3Bv5YmQ_0 motorcycle bik9GuCughc_1 bird biuEbYnn68k_0 bus biwbqbVsZeE_1 elephant biyu3sxIOYc_0 person bi1kYvu5Irg_0 train bi1kYvu5Irg_1 train bi3GSUnfzd8_0 person bi5Bkz2MVP4_0 bird bi5Bkz2MVP4_3 bird bi6BNwvsR_0_0 person bi-GKlUZMR8_0 motorcycle bjBwCQ5z4IQ_0 cat bjH2OQR68Vc_0 person bjRQ69TaeKs_2 person bjgooTfy3JM_0 train bjgooTfy3JM_1 train bjgooTfy3JM_2 train bjhEqucWULo_0 cow bjq8de0pw5M_0 person bjq8de0pw5M_1 person bjrq_Kj-wSU_0 
airplane bjrq_Kj-wSU_1 airplane bjrq_Kj-wSU_2 airplane bjrq_Kj-wSU_3 airplane bjwdTl5zyaI_0 skateboard bjx96uw-Q24_0 person bj-Grf4s790_0 elephant bkElaSUqJjM_0 train bkIBcqXKARI_0 person bkMU7xViDvA_0 person bkXBjOrn2yI_0 person bkggwniG4vc_0 person bkiQTbQF_TA_0 elephant bkigtjV1zA0_1 motorcycle bklheVvsfac_0 truck bkoOiNz6Zmo_0 person bkok3wr4188_0 person bk2l-O9wSEc_0 person bk8UlOzFy7U_1 person blAiGXbJxmI_0 train blIpNvBakFI_0 person blW8z3TPVvo_0 motorcycle blhCjXE5cRo_0 person bli5Z83QY_U_0 person blnFzQdaVRc_0 person bluU1CAbJfo_0 person blubKbt8mLE_0 car bluqyqDv2eE_0 car blv0QslQ524_5 bus blv0QslQ524_6 bus blzDAgvxJMw_0 person bl1XJCtyP2E_0 truck bl2xZSpcZqs_0 cat bl6wIjxfuJo_1 bicycle bl6wIjxfuJo_2 bicycle CloG2hcM5nU_9 bicycle CloLHr7NJqg_0 person CloOQkTkYfY_0 bus ClpDLu1qCx4_2 person ClpDLu1qCx4_3 person ClpDLu1qCx4_1 person ClvAi34e1zM_1 elephant Cl1mEpQ3wy4_0 boat Cl1mEpQ3wy4_1 boat CmEoz728tlo_2 bear CmGSMnkcvrg_1 train CmIXZuJDwt0_0 person CmNv_yKt5oM_0 person CmOIqZyQpPI_3 bird CmOIqZyQpPI_1 bird CmVoggJ6fxY_1 horse CmYL2EyELbA_0 elephant CmezWT8A2i8_0 bus CmjUCOwcOT8_4 bicycle CmjUCOwcOT8_11 bicycle CmjjEuS9_Ww_0 bicycle Cmjw8kbfDCw_1 knife CmoknpL1cMA_0 person CmqXoT7CXJs_0 dog Cmq1qVX-Ugo_1 cat CmsqpFOcosw_0 person CmtmoydPH08_0 cow CmxhIEztsyg_1 skateboard Cm1y7USHcrg_0 person Cm3tYZlSc0o_0 skateboard CnBJ9TMTRAA_0 person CnBJ9TMTRAA_1 person CnCTVtsK5Kw_2 bear CnEXHgq3AE4_2 elephant CnGp9Wq2rTs_0 bear CniS9Q6Y200_0 person Cn0UKsWocEI_0 elephant Cn0UKsWocEI_1 elephant Cn1dXZ_p3dw_1 person Cn9Bj5B29UI_0 motorcycle CoBuNWx_OwM_0 person CoDB7ZeilsQ_0 person CoKMowfrd5Q_2 truck CoKMowfrd5Q_3 truck CoKVaYX3c1k_0 person CoKVaYX3c1k_1 person CoKVaYX3c1k_2 person CoOwm7ccDrs_0 truck CoSIyrW5lvA_1 skateboard CoSSvI2-U_w_1 bicycle CoZY8o0c-h8_0 elephant CoZY8o0c-h8_1 elephant CocSNWws-Qo_0 person CodelARKQ10_0 skateboard CosYvoW04Uk_0 person Cot7Xj8C308_0 boat Coz9g_0N91c_0 person Co_XBpd6lxE_0 person CpDHwc5JmK8_3 elephant CpFiT_6KvM4_0 person 
CpF-80dM2aY_0 person CpF-80dM2aY_1 person CpxxxHYsJy8_0 person Cp0lT2opaL0_1 person CqANE5ByBvY_0 person CqDjHjvw8T0_0 elephant CqDjHjvw8T0_1 elephant CqVeLNnA0vk_0 horse CqZz9FnLLjk_0 knife Cqkhrld_7LU_0 person CqzahbOVzO4_0 person Cq02-pFNn6w_0 motorcycle Cq02-pFNn6w_1 motorcycle Cq4KAVAWq7g_0 person CrAxPJajbcs_0 airplane CrCNqDd18fw_0 umbrella CrUmEDCjFtU_0 person CrUmEDCjFtU_1 person CraDHWuN4Q0_0 person CrgMhrCYmOo_2 motorcycle CriTKYemGmo_0 person CrmzwYKpLAY_0 umbrella Crn24ZKAP1k_0 person CrsjxpJoY5Q_0 person Cru8KBJqhng_0 person Crz3l2CEDzA_0 person Cr0SWcS1qX0_0 cow Cr_B3I0QPEQ_6 airplane CsM_GTD0TZE_0 person CsPLGd2dgl0_1 airplane CsTntmE8EWs_0 person Csa542XNEXo_0 person CsfkuwD6-nA_0 person Csh_4yR8bFk_1 truck Csh_4yR8bFk_2 truck Csii4vkefsM_0 boat Csii4vkefsM_2 boat Csw3kLrhjoM_0 person Cs38JY7Gqjo_3 skateboard Cs-Vx_ym23o_1 bicycle CtC2yC9NGTk_0 bird CtD4wnIU0Pw_0 bicycle CtF9IxfLhaQ_1 person CtF9IxfLhaQ_2 person CtF9IxfLhaQ_0 person CtHIoS1lGKA_0 person CtLVK2j48gA_0 person CtO5dmTdzYQ_0 person CtPEAoFPnE4_0 person CtQPPKpIEIc_0 person CtTcyoZvRvU_2 skateboard CtUPPSKU8cE_0 bus CtVUqIFqqr8_2 bus CtYDJRkhtpg_1 umbrella CtYDJRkhtpg_5 umbrella CtfPPnpBKHs_2 bird CtipU0GHAEo_1 elephant CtjTAe-FFe4_3 elephant Ctkjh9fntpQ_0 bird Ctkjh9fntpQ_4 bird Ctkjh9fntpQ_5 bird Ctkjh9fntpQ_2 bird Ctkjh9fntpQ_3 bird Ctnjw80kgcw_0 person CtxK3wGlqx0_2 motorcycle Ct1QrXUgBGg_0 person Ct1QrXUgBGg_1 person Ct8S9nC7sfk_1 person Ct870xrnBGU_0 person CuDfCpgoIjg_6 boat CuGfRQMwYd8_0 cat CuHF9Hd0uwI_0 person CuIkNejeZrY_0 cat CuUJUrjEcc4_0 person CuWdZPYMLww_0 person CvDW2A8hD78_0 person CvRJwKt7FfY_1 skateboard CvVVS4SUiuw_1 train CvZaA28QUK4_1 knife CvajmAL3sjQ_0 person Cvda-hutmbg_0 dog Cvqylkq9fwI_0 truck CvxsoaCV1_8_0 person CvzsX_s6tek_0 person Cv2T8U0uQcQ_2 person CwAdBrBzIcA_0 truck CwBiMh4zHWQ_0 person CwFcmrnz1yw_0 elephant CwFcmrnz1yw_1 elephant CwFcmrnz1yw_2 elephant CwR2tJptu0Y_2 motorcycle CwVLRawns04_0 person CwVTSONqnVw_6 knife CwnHi50fuuQ_0 person 
CwnHi50fuuQ_1 person Cw22-zpE1UY_0 person Cw3iLs4yV4g_0 person CxFRYsUCyWc_0 cat CxH8vGqLVM0_0 bicycle CxH8vGqLVM0_1 bicycle CxH8vGqLVM0_3 bicycle CxH8vGqLVM0_6 bicycle CxJ7Uww1mSk_0 elephant CxN5CG94Q5Q_1 airplane CxN-YEErXFg_0 train CxPyIeBtRec_2 truck CxWaiU0rF9g_1 cow CxWaiU0rF9g_0 cow CxXdw0Cqr4Y_2 airplane Cxa8q3QXoRs_0 person CxgqklOxSfo_0 airplane CxgqklOxSfo_2 airplane CxnCTBBNWCY_0 person CxnCTBBNWCY_1 person CxoZT0--IBo_0 person CxooWldim98_0 person Cxs-xZDDZWw_0 person Cxug83tjWyc_0 horse CxzJV_HYpAc_0 airplane CxzJV_HYpAc_1 airplane Cx0XeFKQ06o_1 train Cx7ZY8oqOmE_10 bicycle Cx7ZY8oqOmE_6 bicycle Cx7ZY8oqOmE_8 bicycle Cx9efnltcUY_0 person CyE1kuECzfg_0 person CyH0woBc0zU_0 boat CyI7nyp65bI_0 person CyI7nyp65bI_1 person CyLLTzV_lAg_0 cat CyOXSqLm7ao_1 person Cyb4-vF1WMM_0 airplane Cyedl__okwE_0 person Cyedl__okwE_1 person CynfaDsQ1AI_0 zebra CysFfEkdDT4_0 bear CytiPd_Wbkg_0 airplane CytiPd_Wbkg_1 airplane CyvInNqvQyE_0 truck Cy002CigJRQ_0 person Cy_hvqOd0RY_0 knife CzFRG22Jmvs_0 cow CzHeIzQZUEg_0 person CzNFSb4N6p8_0 person CzQ03Z7Dv5U_2 skateboard CzQ03Z7Dv5U_3 skateboard CzQ03Z7Dv5U_6 skateboard Cza2-_wwpd4_0 person Cza2-_wwpd4_1 person CzcwXF0Z1TQ_0 cow Czt8McI8UTE_0 person Czze2Jy6Ook_0 cat C0Tk6QryTA0_0 bus C0Tk6QryTA0_1 bus C0a9pkujXQg_1 person C0lvs-UEqKs_0 person C0pOQ36uosU_0 person C0pOQ36uosU_1 person C0qbh7OJTHI_2 skateboard C0tGKqnFyZA_0 person C0xTDmlUYSA_0 person C0xZYHsXNws_0 person C0xZYHsXNws_1 person C0xjvq51pVA_0 horse C0xl46ieUxg_0 skateboard C0zUOQoeQrA_0 person C0zrmcMf8D4_0 bird C05P4mCw-xA_0 bear C1DCcNlUQDk_0 boat C1DX9TjKTrE_0 bus C1MfcNYih9c_1 person C1RCXQFjvvc_1 person C1RCXQFjvvc_0 person C1bdSMUVy2Q_1 truck C1bdSMUVy2Q_0 truck C16ZlJRDfUc_0 bird C16_rFYBwUA_0 person C17jwrOnSCI_0 horse C19rR4b8CSQ_0 dog C1_gk-bIL6Y_0 airplane C1_tauCAYjs_0 person C2GvHXU8mIc_0 person C2HZBTrCAf8_0 horse C2Hcs2itPTc_1 elephant C2H_P7MX3zw_0 bus C2H_P7MX3zw_1 bus C2IJYHPWHJM_1 cow C2K7zu49SKw_0 person C2K7zu49SKw_1 person C2LdkQMjxJk_0 cow 
C2ROFMcXam4_0 cat C2S4CV9mnC0_0 truck C2VjZHe3ID8_0 person C2r9VGslxTE_0 person C2v7hcs3Ax0_0 zebra C2zRn25TBOo_1 airplane C2zRn25TBOo_2 airplane C2zRn25TBOo_4 airplane C2zRn25TBOo_6 airplane C23ZGYnWhgo_0 person C26HiGgIjYg_0 person C2-glFtt9Vw_0 umbrella C3LbuiUjzvo_0 cat C3LbuiUjzvo_1 cat C3LbuiUjzvo_2 cat C3Qu-KUydyg_1 cow C3UX9hrlLeE_0 person C3YcvZKgCgY_0 person C3terpXzPm4_0 person C3z1zbkmwdU_0 bird C30B6KXg9vs_0 person C3399zrSQ6A_0 horse C34_EkCWJaU_0 motorcycle C4HzsadhLW0_0 boat C4QHknuNLYI_0 person C4RAj-omUMo_0 person C4W_g9eheB8_0 skateboard C4XGGPoj4q8_0 person C4dV8SPq6Mk_0 person C4e-5QS1FmU_0 umbrella C4e-5QS1FmU_1 umbrella C4irKghQYTE_0 horse C4jghf6KKYI_0 skateboard C4vFHmzTY-s_0 cat C4xJ3_Wrrn4_0 train C4yVuAqcr0U_0 train C409K0fAxiM_0 person C42397qio9c_1 skateboard C4317zxtzKA_0 person C4-k1XW5O3U_0 dog C5DAyL_gEQU_0 cow C5GJx1VFRm8_2 cow C5HT9La1jDY_0 person C5JobuZa590_0 skateboard C5MJ8fSfmLw_2 bear C5dPwnswp8Q_0 cat C5jo-fCBqmA_0 person C5jo-fCBqmA_1 person C5jo-fCBqmA_2 person C5pop0SvnOM_0 person C5r41vkLsKE_0 person C5sXGZRLfmU_4 truck C5sXGZRLfmU_6 truck C5umaWklWFQ_0 boat C5ybfGh51LM_0 cat C55z9Fe6H7A_0 dog C56Bp4toMG8_0 person C6NYuB7zIzs_0 person C6NYuB7zIzs_1 person C6XCgppHkHA_0 bus C6Yy8uEd0bQ_0 person C6aB6M0DHrU_0 person C6cOmWIisxU_0 person C6eN6sMtuXY_1 boat C6gNbZUU7xg_0 person C6ia-W4TV1U_0 horse C6nHtSy67OY_0 cow C6n6ECY5h84_0 cow C6qWzx58kxo_0 elephant C6qWzx58kxo_2 elephant C6rqmPvlIlI_0 person C6upTeuDG4E_1 skateboard C6xv6Wmy97M_0 horse C62nD-_VXpM_0 horse C62nD-_VXpM_1 horse C66OM90TFXI_0 train C66OM90TFXI_1 train C66z-I_UHqQ_0 airplane C6_p7BXwCTA_0 elephant C7CB2A_bxa0_0 person C7COsB9pcOQ_0 person C7CXGBdoJWo_0 cat C7KZnM_0j8s_0 person C7QYoT22ZYo_0 train C7W0oxkg-nU_0 bicycle C7kKR6pqYzw_0 horse C7to6tRsC9U_0 person C72k6hv1NPM_1 cow C72k6hv1NPM_0 cow C7-sqpILAXM_0 person C7-sqpILAXM_1 person C7_HhvBNDSw_0 person C8ETc2K6ef0_0 train C8G_kcqjspU_0 knife C8IE7aLZvIA_0 person C8IUB4Opf44_0 person C8IUB4Opf44_1 
person C8PqOHn0izQ_6 bird C8Zex-ptYyk_0 person C8daRmtyPo0_0 person C8fcFW4HKGs_0 airplane C8mEWe-TWYs_0 knife C8n1dTEDWvk_0 skateboard C8ukXeoRjbI_0 cow C9Zq_rDHwgg_1 cow C9dD6oS_Zs0_0 person C9je005HOlA_0 bus C9jqFBMRyPs_1 person C9vG5qPPhzE_1 train C9wgqGACPso_2 elephant C95TX0IOPa8_0 skateboard C97oHqKqdBk_0 person C97t3TGT2oc_0 person C-AoVBwcBUw_0 person C-FX5hgFDd0_2 person C-Q9RDsPyOw_0 person C-Q9RDsPyOw_1 person C-S34-Drg7M_0 cow C-TWHpbtVNY_1 person C-WsGZQoLx0_0 boat C-cL2hzThKI_3 airplane C-cL2hzThKI_6 airplane C-omy9mzD7E_0 person C-q9nO8X1rs_0 person C-seg-BCK0U_0 bird C-v3Ttrvuo8_0 airplane C-38hraIyOs_0 person C-47EdafspI_1 airplane C-54wttM4AA_0 person C-9LBJqCMm0_0 train C-_ebeJtjyE_0 person C_BX3dg-lc4_0 person C_DOGAVETwk_1 bird C_EMJm-Z2I8_1 bird C_EMJm-Z2I8_2 bird C_EwPB6zgIA_0 person C_EwPB6zgIA_1 person C_GnC_IEwJM_0 person C_GnC_IEwJM_1 person C_HBU7EUsoE_1 person C_HBU7EUsoE_0 person C_IjqR1NOxw_0 person C_POS7ndKw0_0 truck C_PXq5TsPRQ_1 train C_TfufSsuEU_1 person C_VePcGhr10_0 knife C_aP0fKyudQ_0 horse C_aYcFttRC8_1 person C_aYcFttRC8_0 person C_cUky_0p2Q_0 cow C_uGdKk79X0_1 person C_ykabkQ2U0_2 person C_2EFIuyDSA_0 person C_2p_N8Kvpk_0 person DAJkfl5W8Vc_0 horse DANymtBuoIs_0 dog DAOBGjTf7xI_0 person DAQ9-YTrpp0_0 cat DAU6UNdxbRI_0 person DAn4fH-1Ucs_0 person DApkEgrJX0Q_0 person DAqHnZA6tBQ_0 truck DAtSTeTmg8I_1 horse DAwdyKiZyzM_0 person DA1bsx2RsGA_0 person DA1bsx2RsGA_1 person DA4LF3u2VTI_0 car DA5X-ADHM1w_0 person DBFMXaS9LRg_1 umbrella DBLaZSSthxo_0 person DBR0l2rW6Ew_0 horse DBVbRonJkb8_0 person DBaAVcI4Ftw_0 person DBaAVcI4Ftw_1 person DBmVOTuCJ8Q_0 person DBvOm1qnWrA_0 cow DBySPDEqsO8_0 person DB1Cvyyike0_0 airplane DB3lsf7fD84_0 dog DB6TJh9r1Dw_0 person DCE8Dg_ycjo_0 truck DCHv6sxfCAs_0 person DCPk1uyVNlU_0 person bmHyfvCZWsg_0 elephant bmHyfvCZWsg_2 elephant bmHyfvCZWsg_3 elephant bmLLdC88ohM_0 train bmMB6Mr1uKI_1 person bmPhh5NpV7U_0 person bmQbHpw-4fY_1 bird bmUFMo3pjyo_1 airplane bmhSkbKIg0U_0 cow bmhSkbKIg0U_2 cow 
bmhSkbKIg0U_1 cow bmhfPSKCY8I_1 dog bmqPIwMWGj4_0 person bmuIwo4T6rk_0 cow bmvh7yxyWcY_1 horse bm2eU4uLgQE_0 skateboard bm8MRDfmerA_2 person bm8MRDfmerA_0 person bnOUoCjxIvA_0 bird bnWQnn3a2xE_0 cat bnZwZd6xdHY_0 person bnc1LyPUCLg_0 train bnfN43NoRbA_0 person bnqbJR2oSPk_1 person bnqbJR2oSPk_0 person bnsuTEBQy44_0 person bnw6G0Prvc0_0 bus bnyALwWqo4Y_3 cow bn8epY7auRE_1 person bn8epY7auRE_0 person bn9y-iIDoUU_0 person bn9y-iIDoUU_1 person boHeJDDjRf4_1 person boIKCyPzxr8_0 bicycle boNYwNYmh1E_0 cat boVVWwoXNDw_0 truck boZ6xZrNpzc_0 person boadjC5Lci8_0 person bocql7vYA4o_0 bus boja3N4XQVo_0 person borBr_AiOmM_0 person bornws-twE0_4 airplane bosTHwpZ8Ao_1 dog bo7P3hYkeog_0 person bo9sUjViaHQ_0 person bo-qyHCKssw_0 bird bo-qyHCKssw_4 bird bpI4nUgSqbE_2 person bpI4nUgSqbE_0 person bpI4nUgSqbE_1 person bpJNbivFLKE_0 skateboard bpdgYRz5hPs_0 person bpiM4FHf540_0 person bpjVhXyB4M0_0 airplane bpjVhXyB4M0_2 airplane bpsMni7yj3M_0 truck bps3HXPsekI_0 bear bpu9NYWxcEE_0 skateboard bpyH8PRkBQM_0 person bp1zW8j_ajo_3 bus bp26IdTs4XE_0 person bp3rDJju8n4_0 person bp3xwI_FfOI_0 elephant bp6K7EUtORo_0 cow bqBtysMz94c_0 person bqEmBkEnR1c_0 person bqGkchWbZYE_0 car bqJcZwUB1Go_0 person bqPKigpT9AY_0 person bqQk37pcpVA_0 person bqaeUBH6J3Y_0 person bqhQG8t_2XA_0 person bqjcNzWyaC4_1 airplane bqoG__OO_5g_0 person bquLxAXnaww_0 truck bqwFWjwCZas_0 truck bq6n9q-Qpv8_0 person bq6870eY1a8_7 bicycle brDq8RFzVTo_1 truck brIIDuCmk-E_0 person brLbzZeRz1o_0 person brLeJHMfMXQ_0 horse brNR68fKeMk_0 bus brWg7FAeBEA_0 person brZj8bv9oxY_1 person brhA4NqjrgQ_0 horse brh4hrmrs0Y_1 skateboard brpbaoTNe4s_4 bicycle brpbaoTNe4s_0 bicycle br3e--6oH8Y_0 airplane bsGmFJGua4w_0 elephant bsR9KXIHlCM_0 umbrella bsVBX8u9pW8_0 bus bsXpGvnXpmk_0 cow bsa-G_HEllM_0 person bsbzpk_ejJk_0 person bsbzpk_ejJk_1 person bsgdfqE8ySk_0 person bspbqjb3wAg_0 person bsv_swJ9_KY_0 knife bs2FVeXKiYQ_0 person bs3u00S0eu0_0 person btI7FYFXsfI_0 person btL1Ptjq7pM_0 motorcycle btMmnZdL_uQ_0 person btO34shZMZo_0 horse 
btSyjckocDA_0 person btVQJbFp8Dw_0 cow btdt4lysW6U_0 dog btihrVidTTg_0 cat btk27mnJY_A_1 person btrdQ6N7QJc_0 truck btrdQ6N7QJc_1 truck btsT4XRF0nI_2 cat btul_U3BMKI_0 bus btvg47tz3Ps_1 person btvg47tz3Ps_0 person btz7EwI5rYY_0 person bt75khQG0w8_1 bird buFiFNHj41w_0 person buOqwfPnqkI_0 cow buRfiT3Mq6Q_0 bear buSgd-PrRmA_0 elephant buSgd-PrRmA_2 elephant buSgd-PrRmA_6 elephant buSgd-PrRmA_8 elephant buWf8ffXWTs_0 person bue8SUcqigE_0 cat bugTv6zkE0Q_0 person buh8d20UxNw_1 airplane bulc7gZ_YQY_0 boat buqR3s7EZeQ_0 person buq0_IIvQqc_0 person busJdrzEeJU_0 truck buyJwHRaSYc_0 person buyJwHRaSYc_1 person buzd3FYmwQQ_0 bus bu6QE_qf8fw_0 skateboard bvLQLfRAI9s_0 person bvW_ZJYSOLg_0 person bva98_iD8pI_0 person bvc6dUfKFpM_0 skateboard bvg-QHsENSc_0 umbrella bvnuyMz5Pk4_1 person bvnuyMz5Pk4_0 person bvqPJIDHXHI_0 person bvqPJIDHXHI_1 person bvwJ75OkrTk_0 person bvwJ75OkrTk_1 person bvwwPOK7lN8_0 skateboard bvw4raRDAys_0 person bvxAWBUG1zk_0 dog bv6ASjMljew_2 person bv6ASjMljew_0 person bv6ASjMljew_1 person bv7NOTxSDhg_0 person bv7lroHoMyE_0 person bv8CHN4kwyM_0 person bv9J7oplKjY_1 bird bv-ps8hofSY_0 person bv_rrakMnsY_0 elephant bwB-cfh8UFY_0 cat bwIBXBulTRg_0 person bwM3RKdZAd0_1 airplane bwM3RKdZAd0_2 airplane bwSSE1XeKkg_0 person bwSSE1XeKkg_1 person bwTJKRhesM4_0 person bwZEDD10b44_0 person bwd7bbxG4Kw_1 person bwjUOg-CI1E_0 horse bwotbTZHoPA_0 horse bwotbTZHoPA_1 horse bwv4Q2VqV5A_0 bus bwv4Q2VqV5A_3 bus bwwud6bxEeY_3 elephant bw1HepCVmL8_0 person bw3c96BQrRU_0 car bw3c96BQrRU_1 car bw96DHOgI1I_0 airplane bw_opOTzI6k_0 dog bxRX_05rH9Y_0 bus bxXWi1nvXjI_1 bird bxYeOYlqDPc_0 cow bxaC_opt7IU_0 truck bxjIDI2ZkO4_0 cat bxnu-AITJt4_0 person bxoclb4AFb8_0 person bxsI00qOi6c_0 person bx0h8tvY6kw_0 person bx6BVBAcBtM_0 person bx6BVBAcBtM_1 person bx7PtvZe6O8_1 airplane bx7-RzWnIe4_1 truck byDPGQJdn1s_0 person byQIRt1JF9I_2 dog byQIRt1JF9I_0 dog byQIRt1JF9I_1 dog bycJD4U6rIs_0 bird byehVoG0_eg_0 person bye0FepI8wg_0 bird byi-4Qx3vx4_0 person bykN9ap_QTw_0 bird byvddKaL_kw_0 
person DCRIRGz2xhc_0 person DCRIRGz2xhc_1 person DCUcxHDfYiE_1 cow DCUvhnZnRGQ_0 horse DCXrBMEdS4E_1 person DCrv8CyK9zM_0 bus DCx698xXxjs_0 person DC0PPRyXlD4_0 person DC4ZTdVoj2o_0 boat DC5fRZmUZV8_1 airplane DC8lKdla6rE_0 person DC8lKdla6rE_1 person DC_Kd2iaw9U_0 person DDZILIDFFXc_0 elephant DDd8CfnxkYM_0 person DDgtm9B7Yj0_0 train DDhlugZ-vro_0 person DDhlugZ-vro_1 person DDjUzAM4mLE_0 bus DDjUzAM4mLE_1 bus DDjUzAM4mLE_2 bus DDjUzAM4mLE_4 bus DDoBBLQQ1Mg_0 train DDtWIKexWpM_0 skateboard DDw2iF2W4HI_0 bird DD4YGjlBsHc_0 boat DD844YVVMXE_6 bicycle DD844YVVMXE_0 bicycle DD844YVVMXE_1 bicycle DD844YVVMXE_3 bicycle DD844YVVMXE_4 bicycle DD844YVVMXE_5 bicycle DEHHjz2xiz4_0 person DEI-qJD08Pc_0 person DELUfY3m37k_0 person DEVUyfQt_G0_0 cow DEVUyfQt_G0_3 cow DEVUyfQt_G0_1 cow DEXhh5rt_24_0 motorcycle DEXhh5rt_24_1 motorcycle DEZHoMWiFBQ_1 person DEau5L3A9S0_0 person DEjPKQLASJg_0 umbrella DEtj0Fb-Jbo_0 skateboard DEuYWYNXbw4_0 truck DE3kl7rbakE_0 skateboard DE6z5oB-0vo_0 elephant DFBlkKPYtl0_1 cow DFBlkKPYtl0_0 cow DFI7_dtUb0U_1 giraffe DFI7_dtUb0U_3 giraffe DFRmdyjR_Dc_0 giraffe DFb4KWUX31Y_0 person DFpZ6f1iWT4_0 person DFwPVEPK4-Y_0 cat DFzgqOHlnAk_0 person DGC_pivLAEE_0 person DGMfSMlhL4w_4 elephant DGMfSMlhL4w_6 elephant DGMfSMlhL4w_13 elephant DGMfSMlhL4w_17 elephant DGM9CDF3ks8_2 motorcycle DGM9CDF3ks8_0 motorcycle DGM9CDF3ks8_1 motorcycle DGbZYKPp7XI_0 person DGc9VSWQUyQ_2 person DGc9VSWQUyQ_1 person DGp5vBVf28g_0 person DGsQAjKXPBw_0 cat DGs0ZHnAtkg_1 person DGs0ZHnAtkg_0 person DGvsndSWlBw_0 elephant DGx5aC4h8wg_0 horse DGygUuHcJhs_0 person DGygUuHcJhs_1 person DG8TJBoerZ0_1 person DG8TJBoerZ0_0 person DG93jIsco3E_0 person DG93jIsco3E_1 person DHB_RgHOHdo_0 umbrella DHB_RgHOHdo_1 umbrella DHLK8xDGwL0_2 knife DHLg5KzzoOM_2 cow DHLg5KzzoOM_0 cow DHPWnuYI2qA_0 person DHSGQLguGZ4_0 truck DHdFVfp7SvM_1 horse DHl_QoiyZ2I_1 person DHl_QoiyZ2I_2 person DHl_QoiyZ2I_0 person DHqrGwHgnAA_0 person DHr77uGYi-g_0 dog DHsorh6ngMI_0 umbrella DHs1KtWx2n4_0 person DH0OVsYB2vs_0 
person DH5nSZZ6uJE_0 umbrella DH_wEdP1Glk_2 train DIFEQ3rorSw_0 person DILtO1oyoCY_0 person DIOuJC_mv_k_0 person DIO8l6DAJX0_0 person DIO8l6DAJX0_1 person DIP8d1YC6vM_0 person DISU2i6bJqs_0 cow DIaTXSXAfJM_1 person DIaTXSXAfJM_0 person DIpJyhb8gzw_3 motorcycle DI7rj5AAYEE_0 elephant DI801ysby74_0 knife DJD4Xlf0eNg_0 person DJKFzJe6KAk_1 skateboard DJKokwprK90_2 skateboard DJLSHLPE0po_0 person DJQ8goQ4xyo_0 person DJV-ft_10HY_1 person DJjjrdYts2s_0 elephant DJ4oQ03HqyE_0 bicycle DKBIz_MLIpw_2 knife DKC58UBq-0w_1 airplane DKEmSml-t4c_1 person DKEmSml-t4c_0 person DKHCjzNZE3U_0 elephant DKHCjzNZE3U_4 elephant DKICHseWnGQ_0 person DKJ3As_9Mlw_0 person DKKsGGUWero_0 person DKLxBVm3HHk_0 airplane DKMUARFnh2Q_0 person DKShwn6Xk8w_0 cat DKZ21QA0lBM_1 person DKcpPg_tEUU_0 skateboard DKj3fFeAaL8_0 person DKq7d2C6gOI_0 motorcycle DKxIadOj4D0_0 horse DKyckH3XY8Y_0 bicycle DKydJWySeUw_0 car DLKE31mt2Qc_0 bird DLLrkv1aF-k_0 train DLMDzB4XBPg_0 person DLPmEX5pwY0_0 cow DLT57E3vm98_2 truck DLct7_2tyWI_0 person DLd6kxxgSUM_0 person DLkx4w5oteM_0 person DLmCj6q5vD0_0 person DL3V2mhMX7M_0 skateboard DL3eQSTbZ9Y_0 skateboard DMB6Mr7lTSI_0 person DMEXGsc-PaU_0 person DMFEU87_IrU_2 boat DMR4kX1M_zk_2 elephant DMR4kX1M_zk_1 elephant DMTP7OyjdJ4_4 bus DMT_n1VJG80_2 bird DMbwyGKLF4c_0 person DMb-AjUXKe8_0 giraffe DMiFC67o2P0_1 horse DMiFC67o2P0_2 horse DMiFC67o2P0_3 horse DMn1JpU6MBE_0 person DMn-kaSNd5Q_0 person DMuLn7wJTcc_0 person DM7c57qvjgs_0 person DNAMMWkSfLY_11 umbrella DNAjFU24eK8_0 boat DNB4bgEP-8Y_0 person DNGlLqzJF6Q_0 person DNGlLqzJF6Q_1 person DNOZeC0gZzs_0 truck DNXuVh_X_qY_1 person DNXuVh_X_qY_0 person DNhOrRaOe2M_0 person DNul7ILzxkQ_0 person DNul7ILzxkQ_1 person DN0xWDfCAM0_0 motorcycle DN1ujoUaAKU_0 person DN1ujoUaAKU_1 person DN4TuB3csDg_0 person DN4e8ljPm1g_0 bicycle DN5mGCGzOOY_0 person DN7FitWe9k8_0 person DN8yb60bxNc_0 person DOAU-JodN0U_1 airplane DOAmtFxCuKA_1 person DODU9JghuAA_0 cow DORauVZJhAU_1 person DORauVZJhAU_0 person DOhLqHOLbQY_0 person DOiUy3AGiKw_0 
person DOiUy3AGiKw_2 person DOoTpSSHVho_0 truck DOoTpSSHVho_1 truck DOsVwDV787M_0 bus DOuULWa1RKM_0 person DOvC_-Yrn5k_0 cat DPAEt1AqwbQ_1 car DPCyQOQdLHE_0 cat DPFO_O_f3hc_0 cow DPIm8x0i2yo_0 motorcycle DPJ7ZSWY2Qs_0 skateboard DPXJpAVtRfM_0 train DPXJpAVtRfM_1 train DPZi4DZaTmk_0 person DPZi4DZaTmk_1 person DPelBJ73uaU_0 bicycle DPo9M61p8gI_0 umbrella DPvxwOvedrQ_1 knife DPz3CG4lD2Q_5 truck DPz3CG4lD2Q_6 truck DP2q1TrqjAE_0 person DP2q1TrqjAE_1 person DP6ZB5PxNfc_0 person DP-JZPR9HFc_2 elephant DQDV1Wr7qo8_0 bear DQOglBZHFCs_0 bear DQZiSQmMBnc_0 bird DQcCfbTKP1s_1 person DQcCfbTKP1s_2 person DQcCfbTKP1s_0 person bywgcqNg6RU_2 car by7PLb7MqM0_0 motorcycle by_OJvQqlKE_0 person bzKVRbSQpZE_0 knife bzLdvZQAWgA_0 person bzO5MBTTrdQ_0 person bzRELZo9WMU_2 dog bzRELZo9WMU_0 dog bzZgsynjAGk_0 cow bzfE3U02_44_1 person bzfE3U02_44_0 person bzimWzymgu0_0 person bzquVP0NUms_2 truck bz5Ht4jyT0k_0 bus bz66OedbeoI_0 person b0C_2T7-IfU_0 cat b0GlXXGkfRQ_0 person b0GlXXGkfRQ_1 person b0HXAfyZ7Sk_1 person b0Q3EfK70fg_2 airplane b0Q3EfK70fg_4 airplane b0Q3EfK70fg_5 airplane b0Q3EfK70fg_6 airplane b0a7ewqE8S4_0 dog b0nOQfZSaUo_0 person b0nt17hBmDw_0 boat b0qXUUs3-WE_1 person b0t8uuynzIM_0 train b0xQRq8njAI_0 cat b0z1nalEX08_0 truck b0-UOt-DT1A_0 person b1ETK4nP9ag_0 dog b1EnXvOZQbQ_0 truck b1Gd5IWJBRI_0 person b1R3uk0VLc4_0 person b1SyeZsSk80_5 elephant b1SyeZsSk80_3 elephant b1UAPTD4s74_0 person b1UpjRRBrTw_0 cat b1cpAYk99_U_0 person b1cpAYk99_U_2 person b1cpAYk99_U_3 person b17OiOMReIs_0 person b1-WFxZ7Lcs_0 truck b2DqNP9s4t0_0 person b2Tm_7DUimQ_0 person b2Y6KLIX5vE_1 motorcycle b2Y6KLIX5vE_0 motorcycle b2azzMxEH84_0 motorcycle b2fq5Ba1L8M_0 person b2fsE3wZfWM_1 person b2m2gaVpjNE_0 person b2qNS9qjYbE_1 person b2tlrwd_LIg_0 person b28pEbOSeUs_0 dog b2_dSc2NxNI_0 person b3KP0d-WX38_0 bicycle b3KP0d-WX38_1 bicycle b3KP0d-WX38_2 bicycle b3R6fHlRZu4_1 bicycle b3R6fHlRZu4_3 bicycle b3R6fHlRZu4_4 bicycle b3SsKosfjOA_0 train b3SsKosfjOA_1 train b3SsKosfjOA_2 train b3UOZHA5jRI_0 cat 
b3Z1Ay2o1zQ_0 knife b3bkNCYQbwc_0 cow b3p-fFVYM4E_2 train b3p-fFVYM4E_4 train b3p-fFVYM4E_6 train b3tgGsan2vc_0 truck b3x6f5xFPTQ_0 horse b3x6f5xFPTQ_1 horse b3x8Gwk4V8o_1 person b3x8Gwk4V8o_0 person b323CLKf_vM_0 person b34Cdm6l5_k_1 airplane b34JUq19S0E_2 motorcycle b34JUq19S0E_0 motorcycle b34JUq19S0E_1 motorcycle b344je6lVYA_0 airplane b35ihWGyz_4_0 cat b37tPdAEkEw_0 person b39uBVwcm48_0 motorcycle b4E8uT19QkY_0 bus b4E8uT19QkY_1 bus b4FBbr4Pud8_0 person b4GXrkSKAdA_0 cat b4HAPQ_xX5E_0 person b4HAPQ_xX5E_1 person b4KwBIif5OY_0 cow b4KwBIif5OY_2 cow b4KwBIif5OY_3 cow b4KwBIif5OY_4 cow b4UXSjdnqZ0_0 person b4Xn8--nfvI_0 person b4aEJNvYqtU_0 bear b4j8lkkY_lE_0 zebra b4tTUDVt6Gk_0 person b42WUwHAKPs_0 boat b455pPKgTj4_0 person b5D9lQq3uf8_0 bear b5IshxZjL7o_0 motorcycle b5NxbNaAo_8_0 person b5R1HVvc040_1 train b5S8Db1Gu7I_1 bicycle b5S8Db1Gu7I_3 bicycle b5T_VSM7nbg_0 motorcycle b5nwFyniymA_0 dog b5ud9dsnS1c_1 person b5ud9dsnS1c_0 person b51dSWD8MF4_0 elephant b59pPUKW_78_0 car b5-eXPHW4Mg_0 person b6AoStVIzkw_2 person b6IE2imnfp4_0 person b6MtzhRufn4_2 skateboard b6MtzhRufn4_0 skateboard b6RIavVJ660_1 person b6dVZMAHwro_1 airplane b6gsIu7Pxbc_0 dog b6ndIInoIzU_0 boat b6xUAyNCbdY_0 person b61MghNCCTI_0 person b61MghNCCTI_1 person b65S2P2Pfms_0 person b66BE9WdQP0_2 bicycle b7HqfhRNtAQ_0 cow b7H_n_w2eFQ_0 person b7Igw_OO-P4_0 person b7LHlx86tk0_0 train b7RYkf4oXv0_0 skateboard b7WQe48-0NI_1 giraffe b7WQe48-0NI_0 elephant b7WiE1a8IAM_0 person b7go-l8jA5s_1 boat b7hJ62ORLHc_0 person b7iLQoOKVrM_1 horse b7ivqvv6s6A_0 motorcycle b7mawJlPASQ_0 person b7u0NZEc8OI_1 person b7ycKg8GLHA_0 person b71SThzfrDg_0 bird b78PYqyYWZA_0 person b8LqaxvNRHw_0 person b8LqaxvNRHw_1 person b8VoRclgULc_0 cat b8aWJIa4RFI_0 giraffe b8es8BWiC5c_1 person b8g4M9Yov8M_11 bear b8g4M9Yov8M_3 bear b8xtOCMwjJM_1 bird b8x1qHT8nvE_2 boat b8yA8bHlrtQ_0 bus b8yqEFXS8Ck_0 horse b82N91HYnUo_0 knife b9O_mJTNj2A_0 train b9SLHObDJzQ_0 horse b9Y5tpPv-LQ_0 car b9iCmG9fIHc_1 motorcycle b9melHkIeV4_0 bird 
b9oiO21MJh0_0 horse b9oiO21MJh0_1 horse b9u4WV9ft4s_0 motorcycle b9wwfAu5DCs_0 skateboard b96WdT0DXKk_2 bicycle b96WdT0DXKk_0 bicycle b96WdT0DXKk_1 bicycle b98Gs0d8AKo_0 motorcycle b9-xiVm1Xck_0 skateboard b9-2bW13faI_0 person b-Cp0i6fBOU_0 person b-Cp0i6fBOU_1 person b-S7G5A0MNI_0 person b-T0AS7CuxI_1 knife b-VYy9eEU6w_0 person b-W1PY33nQg_0 person b-hT8zKObfM_0 person b-hqwYjKCH8_0 truck b-i49sLOjBo_0 person b-i49sLOjBo_1 person b-mQajOHUAA_0 person b-mQajOHUAA_1 person b-mQajOHUAA_2 person b-ncxt38EFw_0 person b-wiIOBccF0_1 person b-x--HjbnpM_0 knife b-5K7RwiHdw_3 boat b-8ARNgk-Tw_0 person b-_FeNpM_wI_0 person b_B3oYiBWi4_1 skateboard b_KBD-NL4Vo_0 train b_ZVDwMrcEU_0 airplane b_exMPY7gnM_0 person b_fR7aS10Z0_0 bear b_h4xugql44_0 umbrella b_kksCK6cbw_0 cat b_n776bwyJo_0 boat b_n776bwyJo_1 boat b_vDLf3193s_0 bus b_1TwBIgwKE_0 car b_7EvlxDWFc_0 truck cAARR6q3Qq8_1 skateboard cAARR6q3Qq8_0 skateboard cAFqK_6ltXw_0 cat cAJsxlkMG_s_0 dog cAJsxlkMG_s_2 dog cAJsxlkMG_s_1 dog cAKfCLDFg34_1 person cASL6wZ33vA_0 boat cAYIECe6Bvs_0 truck cAnDryag2FA_0 truck cAqs3d9KNzk_0 person cArYvJEUdOg_0 horse cA0HCmGOK84_8 horse cBAG9pjaV70_0 cow cBBDfwkH23A_5 horse cBBDfwkH23A_2 horse DQk3Xvbv57I_0 cat DQqBXfTgqTE_0 train DQ04rtHIqHQ_0 elephant DQ7GZOJxra8_0 person DQ-vQygnOx0_0 train DQ-vQygnOx0_1 train DQ-vQygnOx0_2 train DQ-vQygnOx0_5 train DQ-vQygnOx0_7 train DQ_yyvagS0g_0 truck DRMoOpmUgn8_0 person DRO4MalcQFk_0 person DRSSiSNzV7Y_0 person DRXxJArWrQA_0 person DRaIGIiQXd0_1 train DRaX3P2ysBk_0 person DRhRKwI26n8_0 bear DRhRKwI26n8_1 bear DRseWxukwaI_0 person DRsoi5DxAJk_0 car DRuDqkZ0zfE_0 person DRuDqkZ0zfE_2 person DRuDqkZ0zfE_1 person DRxLQ6we5YU_0 horse DRybt0Cgr_U_1 bird DR0QGL0n_wM_0 person DR4mzyMklY8_0 skateboard DR82KhNzs1w_0 person DR-AMnnLCCQ_0 cat DR_jo4aSqn0_0 person DR_jo4aSqn0_1 person DSAbzYpUW5w_0 cow DSB9X3bgG2A_0 person DSCt67aveiw_0 truck DSCt67aveiw_2 truck DSEt02E1kJE_0 person DSM_BlK-ggg_1 person DSM_BlK-ggg_2 person DSRGbK9rPbo_0 train DSWlLGL3xj8_0 horse 
DSZkEwhJEI4_0 skateboard DSaSooZZeAg_2 bus DSn5-dKW_P0_0 person DSoRmFNRxiE_0 person DSoRmFNRxiE_1 person DSqy2MlVOxE_0 person DSq0q8dCuCw_0 truck DS5z-K8Cpzs_0 person DS-V_NKOawo_0 knife DTBhYAFcQ94_0 skateboard DTFg8SeWhbE_3 skateboard DTYiSIRTXW8_0 knife DTZkCYvGZ9E_0 person DTm5L6IAHC4_0 person DTnIC_Q8YoY_1 boat DTs2uXh47Xw_0 person DTtejx1VYBs_0 person DTvjWj60ixI_0 person DTvzQwX0KRQ_1 horse DT4KxrhD89E_0 person DT7TSCbFXek_0 person DUAhVOWkluQ_0 person DUAhVOWkluQ_1 person DUBzIIKht_w_0 person DUBzIIKht_w_1 person DUB3OOi7dQc_0 person DUHEv94Tyno_0 person DUHEv94Tyno_1 person DUHEv94Tyno_2 person DUPQ3fPhomY_0 person DUQa7q5NTQI_1 horse DUZhPq4FiJM_1 person DUb6-VQcokc_0 cat DUlYPwiuBrw_0 truck DUlYPwiuBrw_1 truck DUmKu-rc7jI_0 person DUwVOy7IYvA_0 person DUxGnuYB_GI_0 cow DU1ww3ryP7s_0 person DU4acd1_vuI_0 person DU8jvzO9tEA_0 zebra DVFfZw4HW3E_0 train DVFfZw4HW3E_1 train DVK9BrG_Y_8_0 person DVOFKTeh9BY_0 person DVgCgSDZVw0_0 person DVjOMylPUfU_0 person DVlEnd5Ra2Y_0 person DVm_-u6oWwA_0 car DVqsCPYrMrg_0 person DVqsCPYrMrg_1 person DV4GPAloBks_1 person DV4GPAloBks_0 person DV79-MpnE1Y_0 person DWQ0kmCIT0E_0 person DWZNfCg0W8o_0 person DWjj9U_lr30_0 person DWoRZEAFpUI_0 person DWqyeu4eovM_0 horse DWuaB5j6-CQ_0 person DWwGWBcxL0k_0 person DW1iqzQEWkE_0 person DW4OTTF7Jc4_0 person DW8G3A0trOk_9 bear DXEqDJWN72E_0 person DXEqDJWN72E_1 person DXI2AmrILgw_1 cat DXa15hEKLAc_0 truck DXgs-pfW-0M_0 train DXpyVrXMs1w_0 person DX5AP4s6u0k_0 bird DX867I2CNRk_0 airplane DX-PbjeeB6o_1 giraffe DYJJBRoUlnU_0 knife DYUiMLisOzs_0 person DYbb8_mMeLs_0 horse DYhTdNMuv5g_0 knife DYkV2TPfOBk_0 truck DYlrCUMDv_g_0 cat DYpBOmbclGY_0 person DYqIQv97tuE_0 person DYvHdc4rnxk_2 person DYvHdc4rnxk_1 person DY0ggbU0cIk_0 person DY3h0Y3ijmo_0 elephant DY3h0Y3ijmo_2 elephant DY6eQdk8jaE_0 person DZESlirYB3I_1 train DZGEjl9U78c_0 person DZIFKtO6y2Q_0 person DZIFKtO6y2Q_1 person DZMd9NPNnLE_0 person DZRZg1gGn1g_0 bus DZWsGelqCPg_0 person DZXldsAgY7o_4 skateboard DZYjfZMMVAE_1 person 
DZgbeXD-bZg_0 bear DZqs7ie6HPU_0 person DZ3JlgmRHQ8_0 person DZ4G9EBImOM_1 person DaMdWu7CyRE_0 person DaRYBq6zsmY_2 elephant DagKzwyphZY_0 person DapmUIRDw3o_0 airplane DaqVTidNtg0_1 person DatNYbTqxlw_0 person Daz5kZBXn5c_1 elephant Da10JheIcaw_0 person Da25bjhf1WQ_0 person DbAZPBnTh3U_1 person DbGX12xMbWM_0 person DbNOHXsDP5I_1 boat DbSGsjNmQ8A_0 cat DbXz_8anwSM_0 person DbZGV4ixs2E_0 bird DbdZugU9GWk_0 bus DbeCxvMCD-Q_0 person DbfJ2s7qQJ8_0 truck DbivV-It_rM_0 person Dbmwr1_ObHM_0 person DbnhReILFSs_0 person DboUAm-F7Rg_0 person Dbpte835xwc_0 person Dbqj1XCvcGw_1 cow DbrGY3BalZ0_0 skateboard DbrGY3BalZ0_3 skateboard DbrGY3BalZ0_2 skateboard DbvkTKJjRj8_0 person DbwEevYFGrg_0 person DbzakdG34mg_0 car DbzakdG34mg_1 car Db3OG025sz0_0 person Db74WjMmf-0_0 bear Db74WjMmf-0_1 bear DcAxPsNVe28_0 train DcFWetycnqY_0 person DcKjrocJ8iM_0 person DcKjrocJ8iM_1 person DcOl0Ec1kuI_0 person Dca5CTtFQZ8_0 motorcycle DcexSE28IOA_2 person DcexSE28IOA_0 person DcexSE28IOA_1 person Dcfs-bFQcxk_0 person Dcj-1vKe6iI_0 elephant DckRd1CpSm0_0 skateboard DckTHE_Pn5Q_0 person DcknQtmjIDA_0 elephant Dclr-tDJMO8_0 person DcpuJSx5z78_0 person DcpuJSx5z78_1 person Dc3yhv5mfN8_0 person Dc4EXPP0fqU_0 cat Dc9dWfPxIEM_0 bicycle DdGvFcujfxo_0 person DdHWfz7kw4I_0 person DdJuIi7LexI_0 bus DdKvI-6rMII_1 person DdNpi-Pmvgc_0 person DdNpi-Pmvgc_1 person DdNpi-Pmvgc_2 person DdOk9lG9b1k_0 knife DdUa-CozM14_0 person DdUa-CozM14_1 person DdYyeGgXLKw_0 person DddB5joJQC4_0 airplane DddRHyvYqFI_0 person DddRHyvYqFI_1 person Ddf4T9I0sdI_0 person Ddz7VVJXgHs_0 person Dd2qrXASEzk_1 person Dd2qrXASEzk_0 person DeCtt_QZqjk_0 person DeCtt_QZqjk_2 person DeFuoRV0yCw_0 person DeFuoRV0yCw_1 person DeHiMvczAD4_0 person DeIpwOsUzjw_0 person DeVZ83g93sE_1 bird DeViLrLvD1Y_0 horse DefHSc2VTOo_0 person DfGzSVv2ELQ_4 horse DfGzSVv2ELQ_1 horse DfGzSVv2ELQ_3 horse DfS7lvAcDQc_0 umbrella DfS7lvAcDQc_12 umbrella DfT_7BUGNQA_0 person cBI2gZhpA-8_0 person cBMnKBVcoOE_0 person cBMnKBVcoOE_1 person cBQJU95uwwM_0 person 
cBQJU95uwwM_1 person cBSbDKv-Z_o_0 car cBb6VPKgF1M_0 knife cBeH0xcCCWE_1 person cBhDn0TkAdc_0 elephant cBhDn0TkAdc_2 elephant cBhDn0TkAdc_3 elephant cBhDn0TkAdc_1 elephant cBlqBEElvDI_0 person cBpFzTn_uOo_0 person cBvZAwlCN4M_1 horse cBvZAwlCN4M_2 horse cB1RhnpteUg_3 airplane cB9XRu3bb_0_0 person cB_RQN9IXg8_2 skateboard cCA7llOU4HQ_0 person cCEUd1IZ6OQ_0 person cCEUd1IZ6OQ_2 person cCMe4KdqzeI_0 person cCaz75u-bCM_0 motorcycle cCfInBOvqkk_0 person cCfVriTflG8_0 person cCnjh5F8dvM_2 boat cCvpQCZ33xQ_0 train cCwB7O-yg4Q_1 airplane cCxZRIxh_yk_0 cow cC2UgNbG7Rs_0 cat cC3-bziiNKk_0 cow cC3-bziiNKk_4 cow cC4nZNGoC-g_1 horse cC4nZNGoC-g_2 horse cDGz5cnIzK0_0 train cDIc8cs3igI_1 person cDL0YZ_vXOk_1 person cDaR5WdXvIo_0 dog cDfSk2g6wRM_0 dog cDg-vYWO3AI_0 umbrella cDvCYN97QYU_0 dog cDvWWER9oeI_0 person cD_EAISZcwM_0 person cD_zwwrcvkI_1 person cEAwCEnfITY_2 horse cEFLP7rdZSU_0 person cEIAg54WPCs_0 skateboard cEOHFcu3Uic_0 person cEOqnkbgfMQ_0 person cEXYVwmcpSg_0 person cEdeOfPvcQ0_0 person cEomNeUqQUI_0 umbrella cErRs5qv8mc_0 elephant cEyCX-t8Jlo_0 bird cEyCX-t8Jlo_1 bird cEzC3hwdO_o_0 person cE7AS1hrlYA_1 person cE7AS1hrlYA_0 person cFBoLads7vA_0 person cFHTt7uFxH4_4 umbrella cFOk-AMS2Aw_0 motorcycle cFOk-AMS2Aw_1 motorcycle cFkmNa2nYEk_0 person cFoUf9UmoZ0_0 person cFq4fzO00qE_0 cat cFtfKwaxphA_0 person cFuoJPf6prU_0 skateboard cFzjl_SiNhg_2 dog cFzjl_SiNhg_0 dog cF0SM2Lf82s_0 person cF7uQwB8sEg_0 person cF9YklqKEp0_0 cow cGBOBTCgzP8_3 horse cGBOBTCgzP8_4 horse cGCbcyeQqG8_0 person cGCbcyeQqG8_1 person cGC4pGWPOUk_0 person cGC732t-itM_0 person cGEvxRn1UtQ_0 person cGNmKg25XMs_0 boat cGUXUioIa4o_0 person cGVaIIV18ug_0 person cGcyxMp1ZQc_0 person cGcyxMp1ZQc_1 person cGdeftwBWL4_0 person cGiVzhQI2a0_0 person cGpNQ9Vk-5E_0 person cGtaJVgvTJg_0 person cG1_sZqy7lU_0 person cG2fL1nRZmE_0 person cG5TxH-1Sf4_0 person cG65cBtyj20_0 cow cG7BBtumZnQ_0 dog cHCYX0EqsfE_0 person cHQLun1YTiM_1 person cHQLun1YTiM_0 person cHSjCxvPumA_0 motorcycle cHWE72lnzZo_0 person cHYcXW7HAkA_0 
person cHaBQgTFdr4_2 knife cHjKy80ojXM_2 bear cHkm25QAG8A_0 truck cHnV0yZTha4_0 car cHpaD5PtHnM_0 cow cHv3ulnF1fo_0 person cHyjhzLIeO0_0 person cH2A35uULdc_0 person cH2g9vV4SyM_0 bird cH27awicc50_0 person cH8zYhvzdb8_0 person cICrfFzHoZs_0 person cIFXOWG5Dd0_1 person cIF9coXttVs_0 person cIIlWssV9Sk_0 person cIJSKwcTQ10_2 bicycle cIJSKwcTQ10_3 bicycle cIPlCULXXHQ_3 elephant cIPlCULXXHQ_2 elephant cISwax-t_78_0 person cIVGJQrNkT8_0 person cIV9T5ZQmdI_0 person cIh9baL5Hzw_1 person cIjMwiaApEc_0 person cIvqOdvwX6w_0 person cIwDGqmKrfY_0 person cJH4RK9aVR0_0 elephant cJJDfdbopiQ_0 person cJSjHpF7ILg_0 airplane cJUj9q6wgis_0 person cJfW0Gfkzrg_0 knife cJjaVdNaUko_0 bus cJnihDxg0wg_1 dog cJtGcHMJlMA_0 person cJ0hAba-pck_2 giraffe cJ0_u3Ta6kU_2 skateboard cJ0_u3Ta6kU_0 skateboard cJ2f7qDBm7M_0 horse cJ41GQMsJIA_0 dog cJ6BfbrgwDM_0 person cJ7Akre7-Sc_1 cow cJ7ZHI-8gU0_0 person cKO8G1ZXQgo_0 person cKdank8BDik_0 person cKgqIdOoBmE_0 person cK4yj3jgWek_0 person cK5MabT7iIA_2 train cK5MabT7iIA_0 train cK5MabT7iIA_1 train cK9R8KdVuIE_0 person cLKgng5yuC4_0 person cLKgng5yuC4_2 person cLKgng5yuC4_1 person cLPSEK3_jEE_2 horse cLPSEK3_jEE_3 horse cLPSTXefj2Y_0 person cLY_N1jEC8E_0 person cLg1pn5Oh1k_0 person cLlL2uHDyBw_0 bird cLnQAhX42Eo_1 horse cLnQAhX42Eo_0 horse cLn0Kz_p2U0_0 train cLrXQvFZ-y0_0 knife cLvgs19Vm18_1 person cL2jFa-Zd_M_0 person cL4k6bdNmbs_0 boat cL6G_y5LoDo_0 motorcycle cMGnmOyYWcM_1 person cMIyGPpW9Xw_0 person cMJhk7y1Nng_2 bird cMJhk7y1Nng_0 bird cMJhk7y1Nng_1 bird cMOULCqujvs_0 cat cMRhR707ZfA_11 bear cMRhR707ZfA_13 bear cMeXNjQUwe0_0 horse cMg1O__kPFA_0 horse cMwsAfZMG1c_0 person cMwt7xBZ9i4_1 person cM6-id-uhMg_0 person cM6-id-uhMg_1 person cNLuZxPpWho_9 elephant cNLuZxPpWho_14 elephant cNLuZxPpWho_1 elephant cNLuZxPpWho_4 elephant cNLuZxPpWho_8 elephant cNLuZxPpWho_11 elephant cNLuZxPpWho_13 elephant cNalYSGXOkM_0 person cNnMvF7oiUo_0 horse cNr9rjOJ0ps_0 person cNxEreBWMRc_0 person cNxEreBWMRc_1 person cOD8xhwGfME_0 person cOD8xhwGfME_1 person 
cOYK17trE9k_0 person cOYK17trE9k_1 person cOZOzY6XDLU_0 person cOalncX8fwg_0 airplane cOalncX8fwg_1 airplane cOalncX8fwg_2 airplane cOalncX8fwg_3 airplane cOalncX8fwg_4 airplane cOkVxYbnFRs_0 person cOkiG4LRtQU_1 truck cOp33oi4C8E_0 skateboard cOzNmIBhiMY_0 person cO1F_0l1vSU_0 person cO1MbnbgUbU_0 dog cO3WA2g_UeM_4 bear cO3WA2g_UeM_2 bear cO5xsG3ud_0_0 train cO7nCAZ-uLk_0 person cPBvSHKPNvk_0 person cPdRddyxsVA_0 cow cPdjr1zTQQ4_0 person cPeGSXSLepg_0 person cPkbg5bdpcE_1 person cPkbg5bdpcE_0 person cPn5c5t2g6w_3 skateboard cPqAK1E1Ajo_1 dog cPqAK1E1Ajo_0 dog cPsXS3_4zOk_0 bus cPu-riLrt1c_0 person cPu-riLrt1c_1 person cP-gl2IN_AI_1 person cP-gl2IN_AI_0 person cP_nenKIU4g_2 bear Df70QgKA_Hc_0 person Df70QgKA_Hc_1 person DgSwJVCLkYM_0 person DgcSsQKaX7Q_0 person DgoFmJFWpUw_0 bear DgtiaphLkMc_0 person DguiMPx8nn0_2 person DgvI1azs_0E_0 airplane DgwM5b-eKvc_0 person Dg2sU0bmBho_0 person Dg8r8QlJw80_0 person DhAkswxLuAs_0 person DhJZwbql4dc_1 person DhLD44-KIUU_0 person DhYbvvwSsEA_1 person DhYbvvwSsEA_0 person Dhd-0-xOF6I_0 cow Dhl-jIQaam0_3 person Dhl-jIQaam0_0 person Dhl-jIQaam0_1 person Dh6APdqkNZ0_0 person Dh_6tF8ndZs_0 person DiAj24Xsadk_0 person DiDELcBJWh4_0 person DiPjO5frbNc_0 person DiQ-VgXIDMo_0 person DiVX_-kQv0k_0 person DiVX_-kQv0k_1 person DiWi-oWT9EI_0 boat DiXsD6VHEr4_0 person DiZ4OCT30AM_0 person Dia6QIxORbM_4 airplane DihnxPkojnQ_0 giraffe DihnxPkojnQ_1 giraffe Di41WoS7T1M_1 bear DjAQs68BiwA_1 giraffe DjB4dpC4TVs_0 horse DjD15NlLBYI_1 truck DjD15NlLBYI_0 truck DjK1R_LBqgM_0 person DjMnoAbMiIU_0 person DjMnoAbMiIU_1 person DjQF34GUthk_0 person DjS-0VOep0Y_2 person DjXtIIwfITI_0 person Djb2blFeoNM_0 person DjdAxUWgSdk_0 knife Dju4Bl2fx88_0 bicycle DjyldIzPJbA_0 horse Djy5UE0Ofa8_0 person Djy5UE0Ofa8_1 person Dj7DVsCVqqY_0 cow Dj9npayKJqk_0 elephant DkAG7dFDk94_0 person DkC_iJTIrYc_1 person DkC_iJTIrYc_0 person DkF-LqA7wSk_0 bus DkNY4yun6ek_0 boat DkPYbKRQBE4_1 motorcycle DkTfU9q9U_I_0 cat DkTqTY04y30_0 person DkTqTY04y30_1 person DkbRBY4ZlFY_0 
bicycle DkbRBY4ZlFY_5 bicycle DkbRBY4ZlFY_6 bicycle DkbikYoLycQ_0 bus Dkmab-wxSy4_0 person Dkmab-wxSy4_1 person DknRMqifZFE_0 skateboard DkpZP7RtrJM_1 bus Dkqy-okNDVM_0 person DkrkY6blx3U_1 person DkrkY6blx3U_0 person Dk0wXCp-USs_0 boat Dk1QPiNji5I_0 skateboard Dk4V0c6Yzbs_1 boat Dk47lOWl3NM_2 cat DlCMYyDhSVY_1 person DlCMYyDhSVY_0 person DlDFQ88ui2A_0 person DlDJpNWKuPM_0 knife DlFJTfO-mc0_0 cat DlG-VsdsPCk_0 motorcycle DlTE01-45gQ_0 airplane DlX2Yvp20gY_0 person DldXGda7zfE_0 person DldXGda7zfE_1 person Dlg5BFm20wI_0 person Dlg5BFm20wI_1 person Dl3fDWG23zU_0 person DmG9v9xVPbg_0 person DmIeMGzqZEc_0 cow DmJ9x-DFdqA_0 person DmJ9x-DFdqA_1 person DmLGGv6YNEo_1 bus DmL_6_a_54g_0 bird DmNmgatXwU8_1 knife DmSRZp63qTo_1 truck Dme3Rfsqbz8_0 person DmiucPhqXMg_1 bus DmiucPhqXMg_4 bus DmlMgF-BuRo_0 person Dmt8pgQG3M4_1 skateboard DnLVGRyXAR4_0 person DnN9tjwPn-0_0 person DnR4VFNo44s_1 airplane DndaJVRuOoo_0 person Dniy3zze90s_0 person Dniy3zze90s_1 person Dnj_fhGXHC8_1 bird DnkUzsPqjE8_1 person DnkUzsPqjE8_2 person DntJ297deXI_1 person DntJ297deXI_2 person DntJ297deXI_0 person Dnx6TlTvRfI_0 person Dn80jV69sbs_0 person DoEWhY2BkZo_0 person DoOq_FhWze0_0 person DoPKGr2HJwM_3 bird DoRoLk97UqY_0 truck DobAdZVysXc_0 cow DohloSZ6YdA_0 person Domgj6ptFOs_0 bus DpH2eSmcTk4_0 bus DpJA_qYLobk_11 bicycle DpJA_qYLobk_0 bicycle DpJA_qYLobk_2 bicycle DpJA_qYLobk_5 bicycle DpJA_qYLobk_6 bicycle DpJWhFnF2Fo_0 dog DpR63uhHTjo_1 horse DpWw1SaCdTQ_0 person DpbGsvglx7Q_0 elephant DpbGsvglx7Q_1 elephant DpimIW1T2Sw_0 person Dpp32dLn0hQ_0 person DpvuhymOiUM_0 person DpwjQ_KcYAc_0 person DpxoJ_GWJA4_0 giraffe DpxoJ_GWJA4_3 giraffe DpxoJ_GWJA4_4 giraffe DpxoJ_GWJA4_1 giraffe Dpz-s6E9VWg_0 person Dp2pGcutqDQ_0 person Dp2pGcutqDQ_1 person Dp4XaG6247k_0 person Dp5KRKUJBGE_0 cow Dp6qJvgV4fQ_0 person Dp71z8eyq7o_0 bus DqBNoutsr4M_0 person DqBNoutsr4M_1 person DqDElT9H4Tg_0 boat DqESUtRuhPw_0 dog DqVUeH6XI2Q_0 person DqegnRXQd5Q_0 airplane Dqi5KTmt04s_0 bus Dqy6NbRkVPE_2 skateboard DrAnw0S9Pmc_0 
person DrCKp4YB7rI_0 person DrE7aW7O0eQ_0 person DrFxlXYC6-o_0 person DrGCtlmxxVc_0 person DrPpkd-UxFY_0 cat Drc0Grdb_LU_0 cat DrgjySu3e-c_0 motorcycle Dr9XXUA4UKc_0 person Dr9XXUA4UKc_1 person Dr--We7lD3I_0 person DsA5QOOIZJw_0 person DsP87b0IuoU_0 person DsZ6Cf42EdQ_0 person DsiAcCUi8iE_2 bear Dsm48Msjw6k_0 bird DsxyH6AKBd0_0 truck Ds0GIUe1AFo_2 person Ds0GIUe1AFo_0 person Ds0GIUe1AFo_1 person Ds3E7n1kRQk_0 train Ds44yYfSEr8_0 bird Ds8xwquSVkw_0 skateboard DtKSEQhjq2I_1 cat DtQGDwZ1PIU_0 truck DtQGDwZ1PIU_2 truck DtSpyLMbD9o_1 motorcycle DtU93_s53sI_0 train Dtc3hZBmn9Q_0 person DteEg93cINc_0 person Dtf2WRyd4OA_0 airplane DtgUpKmdw_g_0 person DtuRiD_E6HU_0 person DtyatJX8J1A_0 bicycle Dt1MDqN3TCs_1 elephant Dt1PLFoRvoM_7 airplane Dt1PLFoRvoM_0 airplane cQAr7IVeBrU_0 person cQC7jBc1pC0_0 person cQIviFGN-_M_0 train cQOFvBNN9to_0 airplane cQOFvBNN9to_1 airplane cQPP6SqX-uk_0 truck cQbqByuUnW8_1 car cQgUGmyvkJ8_0 train cQttS-GIM5c_0 person cQttS-GIM5c_1 person cQw1wXvFnLM_0 person cQ29m5z8Cnk_1 cow cQ4aR8OLr74_0 motorcycle cRGrqg7y9tE_0 boat cRVqyVvxjHI_0 train cRczdkzrJ-w_0 cat cRnDFinbH-s_0 bird cRrjU515FKg_0 person cRvAv1Nn-WQ_0 cat cR6qM7wjtDw_0 knife cSDafQMsYwc_0 cat cSJ2ISog6Pw_0 bird cSJ2ISog6Pw_1 bird cSLerMX3IBg_0 person cSNwXF8OcR8_0 cow cSO-70KCypM_0 skateboard cSVIvCYuDtU_0 cow cSdBaGsGWKk_4 bird cSdBaGsGWKk_9 bird cSdBaGsGWKk_1 bird cSdBaGsGWKk_3 bird cSdBaGsGWKk_6 bird cSdBaGsGWKk_7 bird cSdUwiTGXPc_2 motorcycle cSor-u6VHHw_1 dog cSqMDH0-sDs_2 person cS398dAyQ9k_0 cow cS-QgqiUgLQ_0 person cS-QgqiUgLQ_1 person cTGOQnmi7bo_0 person cTLa1dxk76g_0 person cTUTNgp9rZ4_0 person cTUTNgp9rZ4_1 person cTayBCWq6xo_0 person cTiETDBrGv4_0 skateboard cTiETDBrGv4_1 skateboard cTk8pacLUcc_0 bus cTmv-vp89sY_0 elephant cTmv-vp89sY_1 elephant cTsipIh7xF8_0 cow cTvxGA-EvvY_1 person cTzz_ZCUpxc_0 person cT4Y0HSeBgg_0 elephant cT5UlPnc5MQ_0 person cT5UlPnc5MQ_1 person cT7LjXG7ByI_0 airplane cT7LjXG7ByI_1 airplane cT7LjXG7ByI_2 airplane cT7kZP5B_2s_0 bus cT_US5II64I_0 person 
cUEWtKzcAsM_2 airplane cUEWtKzcAsM_1 airplane cUM5ajI3KJg_3 horse cUNExkBml18_0 person cUSRVmcbXxI_0 person cUS9QgCXcPo_0 person cUWmN_HuZiA_0 person cUYlfMGqB_8_0 dog cU7JEUo5qdM_1 person cU7sT9UHs7s_0 person cVCqOzgt2vI_2 train cVCqOzgt2vI_0 train cVM2h5qbyUw_0 elephant cVXIaONp5o8_2 person cVYqiMXSh9g_1 person cVbcrOx7768_0 person cVfH0tFh5Kc_0 person cVfWBtl-qK4_0 truck cVq5VnfZtNw_0 person cVr16pInr5k_0 person cVsZMfMaxSM_0 person cVtyGQKWFcI_0 motorcycle cV0a2ScBxpE_0 person cV0a2ScBxpE_1 person cV1mBGRlLe8_0 bird cV1szYodba0_0 motorcycle cV8BGLBROa8_0 person cWBCCAo3pUM_0 bird cWBTkrImlLQ_0 train cWBTkrImlLQ_1 train cWGCbw5I6cI_0 skateboard cWIDcoPB3Rg_0 person cWKf_KANUSM_0 person cWRO27zzxF4_0 person cWaVXNQ5cvg_0 person cWb-i8hj8uc_0 person cWcJrAQuNA4_0 bird cWtIT6V98zc_1 person cWxELKsh43s_0 person cW2hQE3lS9k_1 person cW4fmuV2JuU_0 skateboard cW7OrsSn-m8_0 person cXP1Lit5Pmk_0 person cXS9VytLIjM_0 cat cXT5_AFSI8Q_0 person cXUdqfIp-Hs_1 person cXUdqfIp-Hs_2 person cXWgDE6boPQ_0 person cXZt2UZe6QQ_0 motorcycle cXaAcHkHUzU_0 person cXsRP67GHA0_0 person cXsRP67GHA0_1 person cX0yQ5KIAKw_0 person cX3mnglolLE_2 elephant cX3mnglolLE_3 elephant cX6lyv1DI80_1 airplane cX-s4BNxb0c_0 person cYHq8xoYMO4_1 bus cYVLbgGxJMM_1 person cYnyDXx580I_0 person cYpas0B5zEo_0 cow cYvyTVEqiEU_0 giraffe cYwkpA75A8Y_0 person cY1cmlwRnaE_2 bicycle cY1cmlwRnaE_1 bicycle cY6HDOEiINs_0 skateboard cY_INarfLQ4_0 person cZA_Yoq3vy8_0 person cZB5MQY5kVA_0 skateboard cZDoXwn5lv8_1 person cZPvtKaqRxc_0 person cZU2LAWtwUM_0 knife cZZT6OJ6xGk_0 horse cZZT6OJ6xGk_1 horse cZe888DWA8M_0 person cZgt8s4mARc_1 person cZugy4cYVng_0 cat cZz6eOuSV9Y_0 person cZ155yARalk_0 person cZ155yARalk_1 person cZ7siEIFHlI_0 cow caAnHYU-Gwk_0 horse caGQ2b4L930_0 person caGzwv3HLKU_0 skateboard caLKu0yKW0Y_0 dog cacCjMLNpIg_2 bird carYHHE3y3A_3 knife cavT34ZvciI_0 elephant ca4_gKs6MN0_0 bear ca8aNafTzeY_0 person ca_weHSJH80_1 train cbRztq6KZn0_0 horse cbVll1hxlDA_1 person cbVll1hxlDA_0 person cbvbRxOMJ-A_0 truck 
cb6YFX4CVqc_2 airplane ccIWh5JBil8_2 bear ccIWh5JBil8_0 bear ccQ7JnYrTL8_0 bird ccQ7JnYrTL8_1 bird ccRdzj5Zi-U_0 person ccR-h9z3bRI_1 knife ccR-h9z3bRI_2 knife ccVJXErLdOo_0 dog ccWTUq_mvsU_0 elephant ccWTUq_mvsU_1 elephant ccaCWXJ0jKY_0 person ccaYdn2p4Uk_6 knife ccaYdn2p4Uk_10 knife ccfTQmE0zsA_0 person ccfTQmE0zsA_1 person ccwFXG9D98w_0 person cc0S9924O-s_0 skateboard cc76qcSHNMM_1 dog cc76qcSHNMM_0 dog cdBO6xYUmzE_0 person cdBO6xYUmzE_1 person cdKEh34fsYk_0 person cdNWg2zU6bY_0 person cdOQ7lTQJBw_1 cow cdOQ7lTQJBw_2 cow cdSG1fcxNAA_0 person cdS-7_Egk88_0 person cdW8PgwFm6o_0 motorcycle cdZqtqh5PwE_1 person cdZqtqh5PwE_0 person cdZ1ODMJYKM_0 bird cdbmvoa89QU_3 train cdbmvoa89QU_4 train cdbmvoa89QU_5 train cdf-C-P2bW0_0 elephant cdkSgKIMQEM_0 truck cdkSgKIMQEM_1 truck cdoGDD6m8Og_3 person cdpYTik8eL4_0 person cdruQqCvfrI_0 truck cdxkCeoDX6Y_1 person cd80Ii4FB1Q_0 bird ceH46gqMWak_0 person ceIoRNo5FBk_0 person ceIoRNo5FBk_1 person ceLI06w8-Yo_0 person ceVkcz1wysc_2 dog Dt5UnNOUlZA_0 motorcycle DuMGrFowOWE_0 airplane DuUmKpZym5U_4 boat DuV6ahfZ_yw_5 knife DupWsV-iiys_0 knife Dur1W4FemFs_0 person Du7sKt25RiA_1 knife Du8hVxuK10c_1 airplane Du8hVxuK10c_2 airplane Du8hVxuK10c_3 airplane Du8hVxuK10c_4 airplane Du9r_1zpPkA_0 person DvEWbWxGJvQ_0 bus DvEykMsNibg_2 bicycle DvIS9FV5pag_0 person DvIS9FV5pag_1 person DvKLYYQzmas_0 person DvNTMqUwwWo_0 person DvR9Ctfk8lg_0 person DvWCGbG9LT4_0 car DvWDBQ9eMNQ_0 elephant DvWDBQ9eMNQ_2 elephant DvuQOS7UVI0_2 elephant Dv1e0Y8A8yg_0 cow Dv4azGPr4YI_0 truck Dv7eGdF004Y_1 person Dv7eGdF004Y_0 person DwJntGNV4Gw_0 person DwWzbtiIs7k_0 skateboard DwhCZK1eUPw_0 person Dwi-kq9Gcsw_0 zebra Dwi-kq9Gcsw_1 zebra DwlOBOv0IC8_1 bicycle DwlOBOv0IC8_0 bicycle DwvclcpHQNY_0 horse DwzuhLu_Jew_0 bicycle Dw2QHLXWmos_0 truck Dw7BXQFtH60_0 person Dw8lXatl4wE_2 person Dw8lXatl4wE_0 person Dw8lXatl4wE_1 person DxAMNpw-4qg_0 person DxB962sZJ_c_0 airplane DxB962sZJ_c_1 airplane DxB962sZJ_c_2 airplane DxFjGsjegtk_0 person DxHhkA1fVdA_0 person 
DxPOOsSCJpc_0 cat DxU9ZTI7KzY_0 bird DxXEapsjhOg_0 cow DxYW3ZMCXUw_0 person DxegJbsalCo_0 person DxegJbsalCo_1 person Dxl8-fknJjM_0 bird Dxl8-fknJjM_1 bird DxmdjAoDhkE_4 knife DxpMePWSgjs_0 person DxsdKCCUvCY_0 person Dxw3Y-UB0jk_0 airplane Dx0fgXYBRV0_0 knife Dx4a9ZiekrQ_0 elephant Dx4a9ZiekrQ_1 elephant Dx5VMmCltKo_0 person Dx8eIjF--eo_0 person Dx8eIjF--eo_2 person Dx8eIjF--eo_1 person DyFNZgEaw24_1 bird DyZHVNsbZeE_0 person DyceiTbkpMw_0 bicycle Dyd1Aj3RO3I_0 cat DyfyfDI4jqk_0 person DytAOZD9DLU_1 person Dy1-ch56AMc_0 boat Dy5kD11Wnbk_0 person Dy5kD11Wnbk_1 person DzAi_cumPY4_0 person DzCPCgkI8XA_0 motorcycle DzCPCgkI8XA_1 motorcycle DzFhvnd07Ck_0 train DzKdERTAA8U_0 cat DzMXxF7XRaI_0 person DzW2oC31Gcs_1 person DzXDPH8p-6Y_0 motorcycle DziXgWdCrvY_3 horse DzkCtRPiI-Q_0 cat DzlPtZXxtpU_6 elephant DzlPtZXxtpU_4 elephant DzlfBATujA8_1 horse Dzp0BrJSMBU_0 person Dz0d79BMerc_0 motorcycle Dz34hVhjpzA_0 person Dz7kWPDxgbg_1 bicycle Dz73CrM7pH8_0 person Dz8_y0iOjLM_0 skateboard D0DtV2eD7cs_0 knife D0HGjOZ5XWU_1 elephant D0O-T4E2DVo_0 cat D0R59ANL6o4_0 person D0TQLmGtPm4_0 airplane D0TTR7qCVXQ_0 person D0WAC7ByU0M_0 person D0Yx5cLcrqk_0 skateboard D0mf15dFGhk_0 person D0pcdPd6hwY_0 dog D0qo2f2Cshk_0 person D0xc1K3BQnQ_1 bicycle D0zhUpZhZi4_1 airplane D04tMZ7n3YM_0 skateboard D09x5ezi5hU_0 elephant D0-sW80X3kI_1 elephant D1Ct81qiyT4_0 truck D1Ct81qiyT4_1 truck D1DYQay-d_E_0 cat D1IQfkEa2-8_0 truck D1KUzeiWmUE_1 cow D1XPuPzMvv4_1 bus D1cTj9Fy4yE_0 dog D1dWoFMnKhc_0 person D1f92BE9HmI_0 person D1ktXwG0_jM_0 person D1plKiNFzvI_0 cat D1tZzoBOWfA_0 person D1yVIEgFGrY_1 airplane D10WSuM8eqU_0 person D19A7AUqZJ0_0 person D2CXHzxp1TU_0 cow D2Iqqb3RP6c_0 person D2Iqqb3RP6c_4 person D2Iqqb3RP6c_2 person D2Iqqb3RP6c_3 person D2KcVzav3YU_2 airplane D2KoBI6R7W8_0 train D2Qw63hsi1E_3 bear D2RT-qUSw_U_0 dog D2RZP8Y6VT8_0 dog D2Ri5Wy9XPQ_0 person D2RkdlTKlsE_0 person D2VABHjSM6E_0 bus D2VABHjSM6E_2 bus D2co1ZGkwCs_0 skateboard D2rbERtPxNM_0 person D2t36StaDcc_0 elephant D2t36StaDcc_1 
elephant D2wSgbAelUc_0 cat D2yQaYJDNvs_2 bicycle D2yQaYJDNvs_0 bicycle D24GJS9nKC0_0 person D3EIh6pBTdQ_0 train D3F3xWCoWD8_0 person D3IDGSQSrFY_3 giraffe D3IDGSQSrFY_4 elephant D3IDGSQSrFY_5 elephant D3IDGSQSrFY_7 elephant D3IDGSQSrFY_8 elephant D3OvvA5jYlM_2 bird D3OxudXglSM_1 cow D3XqhAXefSA_0 person D3Zg90Ib5GI_0 cat D3b-w5J-wR0_0 person D3tuGaFbdbE_0 person D36Pwfuad5E_0 horse D4CWBceBJEk_0 person D4OMvYw25w0_0 bus D4aL-0UevEY_0 person D4do8kCWydY_0 person D4do8kCWydY_1 person D4goZXgzVC8_0 person D4oLradsvXE_0 person D4qq5Olmh24_0 person D410FuTGoPI_0 bicycle D4_2g_M4CXM_1 person D5GNIcodIw0_0 bird D5KLVLNs7-0_0 train D5KWKhPhqWE_0 dog D5OtHFsiXiI_0 person D5UGpkiG-CQ_0 person D5hYrAC2iIg_0 person D5jUPc4nQO0_0 person D5kSwHOWPBU_1 bird D5kSwHOWPBU_0 bird D5n4B-O8y8g_0 person D5tLtHWe0Jk_0 person D5uTmoMYXDE_0 cow D5x402SaAk8_0 truck D537kaRoYEk_0 person D552mK5tfLU_0 dog D59Eb3u0iPs_2 person D59Eb3u0iPs_0 person D6EDJA1bO3s_0 zebra D6G1X8WFAA8_0 person D6LDq6Q1Aic_0 person D6NzaXWZGEA_1 person D6UsriFwkjQ_0 person D6XIhwBoaik_0 person D6XUUDKA1CA_0 person D6d20KAVyzk_0 person D6f2wdAt_Ug_0 person D6kIRV5rEPk_0 person D6qXaD6WnVQ_0 bicycle D6zUwxeZ1zU_0 person D7c2tRlXz5k_0 skateboard D7dAkMkQf4I_5 elephant D7kHPyS4Gw0_0 person D7r_HLTwhWY_0 person D71B5jrYOig_2 elephant D77yNiFrtmw_0 person D78FDAi2log_0 skateboard D7_S2hp6aKI_1 airplane D7_S2hp6aKI_0 airplane D7_tUVFGy2o_0 person D7_zjfakeYM_0 dog D7_zjfakeYM_3 dog D7_zjfakeYM_4 dog D8GQWYiVK1U_0 dog ceczRgI6HDM_0 boat cev1umQFsVA_2 person cev1umQFsVA_1 person ce8j1r_CDH8_0 dog cfD9yGF5XmY_0 car cfFAjaziwn4_0 person cfWqngaDvvg_0 person cfWqngaDvvg_1 person cfex3QJFkTY_0 dog cfex3QJFkTY_1 dog cfpiw6KGB70_0 dog cfyY4mfwN7A_0 airplane cf0a6xp7r9s_0 bus cf3VOLwZdKY_0 dog cf6daxmvx6M_1 person cf6kCO9JdOM_1 person cf6kCO9JdOM_0 person cgAiH_9c5DU_1 bird cgD7Gr2Y-c8_0 person cgQ_34JYUkU_0 car cgT26vQK-4A_0 person cgZo7nUeCNE_0 bus cgjjdvXBsFI_0 person cgj_bzL4vsQ_0 skateboard cgmkRlhxVQ8_0 person cgmkRlhxVQ8_2 
person cgmkRlhxVQ8_1 person cgxIrs3ySiA_0 skateboard cgyRQ1a79c0_0 train cgyRQ1a79c0_1 train cgzHPxfb-R4_2 person cg4GIYiUNiI_0 person cg9Y2DTUiDQ_0 cow chc30sNO6KA_0 person chl-Wa4_hic_0 person chrXgx4NWck_0 person chrXgx4NWck_1 person chwYzLEqKp4_0 person chyVy1kdL5M_0 person ch_yUR9RHIM_0 dog ciEhviIYSFY_0 bicycle ciFKNPdVskg_0 airplane ciUZ2LoiaCs_0 person ciZNBF9RdaA_1 knife ciZNBF9RdaA_0 knife ciZNBF9RdaA_2 knife ciZNBF9RdaA_4 knife cifpYBLq6dM_0 person cit4hdvCIp0_0 motorcycle ci83tdO3GuM_0 horse cjAhjjWOj24_1 cat cjL-hMHdmN8_0 person cjdImYwFXEI_0 person cjlPNeNKoSo_0 car cjmps6UKu_Y_0 person cjtjQu1YoTc_0 person cjuRQJf1_qs_0 horse cjvMLM_Uzbw_0 person cjye6t7P2XY_0 person ckIaNsLDst8_0 person ckJHbJCefVc_0 bear ckY7Izfnggc_0 person ckfgZsmJEbs_1 elephant ckyL1lkCzU8_0 person ckzaUAcrtY4_0 person ck6hJJVJfvQ_1 person ck6hJJVJfvQ_2 person ck6hJJVJfvQ_0 person clCQhmV8nf8_0 person clL4lyl6J7I_0 person clO2SRgOzAk_0 person clQ98CON1pE_0 person clUGOwaYaPg_0 cat claqhrkmhPg_0 person clmsmTFOSLo_1 dog cl410aCQA8k_0 train cl6C5KiOEHQ_0 train cmAN1SqRkDM_0 person cmGz-63gi5Q_0 train cmHjbUBM4q8_0 elephant cmKnHqPGlTw_0 person cmV1BLuEvpU_0 cow cmeGuaSUg34_1 car cmqxX05lPiI_0 person cmtruoCpSec_0 person cmwRk4-z_BQ_0 person cmwzhxa6Kd8_0 boat cm7Xd_WXZAs_0 person cnAC9g_fYUY_0 train cnAC9g_fYUY_6 train cnAC9g_fYUY_1 train cnAC9g_fYUY_3 train cnAC9g_fYUY_7 train cnAC9g_fYUY_8 train cnAC9g_fYUY_9 train cnJKH5dTKyI_0 skateboard cne8MAKWcjo_2 person cne8MAKWcjo_1 person cnoIwn3cQ7Q_0 bird cnplEeb8Iuk_0 motorcycle cnp30cLXzq8_0 skateboard cnrSdMSCW6w_0 truck cnrSdMSCW6w_1 truck cnrSdMSCW6w_3 truck cnryAbqs0sM_0 horse cnryAbqs0sM_2 horse cnt7MyeNlHA_0 person cnvzLGyGalU_0 cow coBLne1vSV0_0 person coDrWV3qbQE_1 car coIhjdND3yY_0 person coVT-MPjIsc_1 cat cobC6BjJahk_0 person codE_-LtIRY_0 boat cofwfK4F5ac_0 person cohdkT2S_oA_0 skateboard coh6clK_Q6A_0 person comEv_WJ4Uc_0 person cousEghehEo_1 person cousEghehEo_0 person co17Vvf3bag_0 knife co17rRdOvwc_1 motorcycle 
co5rBTsE2i0_0 knife co7SR4bgOM4_0 knife co9DJtEU4eg_0 person cpEYJnyJ9XM_0 train cpLmgivniko_3 knife cpLmgivniko_2 knife cpO5pHTOelo_0 cow cpQ9HawKR-Q_0 airplane cpQ9HawKR-Q_1 airplane cpUTjBksgdA_0 person cpmMEngbDHE_3 person cpmMEngbDHE_0 person cpmMEngbDHE_1 person cpnZFfnjGYs_0 car cpre_wIt0hs_0 train cpre_wIt0hs_1 train cptcOzotQ0E_0 person cpuYK9y7zu8_1 boat cpxkLEREnwo_0 cow cp4ttild7EA_0 train cqEdqz5F7tg_0 cat cqOLpxxqIBw_1 person cqOLpxxqIBw_2 person cqOclzkqkVg_0 person cqO2VRSBGGg_0 bus cqRNPM3jgNs_0 cow cqS_ZvZF4Kk_0 person cqS_ZvZF4Kk_1 person cqez5FuSf44_0 person cqf4Vh7Vy9M_0 person cqkZZqtr3z8_0 person cqkZZqtr3z8_1 person cq3TwUTSBFA_0 horse cq84vJoKj0A_0 person crXlnYSuCuw_0 person crgSyPjbLBw_0 person crh-ncEjMd8_0 umbrella criMO4N0K5E_0 person crmw_2KCRlY_1 horse crmw_2KCRlY_0 horse cruWABLWvD0_0 person crzo7x07GTs_1 elephant cr02TlSWnkI_6 elephant cr5ddm3njdQ_1 bird csGJS_sNJx4_0 person csKSGFZyk04_0 horse csTChnltOdg_0 cow csiWQna-zcg_0 skateboard csl1NFlhS0I_0 person cswk8vZ6th8_0 person cs16RhEpmu4_1 person cs16RhEpmu4_2 person cs3PfcpDro8_0 cow cs_yLDexfXk_0 person ctAtCH6V1Dw_1 person ctAtCH6V1Dw_0 person ctCQsTBheHg_1 person ctJATSvGLTo_0 elephant ctJATSvGLTo_4 elephant ctJATSvGLTo_1 elephant ctJATSvGLTo_2 elephant ctK8CQu6Nvg_2 boat ctLUri8cnqU_0 bear ctNE8tj4Z18_0 truck ctOTsI_RZps_1 person ctOTsI_RZps_0 person ctPfu5shFA0_0 person ctPfu5shFA0_1 person ctRpeLVhC50_0 bicycle ctWUEkluOFo_0 truck ctWrHmTAoxw_4 dog ct24BXc-tWg_0 person ct8_KhvMuHo_0 motorcycle ct_TbfWVBQc_0 person ct_TbfWVBQc_1 person ct_TbfWVBQc_2 person ct_TbfWVBQc_3 person ct_vznHYblc_0 airplane cuHFcWEuUNo_0 skateboard cuQ5swAtzfk_0 person cuRuiFR7bNY_0 person cuU3htRHPgM_0 person cuWjLEIrs8k_5 bus D8btdwmdRNU_0 knife D8sBFUu104g_1 knife D8urBZQXl6o_0 person D8wVRKGVcLw_0 dog D804JptI7_4_0 motorcycle D8-J5NgmOQg_0 person D9J-SuKzTU4_0 bicycle D9RlyV_QhoQ_0 bear D9WsxKDzM80_1 horse D9WsxKDzM80_3 horse D9WsxKDzM80_5 horse D9XDsr6tkug_0 dog D9XDsr6tkug_1 dog D9XwHuLUv_E_0 
car D9ixoNe1mQ8_0 person D94_XdBnfjQ_0 horse D97nupvam-4_0 person D97wkVsbfJk_0 person D97wkVsbfJk_1 person D98TSSeEEXc_0 person D9-PVz9eRtA_0 person D9-PVz9eRtA_1 person D-DNyYPMTvE_0 car D-EA0oKq0qI_0 cat D-UToJ9lT9w_0 person D-YgpB48Efg_0 person D-YtknfK7cQ_0 person D-a0sdpLGlI_0 umbrella D-gTVzHdFAE_0 bus D-gxEOUdm98_0 person D-jl7sUktcE_1 person D-pfJT6Nyfo_0 person D-pfJT6Nyfo_1 person D-u2wEUntuI_0 person D--GMbo7meg_0 person D_FozyNGP_g_0 person D_OvU_wvmsg_0 skateboard D_QDxlwnenM_0 bird D_TbGwH_U4I_0 person D_XHitiDPXI_0 person D_XwOiOHuZU_1 person D_XwOiOHuZU_0 person D_g7kf5F2CE_0 motorcycle D_kMPno6xDw_1 person D_r43ev6HHs_0 airplane D_uO4kxnCwM_0 train D_vXQa4wYoY_0 person D_vxl0ffX4U_6 bicycle D__WGD95lSY_0 cat EABbbYMrVPo_0 person EABxiYRLhro_1 knife EANBKNPscIU_1 dog EANBKNPscIU_0 dog EATgn3uQFCc_0 truck EAecqVilQ60_0 airplane EAh-eJriiEM_0 cat EAlTNLBenas_0 elephant EAmeB0UClfE_0 person EAoS9E3JQM0_0 knife EApLpwcDY04_0 cow EApLpwcDY04_1 cow EAvGskBbSsI_0 person EAvUn45orps_0 person EAvhz7EUrHs_1 person EAvhz7EUrHs_0 person EA2Zq7j78Zw_2 horse EA33eNV3TsM_0 bus EA4Pppxm9q8_2 airplane EA9IwJGPZFo_0 person EBBWzGDSfhQ_0 train EBCEcy1RAZU_0 bear EBDSyGzaeVM_0 person EBDSyGzaeVM_1 person EBGwUwk8_KI_0 motorcycle EBL5WSEhHwQ_0 cow EBTH0ShVz5s_1 horse EBYJEkaJizQ_0 truck EBmABlnU3Ns_0 person EBpvJEz7GAs_0 cow EBqxBh52uek_1 person EBrNePUYA80_0 cat EB0XdJ6nl5Q_1 bear EB5sThk9G-k_0 person EB7yZ9myXmo_2 horse EB7yZ9myXmo_1 horse EB-GUW188Kc_0 person ECDxDS-R1ZU_0 train ECEv0inW5Cs_1 dog ECKwTK9kBHk_0 cat ECLYb63wsdY_0 person ECT7_2qKJJw_0 person ECUpMJzxafs_0 person ECXdLGCGSRU_1 person ECdvMn526ho_0 skateboard EChWuqD2kxc_0 person ECofUr-jIIU_0 person ECpmJNOAfZU_0 person ECuo32_WqfU_0 person EC0Q7uMrJh0_0 cow EC1pupdSC2Y_0 person EC-RADUn0SA_0 skateboard EC-RADUn0SA_1 skateboard EDBYWaa97hs_0 person EDUY2xl1Jkw_0 cat EDYGYkJTUAw_0 person EDZ9Cu6WUAU_1 horse EDcpyGbwAVs_1 train EDcpyGbwAVs_2 train EDqFOrLwfpE_0 elephant EDqFOrLwfpE_1 elephant 
EDrX2_SzLF8_0 elephant EDtN3eOjUXg_1 motorcycle EDvdnYUw9b0_0 person EDxj4RwQr7k_0 truck EDxj4RwQr7k_1 truck EDxj4RwQr7k_2 truck EDxj4RwQr7k_3 truck EDxj4RwQr7k_5 truck ED-QWlNA_QI_1 person ED-QWlNA_QI_0 person EEFgTj2V6IY_0 person EEMkBuPFopc_0 person EENkey7gvFA_0 cat EENyo-VOtiA_0 person EEQVWkmTS6A_0 person EEQVWkmTS6A_1 person EEfWTq58rX0_0 motorcycle EEfWTq58rX0_1 motorcycle EEiUwF9ID5k_1 elephant EEiUwF9ID5k_0 elephant EEiUwF9ID5k_2 elephant EEiUwF9ID5k_5 elephant EEnpnVNwpgk_0 person EEnpnVNwpgk_1 person EEn1JwzcH7Y_0 person EEtv5FqPqG0_0 motorcycle EEx5nPfhJdI_0 bear EE5owiH92Io_0 bird EFHnwo5U2Bc_0 bird EFHnwo5U2Bc_1 bird EFTcDwwNw_M_0 person EFd6XVMNdEk_0 umbrella EFpWVH06Tf4_3 motorcycle EFpWVH06Tf4_1 motorcycle EFpWVH06Tf4_2 motorcycle EFryCLs5aWc_0 person EFwar_GkK6Q_0 cow EF0hPkNXnoA_0 skateboard EF1htFUPo80_1 bus EF23dhLqzKk_1 person EF23dhLqzKk_0 person EF4KGrH7s08_1 train EF4KGrH7s08_2 train EF4KGrH7s08_0 train EF8PHVKHaq8_0 person EF9VafNyS20_0 person EGCQIKdLkIU_1 train EGHYSrxI1Ek_1 person EGIhtnFv2f4_0 person EGI5Yk7IU8s_0 boat EGOtOZyUpk4_0 train EGOtOZyUpk4_1 train EGOtOZyUpk4_2 train EGZ7-ChFJQI_2 knife EGd19Lwe3vM_0 person EGgvoXoby8c_0 person EGgvoXoby8c_1 person EGiEfcahLzY_0 person EGsRldGZ4Bc_4 truck EGsRldGZ4Bc_5 truck EGsRldGZ4Bc_0 truck EGsRldGZ4Bc_1 truck EGsRldGZ4Bc_2 truck EGvzZJ10zwQ_0 train EG7cF7KMqs8_0 motorcycle EG-A5-_1i-o_0 car EHD613XdEQc_0 person EHMEQV26qfk_0 boat EHUgk_5vbps_0 horse EHafuO8IcpI_3 bird EHcP0uDfEyE_0 umbrella EHft6kH6siE_0 person EHft6kH6siE_1 skateboard EHtU4jYmFWw_0 elephant EHvP9Bwmq7M_2 person EHvP9Bwmq7M_0 person EHvP9Bwmq7M_1 person EHv9RwkIPXM_0 skateboard EIIC6lIbxO4_0 cat EIRbrmP8N9U_1 elephant EISmAs76j_g_0 train EIUHtk1IdtA_0 cow EIcGpS1nsXk_6 elephant EIcGpS1nsXk_4 elephant EIdaSifBFgk_0 person EIdaSifBFgk_1 person EIe7fhZxKpQ_0 person EIe7fhZxKpQ_1 person EInkqD_T5Os_0 train EIwa8hvMQ9g_2 bicycle EIwa8hvMQ9g_0 bicycle EI8OMIBxEOo_0 person EI-G2_K6zus_0 person EJE1AAlhjcQ_0 person 
EJE2EqHSaLA_0 airplane EJJefx2O7lo_0 person EJJ0aK1Mefo_1 bird EJMke8tdD9c_0 person EJMp6Gszq8M_0 person EJM15lQ1nds_0 bus EJM15lQ1nds_1 bus EJNv-W_Wh3s_0 airplane EJNv-W_Wh3s_1 airplane EJOO-gnqZOQ_0 person EJQZBc87T7Q_0 person EJTbpxYS19w_0 person EJdJUArfCgA_0 person EJdJUArfCgA_1 bicycle EJ2XL046J4A_0 person EJ3IJ7_jx0s_0 knife EJ3IJ7_jx0s_1 knife EKDm7Y7dQ-g_1 bird EKETFVqhfZI_0 person EKOgJfGpWw8_0 horse EKPKBwGLkg0_0 person EKR2BQWkMTI_1 person EKf-TzUsoG8_0 person EKsbh9eVG0w_1 airplane EKv1nvgLQLc_0 motorcycle EK2VY_FFN04_0 person EK56Obpu5ME_0 elephant EK56Obpu5ME_4 elephant EK5-ZuOavbM_0 train EK5-ZuOavbM_1 train EK5-ZuOavbM_2 train EK7wRGel2vk_0 person cuXky9bc80o_1 elephant cuXky9bc80o_3 elephant cuXky9bc80o_0 elephant cuYker921kg_0 person cuZPt_f2GfE_0 person cusvncJOcwQ_0 horse cu0Z8d-ioZA_0 airplane cu_YsyYcbL0_0 cat cvBKWYZidIs_0 person cvFAAQuXQR8_0 person cvJuXsDfcUY_0 train cvUktXqTBBA_0 car cvUktXqTBBA_1 car cveuhB6Z_D8_1 bicycle cveuhB6Z_D8_6 bicycle cvfI6ccn-J4_0 person cvgZ-1Uaigk_0 person cviAzkIEA00_0 skateboard cvlOlYpovm8_0 person cvyLalOdUEY_0 person cvyTQ9oFD8s_0 elephant cv9PMwKXLoA_0 person cwBgT8f3504_0 person cwB99KCLazI_3 person cwEuIwecOZA_0 car cwHQZi15U3s_1 bear cwHQZi15U3s_2 bear cwKndGwjXho_0 person cwKndGwjXho_1 person cwPtR7LsWag_1 person cwPtR7LsWag_0 person cwTq-wB6R3U_0 skateboard cwe2t4eoAs0_0 person cwf1OksNfQ0_1 horse cwjK5oxoq5Y_1 person cwjK5oxoq5Y_2 person cwmY9UYaukc_0 person cwnltT3Eelo_2 bicycle cwp0G17bk0I_0 truck cwp8Oe0F6y0_0 truck cwsLz_ppMx8_0 truck cwsx0Rs732s_0 person cwyDOlWxH00_0 bus cwzHLMKmpWM_0 horse cw054hU6MdM_0 person cw4vlk-0siU_1 boat cw45Y0beNG4_0 bus cw55i8mKHnE_0 person cw55i8mKHnE_2 person cw57dOs_v5A_0 bear cxAcLoLkk2g_0 person cxJp5-r_mjQ_0 person cxLrrWl89wo_0 person cxMcoeT1INo_0 person cxQENdEkIVQ_0 skateboard cxSj2n8O4Vk_0 person cxUXpTWO4iY_0 train cxbTIQtmtLs_0 person cxiI7jApblc_0 boat cxkH0GxPEqU_0 motorcycle cxm8wGi_pl4_0 person cxsitsK8l9w_0 horse cxsitsK8l9w_1 horse 
cx0cCIp1KeU_0 person cx0cCIp1KeU_1 person cx0tj_0g0-k_0 person cx2bUajKTrw_0 person cx4EC6uXkkY_3 boat cyBgPXda4lw_0 person cydwQgvjXlk_1 person cyd0m3k4Iv8_0 cat cynwjNSXfDs_0 elephant cyz45rMhH9E_0 person cy4xwLUwDN4_0 person cy4xwLUwDN4_1 horse cy4xwLUwDN4_3 horse cy5IjIQ0UNQ_0 motorcycle cy58Sr7mA_Q_0 knife cy6woAEQ0aU_0 knife cy8BRHRLKa4_0 train cy9CeQwHsws_0 bird cy9kq-lD2Q8_0 skateboard czD_BiifXv4_0 knife czLen_XZrRo_0 horse czUjYoRVVYw_1 horse czec9DaQ1sQ_0 person cze3sm-N48s_0 person czjU6Q4s1jc_1 person cznO_APZ6xQ_0 bear czpxbOFiY_Q_1 person czpxbOFiY_Q_0 person cztHS4laeBQ_1 bicycle czto2OaEIww_0 person cz0dXFpjC6o_6 bear cz0dXFpjC6o_4 bear cz0dXFpjC6o_5 bear cz5kAZB6n0k_3 bear cz6eGvs1xNE_1 motorcycle cz8sE1Vn4Gw_0 person cz83QPHVLnk_0 umbrella c0GrJULqad0_0 person c0GstZDjoNM_0 person c0IYOMYovRo_1 person c0J3zJ8n3SI_0 horse c0LibLcues0_0 bear c0MEfCeuV5U_0 bird c0MdSWVdmqY_0 bus c0PyfX2HFqE_0 person c0TJZWOz78g_1 dog c0XKBQNwSlg_0 truck c0aHKGTYgeo_0 person c0bZsiE4obs_0 horse c0jq_aReY5M_0 motorcycle c0kH2qIyG7E_6 horse c0kH2qIyG7E_4 horse c0lBfqi79wQ_0 cow c0lDR6ABjTE_2 person c0nRMc9KiiQ_0 dog c0nXpd7yJsk_0 person c0o_nv0BL6Y_1 bear c0pzN4lVApI_1 person c0qkbu5wLF8_0 elephant c0qkbu5wLF8_2 elephant c0wve_629pA_0 person c0yrclVs1YA_0 cat c02KdAN0Hwg_0 bird c04Vd9VQao8_0 person c04ixznYflE_1 giraffe c07Yqknz4KI_0 train c07k0EtqcVs_1 car c08cFHAOc7I_0 train c0_M6VhGXOo_0 person c0_M6VhGXOo_1 person c1JGF-ltiJ8_1 bicycle c1JGF-ltiJ8_2 bicycle c1PUETYl8Lk_0 airplane c1QAgByBiYE_0 person c1WZ6dEz6kw_0 airplane c1XMeGkSwJQ_0 person c1XfiRiOTb0_0 horse c1a_E7CZsVk_0 person c1djg96PnM0_1 person c1djg96PnM0_0 person c1hBqL_LWE0_3 bird c1j8TlZsEmQ_0 boat c1laLoj4fM8_0 person c10eOkpL080_0 person c10eOkpL080_1 person c2B7cQwr4Pk_0 person c2EIdJJnku0_0 motorcycle c2E5_n_bZKc_0 train c2Kh-3yj9Ak_0 person c2MTwEvPGGk_0 person c2MUYY-qPhA_1 bus c2MqPrUNXQ4_0 train c2UDI136z20_0 elephant c2UDI136z20_4 elephant c2UDI136z20_5 elephant c2UDI136z20_7 elephant 
c2YlmT-aFE4_0 cat c2a9uwUCFK8_0 cow c2dk3AjUcYs_0 person c2gJYqYcsZg_0 person c2luSxdPZ6A_0 person c2m_PmRSEmw_0 elephant c2qJhOvlIUU_0 airplane c2xTBZttrzA_0 person c22HGSTHjBA_2 knife c22HGSTHjBA_1 knife c22yvcXZcM0_0 bird c2_qHguvZbI_2 bear c2_qHguvZbI_0 bear c3E9z6F-Txk_0 train c3J2U0kR6Hg_0 person c3TisGCbmoU_1 person c3Ur6j05SgQ_1 bicycle c3YFgnDBuXw_0 person c3bCGnwqGxc_0 car c3eo0_ftrn4_0 cow c3pP__Uybq8_0 person c3wt1MUbgD4_0 person c3wt1MUbgD4_1 person c37EOoRHd7E_2 truck c4A01X82TfI_0 train c4FmSUmvYbo_0 person c4FmSUmvYbo_1 person c4Hh2XdTBGY_0 cow c4ICOFVvcTs_0 person c4e-qA4esVY_1 person c4iCXPdqm6c_0 elephant c4jbOCZyGsQ_0 person c4k8Yk1x3H8_1 person c4k8Yk1x3H8_0 person c4xRJS9_5Fk_0 train c4xRJS9_5Fk_1 train c40Mwg88VJI_0 person c43ihGsR1eA_1 person c5AKIs1XUhc_1 bicycle c5AKIs1XUhc_2 bicycle c5AKIs1XUhc_3 bicycle c5BYdZTaBgc_0 person c5CmxgLHcxA_0 bus c5Fw-Fi4daE_0 cow c5GANV8PlSM_0 person c5GIQcIJ9Tc_0 truck c5GOwfkZXFk_0 person c5GOwfkZXFk_1 person c5Q2ZeMDx3o_0 train c5TlkWtFymE_3 dog c5WT0W8SfGg_0 cow c5WT0W8SfGg_5 cow c5WT0W8SfGg_1 cow c5WT0W8SfGg_2 cow c5WT0W8SfGg_3 cow c5WT0W8SfGg_4 cow c5cooFy7-SM_1 elephant c5hEygqOXOU_0 person c5oiA5xy15M_0 person c56nid2YSes_6 bird c56nid2YSes_0 bird c56nid2YSes_1 bird c56nid2YSes_2 bird c56nid2YSes_5 bird c56nid2YSes_8 bird c56nid2YSes_9 bird c5_dNG2vWXg_0 car c6EIognIYWs_0 bird c6ZQRNXfcZA_1 person c6a4xySAJ0o_0 truck c6niMRNXDeo_0 person c6qKbpvd-iw_0 person c6rbqnU4LXs_2 motorcycle c6rbqnU4LXs_0 motorcycle c6s839WnVhE_0 truck c6yBOD3Wo5A_0 person c7B-3x-3V34_0 person c7ILC5wYs8A_0 person c7KoGv5Ha7k_0 person c7PMPnuPjp8_0 person c7RFexe2Ba4_1 bicycle c7RFexe2Ba4_3 bicycle c7RFexe2Ba4_0 bicycle c7RFexe2Ba4_2 bicycle c7SMRurbkY4_0 bus c7bKlPVR5pI_0 boat c7hVbIhp0Wc_0 person c7jWXqWoMz0_4 bicycle c7s8weR8lEY_0 person c7v4ZFCK-A4_0 person c70kaPblMLU_0 cow c74hYNtpwdA_0 dog c75cllxWxZE_0 person c7_op6G05l0_0 airplane c8B4ZVLv364_0 person c8Cl-5olqWk_0 motorcycle c8Gaja-xUeQ_1 person c8I3JAxoLTs_0 
bicycle c8I3JAxoLTs_1 bicycle c8I3JAxoLTs_3 bicycle c8LHqWmKrJU_1 airplane c8LHqWmKrJU_2 airplane c8Mo16hH7qs_0 person c8UrmdREAO8_0 person c8Y7MJRWFqE_0 cat c8Y8y9BsPHw_0 cow c8b9qqF9Xvw_0 person c8b9qqF9Xvw_1 person c8ezNTNUXqc_0 cat c8wbvQnndJc_1 bicycle c8wdGQw1jB4_1 bus c8wdGQw1jB4_2 bus c8y3bmW0X9s_1 cow c8zphqgYcJM_0 person c80SYyKXCCw_0 person c8_fHVnrzZ8_2 elephant c9EDbgCRGP0_0 person c9GKsfyRkmE_0 person c9IdrMV-Y_Y_0 person c9Q9LPaqyug_0 umbrella c9SbfXgAoO8_1 airplane c9Somjq2gLs_0 umbrella c9WDXLFtYLU_0 bus c9XaEHVxu4M_0 person c9Y9a6KVWRE_0 bird c9Y9a6KVWRE_1 bird c9ZWCwVv6Q0_0 person c9dPiEkCwR4_0 motorcycle c9gCDztKasg_0 elephant c9pYz2lTh3I_1 person c90ldeMSfL0_0 cat c94gzpjmj24_0 person c9_87BKOW1I_0 cow c-CCw_cyicE_0 cow c-G0LV4kyY0_0 car c-T9ITcEW9c_0 person c-T9ITcEW9c_1 person c-ZnwBvVFGE_0 person c-gH6T1q-sk_0 person c-pKAy_3arM_0 person c-uOjPSq-10_0 cow c-vwn6zqogs_0 person c-vwn6zqogs_1 person c-4uPwFKBdY_0 person c-_iMD-ihnE_0 motorcycle c-_94CuEo_M_1 person c_SQI7NirwY_0 person c_THUYYi_-k_0 airplane c_YojhaB5pI_0 motorcycle c_jNM33kJuA_0 person c_rUQgBtHY4_0 person c_rUQgBtHY4_2 person c_rUQgBtHY4_1 person c_wkIYzEEDk_0 dog c_6OcDyZ93k_0 bus c_9GO2BbPz4_0 horse dAQu2GQSyrY_0 cat dAS6SqC7TCw_1 elephant dAVIZQJ5Af4_0 person dAqurx13i7I_0 knife dAynVVxxb_o_0 person dA7mx3mrJeA_0 train dA_ZtitJeMA_0 person dBDSqZ8rirA_0 person dBGKqrEvsIE_0 boat dBGKqrEvsIE_4 boat dBKexOUQSQA_0 cow dBKexOUQSQA_1 cow dBKexOUQSQA_2 cow dBKexOUQSQA_4 cow dBKexOUQSQA_5 cow dBKexOUQSQA_6 cow dBOrrvJDv54_1 skateboard dBPu5iVlw1Y_2 horse dBSryinfjiI_0 person dBS9maEElcw_0 person dBUpfcdFDUQ_0 bicycle dBWeUQd06l4_0 person dBWeUQd06l4_1 person dBiGneGqmh0_0 cow dBk2FwZgrtk_0 cow dBq77lvujCk_0 bird dBuvGegR_vA_0 person dByVvpTlwL4_1 knife dB29dsCcN9s_0 train dB43vSgLY2M_0 person dCG24UL_NeM_0 person dCSF80Y6lso_0 person dCSF80Y6lso_1 person dCZ9suBocXk_0 person dCgz-7OgwMQ_1 person dCl8hSleXYQ_0 cow dCoi3rXWgbM_0 person dCqdvmS1jts_0 person dCqdvmS1jts_1 person 
dC9rTC3kzsI_0 cow dDADJZV4i74_0 horse dDA5p5TJ03g_0 person dDB84W_zVOI_0 skateboard dDB84W_zVOI_1 skateboard dDE3p8Gs878_0 elephant dDGiQLFJtPs_0 bicycle dDIbBZtEJ2w_0 knife dDLgQQ2XRc8_5 horse dDLgQQ2XRc8_3 horse dDLgQQ2XRc8_6 horse dDO-RlSt3Gw_0 person dDQ58wciink_0 cow dDZYTPEd9KE_1 airplane dDacKPH4sOw_0 car dDacKPH4sOw_1 car dDcBtNpmCeU_0 person dDgcHWpKMeo_0 person dDkaPLEvAwM_0 horse dDkaPLEvAwM_1 horse dDkaPLEvAwM_2 horse dDqe9sBGR24_0 bird dDx0MqaKT2w_0 person dDx0MqaKT2w_2 motorcycle dD-AlVwxf-g_1 cow dD_Ew85jXzk_1 train dD_PbxvCBcA_1 person dECTTSpEUKg_0 person dEW9ZwvMsDE_0 cat dEc5fHlEXCo_0 truck dEuzpQL0tNo_7 elephant dEuzpQL0tNo_1 elephant dEuzpQL0tNo_2 elephant dE7OwbOHsu8_0 person dE7WsfeVkI8_0 person dE7X93gdVPQ_0 cat dFCUyBTrvNM_0 horse dFCu7E6aYM4_0 person dFCu7E6aYM4_1 person dFEo5YKHAcA_2 skateboard dFEo5YKHAcA_0 skateboard dFMPz16FOzE_0 motorcycle dFZSSPvMBqE_0 zebra dFZSSPvMBqE_1 zebra dFa7TcQRCUU_1 bird dFbZxetmjCQ_0 skateboard dFkNDweVNFU_0 cat dFpJq9s5fec_1 bicycle dFpJq9s5fec_2 bicycle dFsDjjWW00Q_0 knife dFth5-8MEhM_0 person dF7OkxFt3I8_0 person dF_aGgW1jcM_0 person dGE7t6KgXHc_0 person dGFrWX61Zk0_0 person dGS01inQU1U_2 person dGS01inQU1U_0 person dGS01inQU1U_1 person dGZBUkIXMpo_0 person dGZ_pzDrl70_0 person dGdh_BHleU4_0 boat dGh51ZQ9QAg_0 bird dGk8D_De-2E_0 person dGk8D_De-2E_1 person dGpbPaorWys_1 bear dGq1bpRxbiA_0 person dGyR5TWO-p4_1 person dG0CtnphYzg_0 person dG5mjfvTY7c_0 boat dG7DSOtetMY_0 knife dG9J5UpxeyY_0 person dG9J5UpxeyY_1 person dHCgtjlT_Lg_4 horse dHCpH8dTwfw_0 horse dHF9NIqrx6Q_0 car dHGIXivupi4_0 person dHGIXivupi4_1 person dHGIXivupi4_2 person dHJkOetpjQw_0 bus dHO6vTrB66w_0 person dHO6vTrB66w_1 person dHVDjpivOKw_1 person dHVDjpivOKw_0 person dHVgQCO07SU_1 person dHVgQCO07SU_2 person dHfs5GT-YpY_0 cow dHg1Xorklm0_0 person dHimuOjriUc_0 cow dHnk6ulSNSo_0 person dHnsZs2Riqk_0 person dHnsZs2Riqk_1 person dHsD3F8dTpc_0 bird dHvlIrb2Q-k_0 person dHwR5d4xGEk_0 knife dHwR5d4xGEk_1 knife dHwR5d4xGEk_2 knife 
dHwR5d4xGEk_3 knife dHwR5d4xGEk_4 knife dHxmY1bGbNc_4 bird dH89qyunr6s_0 person dH94i4xFlZU_1 elephant dH94i4xFlZU_6 elephant dH94i4xFlZU_0 elephant dH94i4xFlZU_5 elephant dH94i4xFlZU_7 elephant dICl73jYZ3M_0 person dICrafh45_I_3 airplane dIDxqrhmBE4_0 truck dIDxqrhmBE4_2 truck dIEZ2kfTzzY_0 boat dIJk0w4SnH8_0 bird dIVtaleUNWI_0 person dIVtaleUNWI_1 person dIX81Ov0fUY_0 person dIZM-9d8bSQ_0 person dIZM-9d8bSQ_1 person dIm0Sv_iE2E_0 motorcycle dIqYGVVgYsU_0 person dIzMmAGaF6U_1 skateboard dI93uXfSaRM_0 bird dJB-DXpgq2U_1 bird dJKAhixNM9Y_1 truck dJYNs94fv_0_0 person dJgqX3uy6z4_0 person dJg4R9cpbjI_0 person dJisrPH71tE_0 person dJi_dOrUZnw_0 person dJjrFTy9H3c_0 person dJkzzYh6BkY_1 cat dJnRg-1zO1g_3 knife dJqGj0FeC9I_0 cat dJvoaqZjIDw_0 person dJ2B9A0mYl0_1 dog dJ2kWscI-tc_1 dog dJ4PR9zme-s_0 person dJ6S9bSEYDc_0 cow dJ8J7WOLZtk_0 skateboard ELDxjZXMtCg_0 person ELLTxQ47f90_1 person ELLTxQ47f90_0 person ELNgTt9Jswc_0 train ELOZutiZKMM_0 person ELOZutiZKMM_1 person ELPpy9ABb3s_1 elephant ELTeW4X2mGY_1 cow ELbg8i93W8I_0 person ELbjX2Ya0_o_0 dog ELmktutrkDk_0 person ELqA6fb0un8_0 person EL8H94Lycf8_0 person EMAVfcO6JFE_0 person EMKcTJp7ehY_0 person EMOpCv3vVfE_1 skateboard EMP7p3FNxZU_0 person EMU8vGL7ZFQ_0 person EMb28oLn66k_0 airplane EMgh3pwtnXg_0 person EMiRla730lM_1 person EMiRla730lM_0 person EMmg9OKgyBE_1 boat EMmmZ6ADzfI_0 skateboard EMngQ4YMTv0_0 motorcycle EMorunu9Ik8_0 truck EMqd3lVNUxg_7 bus EMuGAIADn3s_0 person EMwcDTRPPMw_0 airplane EMyQWQ_Yobc_0 dog EM0yGxKJWqY_0 elephant EM1R3HXt7DY_0 person EM1z9o601v4_0 knife EM3tBaIyR0o_0 motorcycle EM5e1snhsCs_0 person EM-k8ZAva6k_0 person EM-zjCQyGAc_0 dog ENAr6j6fcWU_0 bird ENCHiWUV4dk_0 person ENI-JuSPNQA_0 motorcycle ENSEWig-4ZM_0 knife ENXXFcrrxGM_0 car ENc0uxXKsaI_0 person ENkqstdLKl4_0 person ENk4JRIbEaE_1 person ENnPjtPjU6c_0 person ENtoAci6OwQ_0 cow ENvdCzm4whM_0 truck ENvdCzm4whM_1 truck ENvdCzm4whM_2 truck EN0Klsi-AKY_0 bicycle EN4IIJjhBeI_0 zebra EN-QCSvtEd0_3 elephant EN-4SsZnn-k_0 person EOEXVXG1TDk_0 
person EOVNlasJhIo_1 person EOdHjLYopi0_1 bird EOedzXaVI4U_2 bird EOe3CfOT53g_0 person EOmVKXeoKBc_1 airplane EOq-3ZRn0SQ_0 skateboard EOt6j5ecODw_0 train EO7NccQDQyM_0 cat EO8Dpvy4oXs_0 zebra EO8mQrkIZuY_0 person EO_DwtyWh0s_3 person EO_DwtyWh0s_0 person EO_DwtyWh0s_1 person EO_DwtyWh0s_2 person EPOXqdKNjKg_2 giraffe EPU630RSI5c_2 person EPU630RSI5c_0 person EPWmdYKJaXk_0 bird EPycDWf2vY4_0 skateboard EP_ezteElzk_0 person EQBFPIdI8gY_0 person EQC8eEghvs8_0 person EQNSjjkyRBg_0 person EQNSjjkyRBg_1 person EQTee9qqTZs_0 person EQVCizuJQFY_0 umbrella EQdEm5HuPG4_5 train EQx1XHc0mRM_1 motorcycle EQzXCoQRbas_1 train EQ5rBLoiT78_0 bus EQ9-lbsee1s_0 person ERCvzMzkDhg_0 skateboard ERGwo6vIXdQ_0 person ERJR-zQYyH4_0 person ERR-qjVJ3lY_0 person ERVp_cX1juc_0 person ERev6rrd5XA_3 motorcycle ERyyYMb2fFk_0 cow ERzh41uuxUE_3 bicycle ER0IdSeymeI_0 person ER0IdSeymeI_1 person ER03PLUBt4c_0 train ER03PLUBt4c_1 train ER03PLUBt4c_2 train ER03PLUBt4c_3 train ER53sUYwz1I_0 zebra ER6vMbAyQ6E_1 skateboard ER6vMbAyQ6E_0 skateboard ESDQMC_70Pk_0 bear ESInVf3ioiA_1 dog ESMdbpGXk4I_0 person EST4CUX19Eg_0 person ESokfN84OYk_0 elephant ESokfN84OYk_3 elephant ESokfN84OYk_4 elephant ESpwZsbwQGA_1 elephant ESpylyha7g0_0 horse ESt5TEXuGIM_0 person ESt5TEXuGIM_1 person ESwsyjITYGM_0 skateboard ETBia7K3ZHw_0 motorcycle ETBia7K3ZHw_2 motorcycle ETQTZgnfRK4_1 person ETQi93bP3YQ_8 elephant ETQi93bP3YQ_2 elephant ETTgj1pxvME_2 person ETWI4nXFANg_0 person ETcmjY7Jigo_1 motorcycle ETgN7EcVVQI_1 person ETmYIq5CF2k_0 motorcycle ET4xC8Wl_CA_0 person ET4yAsJTvlk_0 cow EUH3oSBX950_0 person EUH3oSBX950_1 person EULIYiiV-O0_0 person EULIYiiV-O0_1 person EULchAlLDfM_0 train EURUU5P5flo_0 person EUcHraiUCjA_0 bicycle EUcWvzarnb0_0 umbrella EUdNEi4myuA_0 person EUtfoblvHn0_0 person EUuCDfb8lf4_2 person EUuCDfb8lf4_1 person EU93Mw9WGkc_0 skateboard EVBHY1qGVos_0 person EVBHY1qGVos_3 horse EVElggpPSCM_0 elephant EVE2SBJ-2S8_0 person EVH8Ql7_pYE_0 person EVTW6Ka7-NU_0 person EViJ_JQcv5c_0 train EVmGPGaP6bY_0 person 
EVnnSfmb4go_0 giraffe EVn52FBjG9E_0 person EVn52FBjG9E_1 person EVxEEc26TWg_1 giraffe EWLiwu56oQc_1 person EWNd02yWiYw_0 person EWP0Hhxsf58_0 person EWQo_1YXfYM_1 person EWQo_1YXfYM_0 person EWTvjjpAUm0_0 airplane EWXyQ1tS3jI_0 elephant EWdNgXvr54s_0 dog EWfPRTjQO9k_0 dog EWgsivaLhl0_6 elephant EWgsivaLhl0_1 elephant EWgsivaLhl0_2 elephant EWi25l2D0cw_0 cat EWkndzLXvLc_0 bicycle EWuOSRFWTzg_1 elephant EW0Mgele6Gc_0 person EW0Mgele6Gc_1 person EW6FHYagN0Y_0 person EW98OEvTxM8_0 person EW-Zuo7ArI4_0 dog EXDDO7gLoL4_1 person EXDDO7gLoL4_2 person EXDDO7gLoL4_3 person EXDDO7gLoL4_4 person EXGwKMtyR1M_0 person EXHZgqkcXG8_1 cow EXJITC62tU4_0 umbrella EXSMz4HnWfg_0 dog EXaiYiUQrMI_1 dog EXfiGeKWKTk_7 airplane EXfiGeKWKTk_1 airplane EXiGyq1TD80_0 person EXiGyq1TD80_1 person EXkbZbo1n5U_2 elephant EXkbZbo1n5U_0 elephant EX817S50E5U_0 person EX-dqihLUwY_0 motorcycle EX-dqihLUwY_2 motorcycle EYCaJR9md8k_0 airplane EYEWPdaJuL0_4 bird EYEWPdaJuL0_5 bird EYEwLM8YTwc_0 person EYFMOBeF9UE_0 knife EYHtNGztiRQ_1 car EYKrEDelAdU_1 bear EYM1oXAmBq0_1 bus EYRf00qGMVU_0 train EYV6D6G6t2c_1 person EYZsYCSedGw_0 person EYd9lSK7Bbk_0 person EYhtY59whvs_0 person EYmWVBDEutA_0 horse EYnEMtlMaPY_0 person EYoj8D64YLA_0 skateboard EYuLodJTgYs_0 train EY2pZ9A48ng_0 truck EY2pZ9A48ng_1 truck EY2pZ9A48ng_3 truck EY25PJWD2j4_0 person EY36YeIgOYI_0 person EY36YeIgOYI_1 person EZWcsRlXIA8_0 person EZbOH9yEe-A_0 dog EZh1lf4yfCg_0 person EZ5Wa2duCoM_0 person EZ5Wa2duCoM_1 person EZ7d9ab31ys_0 giraffe EZ9-_7o9Vds_0 bird EZ9-_7o9Vds_1 bird EZ_xC5EBwvk_0 bus EaBdeSUjDYs_0 dog EaFSd7_S8kc_0 horse EaQ1P4QyRsY_0 person dKEVBoMMD2w_0 boat dKJz_EakSc4_0 person dKMb2S2SSfI_0 skateboard dKTgMjbnNPQ_0 skateboard dKiwficH2d4_0 person dKi4xI4vB-k_0 umbrella dKlCFQxk5Dc_3 person dKlCFQxk5Dc_5 person dKlCFQxk5Dc_0 person dKlCFQxk5Dc_1 person dKlCFQxk5Dc_2 person dKq4S1IVjlA_0 person dLFWcgSewxs_0 truck dLH8fBNk89Y_0 cat dLIld9ux7p4_0 airplane dLT61O_htwI_0 cat dLUCKkji5wo_0 person dLUCKkji5wo_1 person dLV2VJkpyMI_0 
airplane dLbhzrFtNC0_0 person dLhVV7DMXkw_0 person dLoxdmLuphk_0 dog dLq5OW1xY54_0 elephant dLq5OW1xY54_3 elephant dLq5OW1xY54_2 elephant dLtQB9P_7BU_2 bear dLty27VgJcc_0 train dLvr7BjgsHg_0 person dLwXzYr8beg_0 car dL3dSZMnBko_0 person dL3vGWsRVCg_0 knife dMDGwTdSHIo_0 motorcycle dMJQi7oYiqQ_1 person dMS5hB4uWdk_0 bird dMWgiVqknaE_2 person dMWgiVqknaE_0 person dMZONdbNFbk_4 bicycle dMZONdbNFbk_2 bicycle dMdUZi9lxrU_0 cat dMiwR-DS6UE_0 car dMsIDwHkWNE_0 person dMulBz-N8oA_0 horse dM7lOj89YZE_0 person dM7-xh2kSmc_0 person dM7-xh2kSmc_1 person dM9u0c0qSV0_0 cow dNCm5MtFcp0_0 person dNEAY77it7o_0 person dNShS9OdIoA_1 person dNShS9OdIoA_0 person dNSlL572gMU_0 truck dNSlL572gMU_1 truck dNVvIPWEH1Q_0 person dNVvIPWEH1Q_1 person dNdTs9Qa1A0_0 truck dNeF_3qppZQ_0 skateboard dNj_77jiPcs_1 cow dNknNwahiv4_0 giraffe dNoz32bgN0U_0 car dNpQfDg_dIg_0 person dNqdMh44imM_0 train dNs2JO9SgGo_1 airplane dNs2JO9SgGo_2 airplane dNyMDstraS0_0 person dN1cn1CPEa8_0 person dODPVlzMR1A_0 person dOHuuTREVQk_0 person dOHuuTREVQk_1 person dOHuuTREVQk_2 person dOHuuTREVQk_3 person dOMW6BLHI2s_0 elephant dOMW6BLHI2s_1 elephant dOOQ32tmk14_0 elephant dORLSKDLr1w_0 cat dOUVBpTWHzc_0 person dOVzO5pkY2o_0 horse dOWhuaTBmr8_0 truck dOdX5nkOBoQ_1 person dOdYYCqd6i0_0 person dOdYYCqd6i0_1 person dOd-8kfbjz4_0 train dOd-8kfbjz4_1 train dOfNevz8wlc_0 bus dO2CbXVpSl0_0 elephant dPA7g60qlnk_1 boat dPJk57_DSuI_0 truck dPJ7_mdmjJo_4 truck dPJ7_mdmjJo_1 truck dPTnDrK0jl0_0 knife dPZPjPwJCnA_0 person dPiOaLH0K4Y_0 bear dPiOaLH0K4Y_2 bear dPma_hb-MR8_0 skateboard dPnxUa8yPbw_0 train dPpwBkl-F9k_3 bicycle dPpwBkl-F9k_0 bicycle dPp0no_eYOQ_0 dog dPqheqisvs8_0 person dPvgWsIPDr0_0 horse dP0jXsi0KUw_0 skateboard dP_-3SJLP1Y_0 person dQB4GI0Bgus_0 truck dQCFCRTz2rc_1 giraffe dQCFCRTz2rc_4 giraffe dQCFCRTz2rc_5 giraffe dQCFCRTz2rc_0 giraffe dQIQv4YkBaM_0 truck dQI-ReUS1hk_0 person dQM_-V4jSpM_0 cat dQNG1syFdKQ_0 person dQPdAoRj8vw_0 dog dQWw3losmfA_1 bicycle dQY2wbSJyOQ_0 person dQh9dmaqW3s_0 person dQh9dmaqW3s_1 
person dQlybGW3tbw_1 cat dQnNTlCD_AQ_0 elephant dQnNTlCD_AQ_1 elephant dQoX3OkaI4M_0 person dQzWZhDVLYk_1 person dQ4hJadqL_w_0 person dQ62PlC9Frc_0 zebra dRBb5v_Fv3g_0 elephant dRDdBvl4olg_0 person dRHTO6H764g_0 person dRHYGXImEBk_2 person dRHYGXImEBk_0 person dRInM_HaQZs_0 bus dRVEs1099F8_0 horse dRcLZtR6KFs_0 person dRcrvTR9xIY_0 person dRiBVua-2Ck_0 person dRjzvcGshbA_1 person dRjzvcGshbA_0 person dRs8FcKuu6w_0 boat dRt8H1uQ5Og_0 umbrella dRt8H1uQ5Og_1 umbrella dR7jBT3cxr8_0 person dR8kCc9XNJs_0 boat dR-8FlykNZ0_0 person dSAODa472ys_0 bird dSAYK4yUlDs_4 person dSAYK4yUlDs_0 person dSAYK4yUlDs_1 person dSAYK4yUlDs_2 person dSAYK4yUlDs_3 person dSEv_R8nQik_0 zebra dSFMrnh2szI_0 cat dSLakvIEH9o_0 bear dSLmBYdUku8_0 person dSQTVC-RyAU_0 person dSWhe4RgQ_w_0 cat dSZBg-Vcr7E_0 motorcycle dSojBtCOkqQ_0 person dSx4IloBWZs_0 person dSzAX5l_fs0_0 person dSzAX5l_fs0_1 person dS0mBDDgP_A_0 person dS0mBDDgP_A_1 person dS8x0l5I7f0_0 boat dTDxzi0o_Qg_1 airplane dTMe2Vse97w_0 cat dTVBSXs5Me8_0 person dTVKs9m3eZU_0 cat dTm_DRCtjCo_0 elephant dTm_DRCtjCo_1 elephant dTrt1C_90H0_0 knife dTurjz-gJek_0 person dT6A3DwqZb0_0 boat dT8wudfW9gg_1 horse dT-INB6puFM_0 skateboard dT-INB6puFM_1 skateboard dUAtLBDfmBo_0 airplane dUAtLBDfmBo_2 airplane dUC_SF_mN_E_3 horse dUC_SF_mN_E_1 horse dUInMUIPtTs_0 person dUJH8d3CMU8_0 bear dUMLWt99A7o_0 person dUP4OTLrOA0_0 person dUW_G_--wI8_0 train dUXFUWivXPA_0 horse dUXFUWivXPA_1 horse dUbP54CBYd0_0 airplane dUm9A-1AoMU_0 person dUqrowFcbD0_0 person dUx_UfS9cQI_1 dog dUx_UfS9cQI_0 dog dU-bQRDInro_2 bird dU-bQRDInro_4 bird dVAMoKYgrwE_0 person dVKQhCF8o8w_0 person dVTHVxh6Tro_1 knife dVWAD4gOu-8_1 person dVd7OzbhOq0_0 person dViVbA7N_AE_0 airplane dVqPo7-p71Y_0 person dVtqTTZTFDQ_0 person dWCqnck4Um0_0 person dWFVX1psRZI_0 bird dWGkW13rQBY_3 horse dWGkW13rQBY_5 horse dWGkW13rQBY_8 horse dWVJFIzIKEc_2 bicycle dWVJFIzIKEc_0 bicycle dWVJFIzIKEc_1 bicycle dWXSWEaCId8_1 person dWdOl13DwwY_0 airplane dWdl9RdXrHo_0 person dWdl9RdXrHo_2 person dWd0sszZOXc_0 
person dWesodD0ff4_0 airplane dWgfwKBrSiE_0 person dWgpYitSv0c_0 person dWkrnxWB1CU_0 person dWlDN9Hozgg_0 dog dWtqRwEurDU_0 person dW1oE_LHALo_0 elephant dW4DX7lQoGg_0 elephant dW5aU0U7K28_0 person dW53l1sR_zM_0 person dXEH9QiCyHk_0 train dXEH9QiCyHk_1 train dXKi3ZHjgWM_1 umbrella dXLyWGJxHnI_0 person dXOsaszlVY0_0 horse dXSuppGXFeI_0 elephant dXSuppGXFeI_1 elephant dXdFEix8vu4_0 train dXjUZeuzgaw_0 train dXkmG8AR82Q_2 airplane dXkmG8AR82Q_5 airplane dX6W4-sxsX0_0 cat dX9J6yDM5Q8_0 person dX-4XwYWv48_0 person dYGOSaGjHQU_0 person dYQMrQe1pSk_0 person dYRIEDyD9Qs_0 airplane dYRKwU2TJYI_0 elephant dYVcalOS1SE_0 dog EacR2o35-kc_0 bicycle EaeD7utPpTQ_0 person EakGzU5UgWI_0 person EakGzU5UgWI_1 person EakGzU5UgWI_3 person EamZ8De_WFE_6 elephant EamZ8De_WFE_0 elephant EamZ8De_WFE_2 elephant EamZ8De_WFE_3 elephant EamZ8De_WFE_4 elephant EavqjWy5gag_0 person Eaxszmfn7WA_1 person Eaxszmfn7WA_0 person Eay0MFBCdqY_1 horse EazzsVK1-pM_2 umbrella EbJV0e75xtk_1 person EbJV0e75xtk_0 person EbWt1hAb3LQ_0 person EbXzlcsBsfA_0 person EbYJAv5c_G8_0 person EblX3oKGsBA_0 skateboard Eb1n2o0YpOM_0 cow Eb3sGSIWtCw_0 person Eb7juFDG3Dw_0 car EcMh5TIKmzY_0 person EcNpsheyrIU_0 person EcNpsheyrIU_1 person EcWrNFz5J-o_1 dog EcpsBV2FEBE_3 horse EcsiLHpIvL4_0 person Ecu8VEIC2y8_2 elephant Ecu8VEIC2y8_1 elephant EcvYBldDm_U_0 person EdE8zCwJ56g_0 person EdE8zCwJ56g_1 person EdIfx7lQxEw_1 dog EdIfx7lQxEw_0 dog EdOvSD40Tb0_0 cow EdTkeITBkvY_0 person EdTkeITBkvY_1 person EdaY0DFamDc_1 skateboard EdfKMOIOHtI_0 person Eds-fi9s-O4_0 person Ed486SKW0kM_0 train Ed-ENhlS7Dg_1 boat EeCjxMzh5_A_0 person EeDhzR9I-Tc_0 motorcycle EeLllq2Zim4_0 dog EeMUemitsFU_0 person EeRqVkQ1Z7Q_0 car EeRqVkQ1Z7Q_1 car EeTRT4j5GcQ_0 person EeYRHJuK3wo_0 boat EeYqy9QZZTU_0 airplane Eeb2vPJsaN0_0 person Eee6rmiMYKY_1 car Eesk8VSxpIU_0 cat EetKMgVh0Pk_0 person EexaBL5jDL4_0 knife EexaBL5jDL4_3 knife Eeyjjk9-BvY_0 horse Ee7CW7lZfXA_1 person Ee7CW7lZfXA_0 person EfE6r-Iq5CM_0 person EfG_eBrAjdI_0 motorcycle EfHCZUHt0d8_0 person 
EfMCesQKyoE_3 airplane EfNSTkpl6dQ_0 person EfSMsLkasg8_1 person EfjC0VVD2Do_0 person EfvRGCuPoF4_0 person Ef1Tm3dKzbY_0 motorcycle Ef2GKdopP_A_0 person Ef7-yzJqZto_0 person Ef9YiYODEbg_0 cat Ef9q8mAPYZA_0 person Ef_N7JmICUU_10 bicycle Ef_5u21WLbs_0 cat EgDOCraAd64_2 train EgHVReOnDpM_0 person EgPKMlxhH0A_0 person EgPxUnCFS10_3 knife EgYCBIlDm98_0 horse Egf4iNTfanU_0 airplane Egf4iNTfanU_2 airplane EghxGvj6pTs_0 person Egl_1FgGUyE_2 bird EgpSSMkQOEE_0 bicycle EgxlP5S15uQ_1 motorcycle Eg6YUwqAQqM_0 person Eg7bJ46L4Cg_0 airplane Eg7bJ46L4Cg_1 airplane Eg7bJ46L4Cg_2 airplane Eg82FN1vC3A_0 knife Eg9-5uBMrpc_0 cat Eg-cp7jgFA0_0 person EhF73HJvEWo_1 train EhKAs4Z1JE0_0 person EhSaOGOPUns_0 skateboard EhbaW6F3U6I_1 person EhbuzBK5bes_3 giraffe EhbuzBK5bes_2 giraffe EhcmJOG2Jws_0 person EhfmC9Wa8xs_0 person Eho09eptX7c_0 person EhpwK0_8UJA_0 boat Ehpz_gcdCcY_0 knife Ehpz_gcdCcY_1 knife Ehpz_gcdCcY_2 knife Eh6FARrS1VY_0 skateboard Eh7f9wgtUns_0 bus Eh88_JdkWWs_0 person Eh-x-OzZxGo_0 person EiE9eIJ-Rv4_0 car EiLWN5T6wko_1 person EiNTdTOmvDU_0 person EiUbGE2f6fU_0 train EiUbGE2f6fU_1 train EiZG3M9_EMc_0 bird EiaYgqLcbqM_2 elephant EibdBvTND-I_0 person EibdBvTND-I_1 person Eine_0RExlI_0 person Ei1XBJFaUeI_0 person Ei1XBJFaUeI_1 person Ei6ZitRjwdA_0 person Ei7n3944Ovs_0 umbrella Ei9d8OX0ui0_1 airplane Ei9d8OX0ui0_0 airplane Ei9724H_wUs_1 person Ei9724H_wUs_0 person EjcMZ8Y0Oeg_0 boat EjgxtJaNIH8_0 skateboard Ej2wn6JRBzA_0 skateboard Ej7xV32Trwc_0 person Ej8UwQiT5jk_1 knife Ej8UwQiT5jk_3 knife Ej_zFc5qxRw_0 cat EkMGStKSilE_0 person EkMdmPclE3k_1 dog EkTrskvsL5c_1 horse EkWd3wPBEyg_0 airplane EkawSvsvh3g_0 person EkdP_pWa9s0_1 airplane Eke0rATHhX4_0 person Ekh_cm7q1y8_0 cow EklOuZWH-8Q_0 motorcycle EkyydrsMSkY_0 person Ek1DlGGsUdY_0 umbrella Ek4323MkRYo_0 bicycle ElJtz3uv-AQ_0 person ElLiin7Cda4_1 person ElLiin7Cda4_0 person ElNzy4USrLA_0 truck ElR4MuOUYKM_0 bird ElgmQr70py4_5 train Elrxptn-Zqo_0 person EluRnlB_s6c_0 train EluRnlB_s6c_3 train ElwZ1M6McHo_0 skateboard 
El2nzuCxrGk_1 horse El5fRl-4vko_0 knife El9Efl32L8w_0 person EmDjVcaznIA_0 zebra EmDjVcaznIA_1 zebra EmDjVcaznIA_2 zebra EmJeLKaG_hE_2 bird EmJk7hDSzaM_0 person EmJk7hDSzaM_1 person EmJk7hDSzaM_3 cow EmWzmxDjjOs_0 person EmkwHglcEKA_1 motorcycle EmlvoH2AxWs_0 person EmqEntvqLw0_0 airplane EmsMjm0VXJc_0 skateboard Em44RLa7Qp4_0 person Em_UT-f7q0E_1 train EnJkvPAMuaM_0 train EnJkvPAMuaM_3 train EnJkvPAMuaM_1 train EnL2FiVIuJg_0 elephant EnL2FiVIuJg_1 elephant EnS1Yte0Xzw_5 knife EnS1Yte0Xzw_2 knife EnUW7YSmli0_0 horse EnVtYzkXwjM_0 person EnbXP2xywwk_0 person EnmwKpZJTQc_0 person EnoNrjMNAC0_0 person EnrcDrbyUxY_0 person EnrcDrbyUxY_1 person EoaeqRc88HU_0 person EoallCLchmo_0 cow EodtHMtH9zw_0 person EojPQY8nQ2Y_0 train EouV6Ut4NP8_1 person EouV6Ut4NP8_0 person EouZIHzCFq8_0 airplane dYVtJPfJmf4_0 person dYgPc190feM_0 person dYgxCdKNrIo_1 airplane dYjCbeBAgYs_0 person dYmREF5dDkw_0 dog dYosdOz5mZo_0 person dYr1OKT1lCA_0 person dYyHudM-fQc_0 person dYyHudM-fQc_1 person dYzh49Wr9bQ_0 airplane dY9dlzr4w0Y_0 person dZFiRqMkFPc_0 person dZHJc_1os9Q_1 person dZHJc_1os9Q_0 person dZHJc_1os9Q_2 person dZMQgxFHQPA_0 train dZQ2o-4a5tU_0 person dZSQXDQcafc_0 knife dZUOCWwr2xs_0 knife dZaFo3C_1ts_0 person dZdvK41DxLI_3 car dZio0uN6DHY_0 horse dZio0uN6DHY_1 horse dZjnkqYO2lE_0 truck dZmG64W2CtM_2 umbrella dZmG64W2CtM_0 umbrella dZsXB4o-wdE_0 airplane dZzfVDrmMj0_0 bird dZzfVDrmMj0_1 bird dZ1vVETiQAQ_0 person dZ6ub2CEvbg_1 bicycle dZ6ub2CEvbg_2 bicycle dZ6ub2CEvbg_3 bicycle daBl0Q92zLE_4 bear daBl0Q92zLE_0 bear daIJjuHo2EQ_0 cow daMcE2oorrE_1 person daWo89I2Tuo_0 skateboard daWo89I2Tuo_1 skateboard daWywQD6R4g_8 elephant daWywQD6R4g_0 elephant daWywQD6R4g_2 elephant daWywQD6R4g_4 elephant daWywQD6R4g_5 elephant daWywQD6R4g_6 elephant daXEykL8UQ0_0 horse daZHZXfmY7k_0 cat daaHTdFcx5o_0 boat daaX2TXbYmo_2 airplane dadAGYt0vS0_1 horse dalHUNR5yAA_1 person dan-4YoB-Vw_0 person daoysu5sfUQ_0 person dapxBMe8Wz8_1 person daqWFFdK8Ck_0 person dawGJDtHlcs_0 person da4jNzO5wL0_0 person 
da61HPBGEwo_0 bicycle dbU6Fn_5bHI_0 bus dbXr-9m66-U_0 person dbdhdeVMuL0_0 bird dbhGB6XW3fM_0 horse dbxb42TzQ_g_0 skateboard dbysY1V2TwI_0 person dby-fBGIPRU_1 boat dby-fBGIPRU_4 boat db9i2fI8dv4_0 horse dcADt99ndxg_0 person dcADt99ndxg_1 person dcBMrHLTvPo_0 person dcEW4y5AI1E_1 elephant dcHcm85hd5s_2 bear dcH304rxwLY_0 person dcJN3WJZLOE_0 train dcLR55c41rg_1 horse dcLoVk60Gkg_0 cow dcLoVk60Gkg_1 cow dcLp5mtSkPA_0 cow dcO5id4LTVE_0 person dcO5id4LTVE_1 person dcO5id4LTVE_2 person dcO5id4LTVE_3 person dcO5id4LTVE_4 person dcUA_Wf8vrc_2 skateboard dcXdmOY1YCw_0 car dcXdmOY1YCw_1 car dcblbU5lyQU_0 person dcdXiEQkghM_0 person dcdXiEQkghM_1 person dcf4zn9wOjM_1 person dcj9u89LAu8_0 umbrella dcoFS0-09xc_0 person dcoFS0-09xc_1 person dcwbXzJsVDw_1 car dcxhSnf9sg0_1 horse dc1_WHDpL3w_0 person dc-BpV5fuQM_2 cow ddK4WXTyoWw_0 cow ddPN4QZuLBE_0 train ddPxOsA2Cro_0 person ddPxOsA2Cro_1 person ddW0MYEUWlc_0 person ddaqR7COVYo_0 person dddKAnk7-hQ_0 umbrella ddlPux88liU_0 person ddruq0KhCxM_1 skateboard ddsTE3NwHyM_0 person ddtNIDCxqCk_0 person ddw0wDJgJwM_0 person ddxQR-NB6E4_0 person ddzrzJEogWQ_4 motorcycle ddzrzJEogWQ_6 motorcycle ddzrzJEogWQ_0 motorcycle ddzrzJEogWQ_1 motorcycle ddzrzJEogWQ_2 motorcycle ddzrzJEogWQ_3 motorcycle ddzrzJEogWQ_5 motorcycle dd0CsqY6Fbo_0 airplane dd8a6btF_B4_0 person deDEnw72hQk_0 person deNoMwyFOO4_0 person ded6WOfO9O8_1 person deep6EOo6ds_0 person deihMrgBXEc_0 person delKGPVRJsY_0 person demxgFkqGxA_0 bus deqo50gGTBo_1 airplane dew_lb_L9hE_0 person dezAUC4KbJI_0 person de1f8qTDYUI_0 person de2HZ6DBOuM_0 person de4mcJTPj48_0 person de4mcJTPj48_1 person de7-gbLffxs_0 cow de8KeV2waGY_1 person de8V1ovs5eM_0 person de_fGa7Zxus_0 person dfAvID4lRsE_0 person dfAvID4lRsE_1 person dfDTR9mCUZI_2 dog dfEF6SMFbGM_0 skateboard dfKBB3-VicU_0 bus dfK1HsVc2B0_0 person dfh2lETTLZI_0 skateboard dfp4iVaXCpg_0 skateboard dfqLJxxdinA_0 train dfsTKKT5-UU_0 person dfseA2X5Cow_0 person df_PzyC0gTw_0 cat df_SYY4pb3I_2 cow dgGYa05XpYo_0 skateboard dgIsZXSKACE_0 
person dgOQKwvhLpE_1 dog dgTYRveHMjM_0 cat dgYN1OH5oc0_0 zebra dgl2b2bRpq0_0 person dgtaJOOOtKg_0 person dgweyIjmmDY_0 cat dgyGZqXgvag_0 person dg6u7R87Gh4_0 person dhFII58PWhI_0 person dhIL9wRZMm0_1 horse dhIt9lg6Sbw_1 boat dhUG1gnTlso_0 dog dhZ-JmFNyak_0 person dhcVp1GmJyI_0 elephant dhcVp1GmJyI_1 elephant dhgs2glg_N8_1 person dhgs2glg_N8_0 person dhiYTV7DJLY_0 cow dhjeKi58cuU_0 cow dhkFVTvJ6ZU_0 cat dhy85XNJT3c_0 horse dh03d5vq1B0_2 dog dh1XFXciUf4_3 bus dh1XFXciUf4_2 bus dh6zZFXD0_c_0 elephant diDDNe-MVfs_1 elephant diMmgSNBO8k_2 person diRn1fE6zMg_0 person diSTaGHORrc_0 person diSZzd4jM0E_0 person diUCxWmV084_0 person diZ-mRLPpqI_0 person didB6Es7Des_0 person didTjworKXg_2 umbrella dif0t09rdZg_1 cow dioELry6bbk_0 airplane dix7GRytfcw_0 person dix7GRytfcw_1 person di1KJ0Mb5M8_0 dog di2TPYyIeWc_0 dog djIw9AQoU3o_2 person djLPrNtPSY8_0 person djLUJy1sWMg_0 cat djNzrBpqnnY_0 car djSxYfG99k8_0 car djaGBINLXTQ_0 elephant djh9QeYLg7M_0 airplane djiTvgkjTW4_0 train djiTvgkjTW4_3 train djiTvgkjTW4_4 train djiTvgkjTW4_5 train djiTvgkjTW4_7 train djlet5--ZW0_0 person djlet5--ZW0_1 person djpCG2oprrA_1 person djvQyzGNp7o_0 person dj2Qk--KIkk_0 person dj6yGGCBFWc_0 person dj8d91U-F_0_0 person dkQWD9hv4fo_1 train dkQbDCav3eM_1 person dkSetHNXnNY_0 cow dkb-6x7zo5E_0 person dkdCTCL5imo_2 truck dkdCTCL5imo_3 truck dkdCTCL5imo_4 truck dkiOcFZwrA0_0 bear dknj-Sv4HUs_1 person dkpsViIYlsI_0 cow dkw4aWG9l6E_0 bear dkw4aWG9l6E_4 bear dkw4aWG9l6E_5 bear dkxLcr2kvIM_1 horse dk3Nf8K3RzI_0 boat dk4gT0vHgeU_0 person dk6h_GL9OZo_0 person dk7QISqnWZc_0 bird dk7juEuA2is_0 bear dk7juEuA2is_2 bear dlAMvsjssrY_0 person dlDsSVM3JJ8_0 person dlG7MtSpAK4_0 person dlIG99k9Hoo_0 elephant dlIkYaty1Uw_0 car dlNMnGKJJjU_0 cow dlQ1Gr54T74_11 bicycle dlQ1Gr54T74_14 bicycle dlQ1Gr54T74_5 bicycle dlVOuZK_1bY_1 person dlVTSnDsl38_1 knife dlW_HPbVriI_1 truck dlW_HPbVriI_3 truck dlW_HPbVriI_0 truck dlW_HPbVriI_2 truck EpH59JsxI3w_0 car EpIb8r7uBqM_0 person EpJ_M6rB_PA_1 bird EpOaQjhIh_M_0 airplane 
EpP_TLXxb7Y_0 cow EpSURaF1BfY_0 truck EpT8zxDFPf8_0 cow EpVdzlk5GYU_0 truck Epd3r6iiqVk_0 bicycle EpeIZCFbjw0_0 skateboard EpnttpyYTAo_0 person Epoqtu0Pqe4_0 cow Ep8bd1STWKw_0 motorcycle Ep81Lk66O50_0 person Ep84L7WDoyE_0 person EqBJeYu5f_E_0 elephant EqBJeYu5f_E_3 elephant EqHBjvHkvf0_0 person EqJR5UZAlSg_1 car EqLYPeo9ZC0_0 person EqMqvcHp8Ko_0 car EqMqvcHp8Ko_1 car EqSYKCxmeDA_0 dog EqSYKCxmeDA_1 dog Eqh7XqsYl5M_2 person EqmnFPweBmk_1 boat EquATbp9uL0_0 person EquATbp9uL0_1 person EqvMMBAZP2o_0 person ErUllSQJNgI_4 elephant ErUllSQJNgI_5 elephant ErWUOje4g8Q_0 motorcycle ErX04vJ-JcU_0 cat Erf0FkqYsTE_0 person Ero36xFQKS4_0 truck Ero36xFQKS4_1 truck Er4yJXTWNNo_3 bicycle Er4yJXTWNNo_4 bicycle Er4yJXTWNNo_5 bicycle Er5D0fXZsjk_0 person Er9tboOA5k8_0 person EsEreMKZP7Q_1 person EsQ05q5ZZVM_3 skateboard EsQ05q5ZZVM_5 skateboard EsQ05q5ZZVM_2 skateboard EsYZbF7hCTE_0 person EsZV26-jxX8_0 motorcycle EsbWwOYbT8Q_0 train EskqA8x8mX4_2 airplane EsrUSkNrqWs_1 person Es0O5wtTZ2Q_0 person Es9GOUryI0U_0 person Es9Yq8uZ4fA_0 person Es-W0AxQ5Us_1 cow EtebDuK3fUY_0 person EtlKR9-Q2dk_0 person Etx8YkcrSF8_0 person Et0RRuaW-Rg_1 dog Et1PKq61KAk_0 person EuETmswYRrs_0 cow EuHJB5UXmZg_0 umbrella EuHvelij5ao_0 cat EuIGG3PoslE_0 person EuInxfWuqqA_0 person EuZnOeXR020_0 person Eua2VIbXEMs_0 boat EufXUqphYVw_0 person EumfsHXsVGk_0 person Eunz2V1RXXo_0 train EurWaA7qCDw_0 bear EuwjSGtSYlY_0 person EuwjSGtSYlY_1 person EuzDIk8ag30_0 person EuzVaAXsy4o_0 motorcycle Eu0nzh2HQNk_0 person EvDZK2cFYVE_0 motorcycle EvGoGf-YCA8_0 bicycle EvKPt0vynKY_0 truck EvN8x67_EQ0_0 person EvZF9DagIoQ_4 horse EvZF9DagIoQ_0 horse EvZF9DagIoQ_1 horse EvmcyDEPnoA_1 skateboard EvvbUe6FBSM_0 bird Evvij-hmE4A_0 person EwBKceBTBbo_0 dog EwBwIUrHR3o_0 person EwBwIUrHR3o_1 person EwDyryqt94g_4 airplane EwDyryqt94g_5 airplane EwKIz0qAvKQ_0 person EwSJeylFWsY_0 person EwUGFtWeyMA_0 person EwUeAvO5mrE_0 cow EwU8puKxN8Y_0 person EwU8puKxN8Y_1 person EwWCc9whfDI_0 cow EwYNowdS57c_0 person Ewet2EA1xX8_1 elephant 
Ewet2EA1xX8_2 elephant Ewet2EA1xX8_0 elephant EwozH_35SDg_0 person Ewq-V9jATzg_0 person Ew8lEc8Ufi8_1 bus ExCPGilpuMM_0 person ExCPGilpuMM_1 person ExCjkt_zXuw_0 person ExCjkt_zXuw_1 person ExJjWM_rAnI_3 airplane ExJjWM_rAnI_1 airplane ExPBVcERfwY_1 person ExPBVcERfwY_3 person ExPBVcERfwY_4 person ExT3xg9phtQ_0 person ExVHmko3jfY_0 horse ExW1ju88BW8_0 cat Exb1TjMi76I_0 boat Exc3W9o5-04_1 horse Exe2EizU9VQ_0 cow Exe2EizU9VQ_1 cow ExfZl3DY8JM_0 person ExfZl3DY8JM_1 person Exl9alp64lE_1 person ExqpcHBGBlw_1 person ExvcP05yrS0_0 person ExvcP05yrS0_1 person ExxZODpPkQQ_1 train Exz2WL2-kR0_0 giraffe Ex4__JMKkqI_0 person EyMzZV5iTEA_0 horse EyP_0uEuXVs_1 bear EybT7tq6XGk_0 person EymmgPoUyuM_0 person EymmgPoUyuM_1 person EymmgPoUyuM_2 person Eyn7IfnWm4o_0 airplane Eyn7IfnWm4o_3 airplane Eyn7IfnWm4o_1 airplane Eyn7IfnWm4o_2 airplane Eyp8nornJW0_0 bear Eyp8nornJW0_1 bear Eyrfi9lGdoo_1 airplane EyuKu6qMB6g_0 person EywYZ3Gjwuc_0 person EywnxH68jDU_0 cow Eyzwbz1ZxmU_0 cat Ey2TgrQ30Z0_1 bicycle Ey2TgrQ30Z0_2 bicycle Ey36TlCS4rQ_0 person Ey4BLGQL2Bg_0 bear Ey7eosaz0zU_0 person Ey7us0SSVAs_0 airplane Ey7us0SSVAs_2 airplane Ey7wIzCkFU4_0 person Ey7wIzCkFU4_1 person EzC0tuKaVGA_0 person EzEX4OUEHHQ_1 skateboard EzGa4SSPsbI_0 bicycle EzYjRjhff20_0 person EzZEWp1cluc_0 person EzeDITt3y5I_0 person EzeDITt3y5I_1 person Ezlyx_EudUQ_0 person Ezlyx_EudUQ_1 person EzuizVcVbSA_0 person Ez6I4TpzC5I_0 person E0K5Ll7wHUw_0 bird E0YZDyUoHTM_0 knife E00cOMpNw3o_0 motorcycle E01EgIBFxRk_0 person E038teDC3EM_0 person E0-Z0KM1UB4_1 person E1AwHXQ00ns_0 person E1MTmF3FAN0_0 bicycle E1NfSTmGCRE_0 knife E1ZhuBRYvKY_0 cow E1bNSKg9iv8_0 horse E1oEO09-bAw_0 dog E1pmsS_ufrs_0 person E1xPwEvYymk_1 person E1xPwEvYymk_2 person E1xPwEvYymk_0 person E1zxNG3Fglo_0 bird E17S76lXHfI_0 person E1_ETAQHwcM_0 person E2O5Y6VAhIc_0 person E2O5Y6VAhIc_1 person E2Pobz5qoAE_0 person E2Pobz5qoAE_1 person E2Vqlq1BQYs_0 airplane E2WWQOKGeb4_0 skateboard E2aiCls-clY_0 person E2lj1iRVceA_0 skateboard E22IW-PgLfU_0 person 
E28Cad7vBrw_0 person E28Cad7vBrw_1 person E29-bZY3lEo_0 airplane E3NmlH6taDs_0 truck E3SKOBDl6u0_0 person E3enDSeq6P0_0 person E3tmvYSpQSQ_0 person E35M5UWMXeE_0 horse E35M5UWMXeE_2 horse E4Bl9c7JbYs_0 person E4DFW1SxJfY_0 dog E4DFW1SxJfY_2 dog E4TfSUdVt8U_1 truck E4pulnGY9X8_1 person E43SZ65LnfY_0 cow E45LqepDuqg_1 person E5BtXla2lCQ_0 bicycle E5CQkNJct6Q_0 motorcycle E5HB-EDNtE8_0 person dlZZzrMO6yY_0 person dlbAWAuByWk_0 person dlcovhFKigE_0 person dlh5RGS5Bzw_0 bird dlkVXsIhcZg_1 person dlo83yH621I_1 cow dl2g71ftw9A_3 train dl2g71ftw9A_4 train dl2g71ftw9A_5 train dl6ogvuxF78_0 person dl_fuQYhAP8_0 person dmDdRd6wULk_2 dog dmJ1DuWiAdM_0 person dmMz5FhGOCc_1 person dmVAi4WMi3M_0 person dmVAi4WMi3M_1 motorcycle dmVAi4WMi3M_3 motorcycle dmW77KHtuCQ_0 horse dmYSNG-7VCg_0 elephant dmfX7DsSS1k_0 bicycle dmuWxnAfMn4_0 elephant dm4rFNN7FZQ_0 truck dm-lOmiP2d8_0 cow dnAQ7q60f_g_0 elephant dnB0we4_DrY_0 cow dnB6auv8PBk_0 person dnFZkG7_E1w_0 person dnNh07bnI_s_0 cat dnUXo5nstys_0 person dnVV1s-LcAY_0 person dnY-4hOzYts_1 person dncQtuB_6qA_0 motorcycle dncxd1B2sLk_0 giraffe dnwqVE3lPyY_1 train dnwqVE3lPyY_2 train dn_r7u_5apk_0 skateboard doHOuG6wqXY_0 motorcycle doSDuIGLFXY_0 cat doTj5H8Uf1I_0 cow doUwj_z1x5o_0 elephant doX3oiADm_s_1 person domu9ia2Vo8_0 person dorx67yK7WU_0 bird dovn1QHCR7o_0 person dowbL0CZ5do_0 bicycle do1QIWrYeW8_0 person do5o5Dw0vPc_1 elephant do5o5Dw0vPc_4 elephant do7abiC5aZk_1 car do82ENX9cOc_0 person do-LmSJTPj4_0 skateboard dpDG64ULlUg_0 boat dpGCSoTITrw_2 elephant dpJWbIaQYoI_0 person dpJWbIaQYoI_1 person dpQP5r61_GQ_0 person dpUorqkSYZE_0 dog dpYYMgh5TS0_0 truck dpcwUs5srlc_0 horse dpi0u6pfCTM_0 person dpjLyHb9AyI_0 person dpkF3SwOunc_0 dog dpn6vUVXBuM_2 umbrella dptZbHZQYPM_1 dog dptZbHZQYPM_2 dog dpxGzRQqAaU_0 motorcycle dpxGzRQqAaU_1 motorcycle dpxGzRQqAaU_2 motorcycle dpxVPiv62SY_0 person dp2cUWhnP0A_0 knife dp3Q_aTYeJ4_0 person dp_JQh45a50_0 person dp_1VrEUWbU_0 person dqCFYWRf9g8_1 elephant dqDLl7BlAAA_0 skateboard dqFRS9o1CSU_1 person 
dqOoL5LiXc8_0 boat dqQPbKE4UhQ_0 person dqTlCZzLk6A_1 cow dqWEwvhVNiI_0 person dqavRiIA-38_0 truck dqj-msAUvnc_0 cat dqzc4W6f-x4_1 person drAhAL_F38Y_0 person drAh2lmjDs4_0 skateboard drJGoPHMunk_0 person dreDU-1isrI_0 person dre_PgfS8yw_0 elephant drf5ijiEkUo_0 person drm2oJ3X1HM_0 person drqFwF60pgE_0 airplane drqFwF60pgE_1 airplane drqFwF60pgE_2 airplane drqe2hP0PKI_0 person dr3TumG_tlI_1 cow dr4dU5UDF-Q_0 person dr8s5VC9Fxg_1 person dsLbM2wZHrc_0 dog dsPwJ3J1ZKA_0 person dsTR1vv9XLE_0 person dsTR1vv9XLE_1 person dsUuAVsJSi4_1 motorcycle dstcI7MYsZ0_0 person dsyBSejpe-k_0 person ds1BJMsasQI_0 person ds6FmQYwgYw_0 skateboard dtDGbuCwBuY_0 bicycle dtHgnX0NtxE_0 person dtMbzXL9wO4_2 bear dtOFqz41TJ0_0 bird dtR2UeJbIvg_0 person dtWfbusf4Es_0 horse dtYdUj-d8fA_0 train dtZrB9iDzgQ_0 horse dtlUL4D7_NM_0 bird dtvZaXxNgKQ_0 person dtwUG12h74g_0 person dt8Tngmse50_0 bicycle duOX3z4IJSY_0 person duTvmDpj0sI_3 boat duTvmDpj0sI_2 boat duV82Wn9rXk_0 car duZYUVeDXEM_0 cat duaO7S-EH1A_1 person ducdg4KXQsg_0 person duoFWPZbeNc_0 person dupnmzaPsWA_6 elephant dutp3txJPTY_0 person duvuNqufLjs_0 cow du5hbB5w3UU_0 horse du96VR7vtOk_0 bird dvKKmu56UkE_0 person dvS2DSYGOGg_0 person dvbVbBosw38_1 person dvgf3R9k0uY_0 bird dvur4MZD_yc_0 person dvur4MZD_yc_1 person dv0ptUC-DIE_0 person dv6ymk8duso_0 bird dv_KURooPDU_0 person dwbRsYPV7Ag_0 person dwpopXTeeGc_0 person dwrYJ92znpw_0 cat dwy7k_gtEco_1 boat dw8kejnR7L4_0 person dw-2_KqGeYY_1 bird dxFrLHoW9jI_0 motorcycle dxGlDl4IukI_0 horse dxGlDl4IukI_1 horse dxViI6VXh6Y_0 dog dxn8VDPNvJM_0 person dxq9r-qrJ2A_0 person dxsQn1MuZRA_0 bus dx0z7DYxGSw_0 person dx4rtOOz7tA_1 umbrella dx6ucdpKZP0_0 person dx8nEHWD1xc_0 person dyAC2ey1DQU_4 bird dyJ83t1zgkU_0 skateboard dyPMbIsTtFs_0 person dyPt3VKGZPo_1 person dyR4vnjF5do_0 truck dyZixtbxEE4_1 person dym-lDsiSTM_0 boat dyt8LtUqIMU_0 boat dyy3oxsiErU_19 truck dyy3oxsiErU_14 truck dy2J0aeX5eQ_0 truck dy3nkqKOjbk_0 person dy6zETD5NFo_0 cow dzEKq7fsVnQ_0 train dzNRDfnNbeE_1 person dzS2ClyakEg_0 truck 
dzXv_YFLPqg_0 person dzahMuEcbCM_0 cow dzeNnQOePGs_0 person dzhSVb26d7Q_0 umbrella dzoQb8C3vxE_0 person dzsHYOJpBbY_0 bear dzv-u3s_YtI_0 person dzyVndvBofo_1 horse dz3SP1rd9zE_0 car dz_ATSJBx6k_0 airplane dz_ATSJBx6k_1 airplane d0J7uodSxF8_2 motorcycle d0NY8eqs19s_0 motorcycle d0NtMMBjQp0_1 truck d0NtMMBjQp0_2 truck d0NtMMBjQp0_0 truck d0RIwZfoGNg_0 person d0ZEYzyD9Vg_0 person d0b8-K_6D68_0 umbrella d0hJditcWj4_0 person d0hQQC2i1Y0_0 person d0hQQC2i1Y0_1 person d0hdtlKidzs_0 person d0h9QWelhII_0 boat d0lVKBOzOQ0_0 skateboard d0qGN1A7XJA_0 person d0vHpkvShqg_0 giraffe d0vUARlHvjc_1 cow d0v47QFRyvg_0 person d0v47QFRyvg_1 person d00UKAQHK2A_0 person d02xOzIVP-s_0 person d04Dr38addQ_0 airplane d09H7U6x-Fc_0 cat E5OHeMbBp9s_1 giraffe E5RbbN1bPN8_0 person E5YibOn90Co_0 skateboard E5b9Yug5vbk_0 bicycle E5b9Yug5vbk_4 bicycle E5b9Yug5vbk_1 bicycle E5b9Yug5vbk_2 bicycle E5b9Yug5vbk_3 bicycle E5dBaFyBYX0_0 airplane E5me_giHEOE_1 person E5trQkGM3Wk_0 cat E5wZ4pk5X0I_1 train E59OnpOGBLU_0 skateboard E6Am4hIuXvk_0 person E6Avey2AVRM_1 person E6A8vfHTdOQ_0 person E6EtoMfo384_0 train E6EtoMfo384_1 train E6GvpwdOQrw_2 train E6GvpwdOQrw_3 train E6GvpwdOQrw_8 train E6JLxU918TE_0 bicycle E6XGO0hx4N8_0 person E6Y2QsetU0M_1 person E6s0XT5G7Eo_0 bird E6s0XT5G7Eo_1 bird E6uGh-cPDjI_0 bicycle E62w4NFSm5E_1 dog E64d0EH39M4_0 train E67ceZopcqQ_0 person E67ceZopcqQ_1 person E68IhhK04s0_1 giraffe E68IhhK04s0_2 giraffe E7BIM8cnCrc_2 train E7BIM8cnCrc_0 train E7F0Gt3Rea4_1 person E7F0Gt3Rea4_0 person E7LY2yKO0Jg_0 knife E7MvCesCxNk_0 knife E7dG4qPI_QY_1 knife E7eYGQjaVYs_0 bird E7hXPqOOiqo_1 boat E7hXPqOOiqo_0 boat E7qoCZ2e-vQ_0 dog E7rhwzBxMqY_0 person E7zwjNToyao_0 person E70FO7I2AQ0_1 person E70FO7I2AQ0_0 person E76moy2SQhA_0 person E8JYTxKfqmQ_0 boat E8OzYJ2gVAs_1 bicycle E8OzYJ2gVAs_2 bicycle E8OzYJ2gVAs_3 bicycle E8RSSepY8tk_0 person E8R5lzlo5qw_0 motorcycle E8Xxr8SUaEY_0 horse E8h4YnZbJg4_1 person E8n_eTUwyhc_0 person E8pbsHhMGOw_0 person E842T5CgJfk_0 person E854nPMWssI_0 horse 
E8-Z9saoTjk_0 person E8_NjWtQtgI_1 car E9J2Brm4LSg_0 truck E9J2Brm4LSg_1 truck E9N59GTZ8uE_0 person E9R_qLxcZdY_0 bird E9S5Tk5r2wU_0 person E9ZjM9SY__o_1 person E9ZjM9SY__o_0 person E9sCn_XaSHw_1 bicycle E9sHGoiMmXc_0 person E9zmtafFrCo_0 dog E9-1FSPKZ7k_0 person E-DE7HZ04WY_1 person E-OdBMMpwlo_0 umbrella E-VRMpgKXIE_0 elephant E-VRMpgKXIE_7 elephant E-VRMpgKXIE_1 elephant E-VRMpgKXIE_2 elephant E-VRMpgKXIE_4 elephant E-YDPyDXtR8_0 cow E-h1XNBlqsE_0 person E-pnZZeRFyQ_0 person E-q9j7xipsA_0 cat E-seUZ3B-Ts_0 motorcycle E-zFmY_9LWk_0 horse E-0FMMDuLw8_0 person E-3jsRP7KHc_0 elephant E_En6n1IyBw_0 elephant E_GC0IeKtu4_0 person E_K6zdkr0mo_1 person E_Xi5uEIiec_2 bicycle E_e6E8T7on0_0 dog E_02tA9RLyw_0 umbrella E_7qbAkVDYE_0 elephant FAKE4Rfwdik_0 person FATjlgllzBU_0 person FATjlgllzBU_1 person FAdlwBJZk78_0 elephant FAeK9y98GL8_0 zebra FAiIhoJh5uQ_0 cat FAm6HgSzPTA_0 cow FAn11rZ-gsU_0 person FAqiar6B2U8_2 bird FAu0yvyjW-Q_4 boat FAu0yvyjW-Q_9 boat FAu0yvyjW-Q_1 boat FAu0yvyjW-Q_5 boat FAx0CsAigS4_0 motorcycle FA_K15dKk6k_0 bear FBAcUphtxR4_0 person FBIVWWIbq-8_1 cow FBIawPqElJ8_0 bus FBKIUCHqUQk_0 skateboard FBKIUCHqUQk_1 skateboard FBNFSYoMCNM_0 bird FBOWbksU5pI_0 person FBOWbksU5pI_1 person FBjp-C_Sbug_2 bicycle FBjp-C_Sbug_11 bicycle FBnFn5mY2R0_0 person FBwWw9c4KdY_0 person FBz0aAYDBFI_1 person FB8F1ku1XkU_0 person FCBsCwjCPWU_0 person FCQB6p_GcDY_0 person FCRAvY0glAI_0 airplane FCd1d_7Hfpg_0 umbrella FCkT11nk468_0 airplane FClLRpdDi9A_0 person FCnE02wQQk4_0 bird FCp7AKKYViY_0 person FC-ONjCL7tM_0 person FC_gwQU4yrs_0 horse FDJyHtHix-0_0 train FDYS2AyPJhc_0 person FDZBIlbFrk0_0 person FDej1TTCjP0_0 umbrella FDfaLuM3y5A_0 person FDkiv1x0OGQ_0 car FDq3yKNo4Qs_1 person FDvTPzckQKc_0 motorcycle FD3pT-lj2tc_0 cat FEM7OGFO_BI_0 elephant FEN0F0V1nhg_0 dog FEOAvRWKb-k_0 airplane FEU4yHFzkZs_1 person FEU4yHFzkZs_0 person FEWZolQuMv0_1 person FEfYdrS3kFc_0 bird FEjcdYO4xPo_0 person FEoFDmI0pxI_0 airplane FEzBza78J4w_0 person FEzBza78J4w_1 person FE0DpZ9GXoM_2 person 
FE0DpZ9GXoM_0 person FE0DpZ9GXoM_1 person FE0Q5phKq3c_0 person FE0Q5phKq3c_1 person FE4gj8EYF9k_0 person FE51Dml-nZY_0 person FE7iv_llNT4_2 bicycle FE-JTPLk3fI_0 truck FFCtm1GZH_s_1 bird FFHJUeZ_KKE_1 truck FFHJUeZ_KKE_2 truck FFLxkwDj1b0_2 bus FFME8B_6LNA_1 motorcycle FFQl2DLyjdk_0 cat FFQl2DLyjdk_1 cat FFantnd2gLY_0 person FFantnd2gLY_1 person FFd_4DPNyRI_0 car FFijp_s0YwA_0 dog FFi3nSvA0WY_0 person FFjqbw4R9l0_0 cow FFm26XU-R7c_2 person FFm26XU-R7c_0 person FFm26XU-R7c_1 person FFndlV1rKas_0 airplane FFpyQ_5PU7M_0 bus FF9eHa3K8fM_1 bird FGO6y3WssIg_0 person FGQCxd5EAx0_2 airplane FGQCxd5EAx0_3 airplane FGQCxd5EAx0_1 airplane FGcS28ri5uY_0 person FGdEufjjhtg_0 person FGicL13npRI_0 person FGkNC4hzcfM_0 person FGkx6qk4oDk_0 person FGmjmDC1RoU_1 skateboard FGmjmDC1RoU_0 skateboard FGoutavzP5Y_0 person FGqrkJ3h0DA_0 skateboard FG0PrdHReB0_2 person FG5l2wX8ccA_0 horse FHAj71IwE7E_0 skateboard FHA6nVCnv28_0 person FHB5eraeYEw_1 knife FHJupOaUmtQ_1 train FHOLOunv9Ec_0 horse FHTc_V_05W0_1 bird FHT1DAZpJVY_0 cow FHZ-3pbJQrY_0 bird FHgO4zu5RGA_0 person FHu50D73Fzo_0 person FIA67WzAuNs_0 bear FIA67WzAuNs_1 bear FIB12MYkANg_1 bear FIDI0sZMPVU_0 person FIGhnuJWX5M_0 person FIGhnuJWX5M_2 person FIHYnB8Jrh4_0 person FIMbYQASgkk_0 horse FIQ1iL3jVkM_0 giraffe FIV4OFmfS_s_0 person FIV4OFmfS_s_1 person FInOWVIV_go_0 motorcycle d1N4NJqa_8E_0 person d1PqtOyYTY0_0 train d1PqtOyYTY0_1 train d1PqtOyYTY0_2 train d1Quy8k5O88_0 cow d1UWs3bPTsc_0 person d1UWs3bPTsc_1 person d1YYgiXq3tw_0 person d1YYgiXq3tw_1 person d1bzn92PO0c_0 person d1eo2OWc45Q_0 cow d1tf08A41eo_0 person d1ukwE8h4f8_0 horse d1wbMXvcgNc_0 person d1wlubAM1-k_0 person d10K79pdybE_3 train d14rOFFvTg4_0 person d14rOFFvTg4_2 person d14rOFFvTg4_1 person d165nDy63o8_0 bicycle d165nDy63o8_1 bicycle d17kaiZ5Ztc_0 person d2DRRd9l3TI_0 person d2RD5tyZt6c_0 person d2TxcbWHoBM_0 cat d2WfBDEMf40_0 truck d2ZGi2fOtPY_0 person d2cDVorBK8s_0 airplane d2cDVorBK8s_1 airplane d2e49A9MnF4_0 person d2lSueNvuG4_0 horse d2ns5iCGj78_0 elephant 
d2sn_b1z1Vw_0 person d2wHwCwQymw_0 person d2zgNRFDpSw_1 bird d203fSHLzv8_0 train d21TfucuHss_0 umbrella d217pENbZVs_0 person d28DHw2okF8_0 person d3F_Gm514J0_2 elephant d3G8COtsJco_0 person d3MN8Sm5tiY_0 person d3MVAijPTjY_0 motorcycle d3P2bH2t8IQ_0 person d3Wdg9MPgLA_1 skateboard d3Wdg9MPgLA_0 skateboard d3duKA35FEI_0 person d3jP_YP-6EQ_0 person d3ro5gubiaQ_0 person d3ro5gubiaQ_1 person d3rzFaWiWwA_5 truck d3sHFgbvhIU_0 car d33yoN6QyYg_0 bus d36tDEgs-IA_0 person d4A2uUrnVWI_0 person d4Cumy6qZPY_0 truck d4DbIWORtjY_0 person d4DbIWORtjY_1 person d4GvMFc_Vqg_0 knife d4Le0GuzhaY_0 skateboard d4QkJdQwkCo_0 motorcycle d4VJot5IZek_0 person d4VJot5IZek_1 person d4WRTfC57h0_0 horse d4b9-LX5V1s_1 cow d4hB6abJCs8_0 person d4mhHPSo7C8_0 skateboard d4q-0AcOs78_0 person d4vhL4dar5s_0 giraffe d4vhL4dar5s_1 giraffe d45YTUkd_9M_0 person d47DPSbvftI_0 person d484zxSSkJM_1 person d4_lDGwny4k_0 skateboard d5Ao3JBz7WM_0 person d5B0EMjLeZE_0 person d5PBtpn_6JQ_0 person d5gDBPwofbs_0 person d5gDqlNLGmw_0 person d5hj8eaC5fQ_0 person d5jIlHa1Y6o_0 cow d5m8giMORSk_0 person d53_McJDtt4_0 person d55FAEl6kfM_0 cat d55rz05ynyg_0 airplane d6AkvjKCaE0_0 person d6AkvjKCaE0_1 person d6TWHVESLa8_6 cow d6TWHVESLa8_5 cow d6VCXnnHXGQ_0 person d6VCXnnHXGQ_1 person d6YTAD3T2i8_0 person d6a2EN1cB-4_0 person d6cgbxc35Ms_0 person d6mM21E4x-4_0 umbrella d6m3DUG5E7Y_0 person d6uLbEhrIvw_0 airplane d65wDJoMyA8_0 person d67YXl13SSo_0 person d6-bn34gHFc_0 person d7H5qLPNFz0_1 elephant d7cwZ3G7xSU_0 bird d7kWNGqyvRk_0 person d7mQdSSoZ2E_0 person d7m0BF65qro_1 person d7m0BF65qro_2 person d7n5m9UuhP4_0 person d7n5m9UuhP4_1 person d7yxmt8AvOM_0 person d7yxmt8AvOM_1 person d71rdGKeKkE_0 person d74EhPMCxb0_0 person d7-3m4Nz8fk_0 horse d8CJ5urtRlk_0 train d8HIJN0pULI_0 person d8XcNMVXCD8_0 bicycle d8b-SN3JEvk_0 person d8dPRbquLuM_1 person d8dPRbquLuM_0 person d8t8y3kLzgc_0 person d84iekZaJHc_4 knife d9JyT5Kko5c_0 person d9LvxSh5P-Q_0 person d9OaiymMq0w_0 person d9PCSJzZTy8_0 person d9Pj3WrvXXc_1 person d9S0dKjWhNU_0 
person d9S0dKjWhNU_1 person d9S0dKjWhNU_2 person d9YlucRFs0U_0 person d9cSZXEb_5E_1 person d9dysX9rdmA_0 skateboard d9hh6urZ5FU_0 train d9kzobAaimY_0 motorcycle d9lIw5maa3M_0 person d9qijNyVVmU_0 person d95k-74VSVE_0 cow d-JD-mAXyIA_0 dog d-Mnc38YAmw_8 truck d-OQw6tKhuM_0 knife d-S3AmiMI1s_0 car d-e8mKtYWjk_0 person d-e8mKtYWjk_2 person d-e8mKtYWjk_1 person d-fv8fmGSlY_0 person d-hMPjLP2WE_0 bicycle d-hMPjLP2WE_1 bicycle d-hgDDQ3kwg_0 person d-h6ncywZ58_1 person d-h6ncywZ58_0 person d-oFe9Z0Obs_0 dog d-oFe9Z0Obs_1 dog d-rpsQgR8sw_0 person d-22m5Sq5OU_0 elephant d-5xdAZSjX8_2 skateboard d--9RMf5LCA_1 boat d_AudyfCYzg_0 car d_EP2nM4YMw_0 bus d_ElAbuvxGQ_0 dog d_ElAbuvxGQ_8 dog d_SB-LVXyi0_1 horse d_SmnRMWLD8_0 dog d_S0JCKcFCg_0 cow d_hsQ2L-klo_0 person d_nTA-SKHNM_2 knife d_nTA-SKHNM_6 knife d_ocJQiPpn0_1 skateboard d_vnePeLmwI_0 person d_2HhXHP8fg_1 cow d__UUbvo2t4_0 person eADPEBi8wWs_0 car eADPEBi8wWs_1 car eADqJI9JKq8_0 person eAFdLVF01GU_0 bicycle eANH6WnEpPs_1 person eAPcJi7CaBw_2 horse eAPcJi7CaBw_1 horse eARl2H_FaEU_0 cat eAXN0KAt66I_0 person eAXN0KAt66I_1 person eAYoRncVO74_1 person eAZbke5Perk_0 car eAfmOFI5jUM_0 horse eAsHKktPNSo_1 horse eAvDt4p-AvA_1 knife eA3lmhfjTuM_0 cow eA5hiUXY2_Q_4 airplane eA5hiUXY2_Q_6 airplane eA7FV9uQbYw_0 bus eA8fIAfGi5k_0 person eBB5vRA9JPE_0 knife eBHEKUkaBcI_0 bird eBLisw9b8i8_0 cow eBLisw9b8i8_1 cow eBLisw9b8i8_2 cow eBMqhmQr7vI_0 bicycle eBRcZ5KDeEA_0 knife eBgLKDW3lH4_0 person eBmdALv9WEE_0 person eBwWJ_geg4Q_0 knife eBy554vRg9M_0 person eB83_xIotrw_0 bicycle eB83_xIotrw_3 bicycle eB_ZHbAvx-c_1 person eCInOWr32gc_0 dog eCNG8qj36vs_0 cow eCSzfVb87kI_0 person eCUuH2vPeDI_0 person eCWhtTVetLA_0 umbrella eCeVtq40bcM_4 bus eCf8h359-j0_0 bus eClBvJnyYa4_0 truck eCmgHa6ThE4_1 person eC3Fwv7Uows_0 person eC-5SEhAGvo_0 cow eC_fRVwxsiI_0 person eDJamx945Ao_0 elephant eDJamx945Ao_1 elephant eDSAGlcfwKA_0 person eDSmePW-Vrg_0 person eDXqzj7vKFI_0 motorcycle eDX2HUt9ttU_0 person eDX2HUt9ttU_1 person eDuzDDESzU0_0 person eDwjZL3IGqM_0 
person FIrviDrZriY_0 bus FI2T176uKi4_0 person FI4oF175yHo_0 cow FJCE3uzu0i4_0 dog FJL5lb3wBKI_0 airplane FJPRJ0A8BII_0 boat FJVcRzA_pdI_0 airplane FJdcStnbgU0_0 person FJl_FwYbg8s_1 knife FJmyu27Omwk_0 person FJsMdQrRgFs_0 train FJvHbRGgbXM_0 giraffe FJvSXVq8PPk_0 bicycle FJxbfz8q8Qw_0 person FJzU4eC5GiI_0 person FJ5jeLsVXys_0 elephant FJ5jeLsVXys_1 elephant FJ7oeGn4dBM_0 cat FKGFVLnchKE_0 skateboard FKGFVLnchKE_2 skateboard FKGFVLnchKE_4 skateboard FKGFVLnchKE_5 skateboard FKKoXDLhFjo_0 person FKMCYA2_RMs_0 horse FKMCYA2_RMs_2 horse FKMsbMSiqrQ_0 person FKTETXdoJjk_0 person FKVxjU1kTMM_0 person FKWzB37H8-E_0 cow FKdcZ0D4-K8_4 horse FKdcZ0D4-K8_1 horse FKhBf2FcrKE_1 person FKhBf2FcrKE_0 person FKnj73Wv84c_0 umbrella FKsZiccYt_g_0 person FKwKsWjLhiI_0 person FKzvgRVfOjM_1 horse FKzvgRVfOjM_5 horse FK0ezSvbg7o_0 dog FK37T3KvNUU_0 cow FK8OxK802HI_0 person FLF92L3WRrs_0 person FLF92L3WRrs_1 person FLQzeGFBo2I_1 bird FLQzeGFBo2I_2 bird FLTewjXG6Wc_1 person FLTewjXG6Wc_2 person FLTewjXG6Wc_0 person FLWAw0tGOo8_2 bicycle FLWAw0tGOo8_3 bicycle FLZeutEdtzU_1 horse FLqZVv798FE_1 person FLqZVv798FE_0 person FLq3zU7UtgQ_0 skateboard FLr23Hv4LfE_0 train FLr23Hv4LfE_2 train FLskMa3WD7M_0 person FLyV4pkEHUg_0 person FL1q74zVLvo_1 truck FL8ulwhcOho_1 car FL-QttmKDc0_0 airplane FL-73OGqifE_0 cat FL_DeYOGkaU_2 horse FL_DeYOGkaU_0 horse FMHc-oH_rOE_0 person FMTZga_deFY_0 dog FMig7WOUQyU_1 bear FMig7WOUQyU_2 bear FMv3NfETfq4_0 bicycle FMv3NfETfq4_1 bicycle FNCMx4Aum_M_1 motorcycle FNJmejn3KNQ_0 truck FNJmejn3KNQ_3 truck FNJmejn3KNQ_5 truck FNKJAi0Xbz0_0 person FNNdAL0qtWM_0 horse FNSpSfZSQfE_0 person FNbjJJgHt6c_1 person FNgfcu9JUHA_0 cow FNjDy-du_gs_0 truck FNv5k4sCs5k_0 person FNxfPhr1AZk_0 person FN1B1veyxCQ_0 cow FOAmP97Gboo_0 elephant FOAmP97Gboo_2 elephant FOL80Pq_HSs_0 cat FOXwGm4ddCk_1 person FOacAsl9vUM_1 bird FOnRpTgHAdI_0 person FOyA2uyFS0s_0 car FO-yhRhInHQ_0 motorcycle FO_sYJabdgQ_1 bird FPBkLbjkE0I_1 person FPC9a1ebnRk_0 person FPFEZjz68RM_0 person FPHxPqZ9of4_0 elephant 
FPIVRAQI9Ao_1 airplane FPS-rWu8sfw_0 truck FPdj2aDA2Is_0 person FPd8NgysFbw_0 person FPhiHYzZrc8_2 bird FPmbKUp9Apc_0 person FPoBK2S6-kE_0 elephant FPpdaMeuTPM_0 person FP-joReSPjM_1 train FP-joReSPjM_4 train FQBe4ewvq3k_0 bus FQDYCsUTzLU_0 person FQIKRtrwRJU_0 person FQKMItJWON8_4 bicycle FQNa7v1nuHs_0 bird FQNa7v1nuHs_1 dog FQPeEa0PIhY_0 person FQQ5mFLQS_8_0 airplane FQTA_Rs2r4k_0 airplane FQa2-poPUOQ_0 person FQiI3CA-HsU_2 person FQiI3CA-HsU_0 person FQiI3CA-HsU_1 person FQnnRHyzLcE_0 boat FQyvUPmvsSo_0 bus FQ0G5VjpRO8_0 cat FQ09pTeRKXM_0 person FQ8nNpJodyM_0 person FQ_PnAPHimg_0 train FQ_YvOmwGng_1 skateboard FQ_YvOmwGng_2 skateboard FQ_YvOmwGng_0 skateboard FRBmAObAjLg_0 umbrella FRCsksZQW0g_0 motorcycle FRFZtNbUMfU_0 person FRFZtNbUMfU_1 person FRKbwt_HIJY_0 cat FRUF5D_Bg4I_0 boat FRZeTLb7R70_0 person FRcpw1KTh4w_0 skateboard FRh68K9peM8_0 knife FRs6gVga80M_2 airplane FR0IeE_jWVE_1 person FSCpm1kxTIE_0 umbrella FSJSVNwlHck_0 person FSSrkLtKRBk_0 person FSchPfgxMmk_0 person FSmTDuGYKRo_0 person FSrvVBrHdIY_0 person FSrvVBrHdIY_1 person FSs-_cK-4DE_1 bird FS8ZnDA42Xg_0 zebra FTHxfldxSrg_0 person FTlLAXuBE2M_1 person FTlLAXuBE2M_2 person FTlLAXuBE2M_0 person FTr8b641J_g_2 zebra FTr_sg-tAYA_0 person FTr_sg-tAYA_1 person FT7LfULOrmU_0 person FUNI1-oxWb0_0 person FUPer2xPyRM_0 person FUQokq7Dm_0_0 bird FUWPXNKt90g_0 skateboard FUcLObUwigo_1 person FUcQGevNVQs_0 person FUp8cy7p6kc_0 person FUt-f-8QJmk_0 boat FUzb9oSwhq4_2 horse FU63gEB5T14_0 person FU63gEB5T14_1 person FU-Gyo-nX8w_0 person FU-Gyo-nX8w_1 person FVGYeJ_eKRY_0 person FVGYeJ_eKRY_1 person FVSihamjW0c_0 person FVcaEg-4Saw_0 airplane FVm133076uE_0 person FVxqyMXxbTg_0 person FVxqyMXxbTg_1 person FVyZRq7FJUM_2 person FVyZRq7FJUM_0 person FVyZRq7FJUM_1 person FWAdovzWBpk_0 person FWCxpF5CAAo_0 person FWH6qzGM4Ko_0 cat FWTx-_C46YA_0 dog FWVW97tTSiI_2 skateboard FWZANVS2JwI_0 bird FWbVfjbC570_0 train FWd_KJNB1hY_0 person FWeJwZsAuq4_3 knife FWiwkCVxsvU_0 airplane FWpcgznz11Q_0 knife FWqFrwl7d-g_0 airplane 
FWqFrwl7d-g_2 airplane FWuSKVVP9Gw_0 airplane FXPnVqm98h8_2 car FXbqlcQOm4U_1 car FXcjcGBH8uA_0 airplane FXdP8V2Fyag_0 bus FXdevKY06to_0 bus FXjUPTGnrIk_1 person FXjUPTGnrIk_0 person FXrzFKXFtUE_0 skateboard FXvqDQa0_pw_0 bus FXz3PiouB_s_0 truck FX7DATABx3o_0 person FYPRZ3A5Wug_1 horse FYQxEw6enVw_0 knife FYR_8E37mhY_1 boat FZJlwJ_5CIY_0 person FZJ0L36775Q_0 bear FZOwW_igs2Q_0 person FZUo3m0w40U_1 boat FZXz9ivLbZE_1 person FZfD0ASOr-0_0 person eD5a0lOEA4c_0 person eD5_C8Rnll0_1 cow eD9mxZpbjpo_3 knife eEBoNITml_U_2 airplane eEBoNITml_U_5 airplane eEKY2ZIJ7cw_0 person eEKY2ZIJ7cw_1 person eEUzIzmFpmg_0 dog eEZirBqUuUc_0 cow eErb9l8tm9Q_1 person eEwALO20qQs_0 motorcycle eEzaprIjPOA_1 horse eE7zgmIkklg_0 person eE_bJ6JguBg_0 person eE_bJ6JguBg_1 person eFDTDuBtPdg_1 elephant eFIUN94eOFY_0 skateboard eFKWB3vWXzM_0 person eFNnJotKCuE_0 dog eFQAqsrxJIk_1 cow eFQAqsrxJIk_0 cow eFYXRQfFBFk_0 person eFYi8GYHOwc_0 bus eFYi8GYHOwc_2 bus eFYi8GYHOwc_1 bus eFbHzEjDjsQ_0 person eFbHzEjDjsQ_1 person eFbOmylKLps_0 bicycle eFbOmylKLps_1 bicycle eFbOmylKLps_2 bicycle eFbOmylKLps_3 bicycle eFbOmylKLps_5 bicycle eFbmkhM4yvA_1 skateboard eFeLxXgEWb4_9 airplane eFeLxXgEWb4_10 airplane eFeLxXgEWb4_19 airplane eFkMiDqxNNg_0 person eFn7qz_Ik-g_1 bicycle eFsEtWFKOCE_0 person eFsEtWFKOCE_1 person eFsJVO58dOk_0 motorcycle eFsJVO58dOk_1 person eFtXO4KQyP0_0 person eF6vo2K3X7Y_1 horse eGANqnJQvcA_0 person eGEeIkSKn9I_0 person eGFxLRdHt9o_0 person eGIMcDTDuZI_2 giraffe eGKe_SHbpew_0 dog eGLaqISw-ZU_0 cow eGXX9n0KkAw_0 train eGavpqx_a-Y_1 person eGeSgNqD64Q_0 cat eGp90l6AeQM_3 horse eGp90l6AeQM_4 horse eGp90l6AeQM_6 horse eGp90l6AeQM_7 horse eGp90l6AeQM_1 horse eGp90l6AeQM_2 horse eGsO1ybeNmw_0 person eGulNc3Hz6E_1 person eGw-BT7HLw0_0 person eGx11vRzfMI_0 person eG420j0UncU_0 cat eG9ay7ouawQ_0 boat eG_gCk-NdFc_0 bicycle eHFxA8eOkKo_1 dog eHJOSAF8Ksc_0 boat eHMokGJS_8k_0 bird eHPZiFRZgH8_0 person eHS3e7Drwlw_0 horse eHYl5vL9urI_0 person eHYl5vL9urI_1 person eHZGFVBiNbM_0 person eHhu8cP6sYY_1 
truck eHlKAc_jO3w_0 horse eHlKAc_jO3w_1 horse eHmn6jMH470_0 bicycle eHo7GgOz-4M_0 bicycle eHo7GgOz-4M_1 bicycle eHpMDoo4x9o_0 person eHpMDoo4x9o_1 person eHrYu8_xQuI_3 airplane eHuFhF5mn60_2 dog eHuHorwvDFE_0 person eH-lfDuzZRU_0 person eIbRJYX77lM_1 person eIceWO1K4hg_1 knife eIlLo4L0TBY_0 person eIm2mZqCQIU_0 person eItSvz_9tc8_1 horse eI5A6Q8wsk8_0 person eJGswWs5a_U_0 person eJJBtIMsces_0 cat eJNeGPvJZBs_0 person eJN7jtqxGc0_1 person eJO3ahTuQlg_2 knife eJTzEdYt2KA_0 person eJTzEdYt2KA_1 motorcycle eJZyuG0FB0M_1 person eJg7Dq1HzW8_1 person eJi66YisQnM_0 cat eJnTGfqwSKw_0 person eJntPRQdD6A_0 cow eJntPRQdD6A_3 cow eJxFV3LV_-o_1 elephant eJzkkZWgmiM_0 person eJ2omVOUJv4_0 person eJ4AprAxRh4_0 airplane eJ4AprAxRh4_7 airplane eJ4AprAxRh4_5 airplane eJ9q5sR4oiE_1 train eJ9q5sR4oiE_3 train eKBgCy3izjg_0 person eKCONra70xU_1 person eKGFKx5vbJw_1 bird eKGFKx5vbJw_2 bird eKJMggclbAI_0 truck eKYCRb3cMSc_0 cat eKcN648xBxg_0 cow eKdNbqJsxIY_1 car eKirxEVv1N4_1 giraffe eKpHpiZZSOY_0 motorcycle eKsu0SXh0Cg_0 giraffe eK5wkhSqhQg_0 person eLAIclbgwtw_1 motorcycle eLAIclbgwtw_2 motorcycle eLCZ9U490do_0 person eLK_O-E6TXY_0 cow eLLFV2_GBOs_1 cow eLLFV2_GBOs_4 cow eLLFV2_GBOs_5 cow eLLFV2_GBOs_0 cow eLLFV2_GBOs_3 cow eLRLhwJpaKE_0 person eLXWvZhL6g4_0 cat eLfUxNIWQn8_0 cat eLsJ-MoKt-c_0 motorcycle eLzEA8IlB5E_0 cat eL2OKu4DhkM_1 bear eL-v_R-bG30_0 skateboard eMJ8eEFu7lo_1 car eMJ8eEFu7lo_3 car eMN980Fn4Kc_1 horse eMQEyMimXFU_0 cat eMWM---NOF0_0 person eMcgmNHMY_g_0 person eMdVb5oIUWc_0 person eMgUOtsKC0w_0 train eMsSwXfIf7o_0 person eMv2h_s0LpQ_1 skateboard eMwSfQmonxM_0 bird eM5e2PBO5hY_0 giraffe eM-1RwyzQpI_1 truck eM-1RwyzQpI_4 truck eM-1RwyzQpI_5 truck eNDHGq_Vm3A_0 person eNEaC09BQF8_0 person eNG3je3HCHI_0 person eNG3je3HCHI_1 person eNIXfUjWW10_0 bus eNSkFxbG_L0_0 skateboard eNTeTVBDq8U_0 person eNVGmOIKNII_0 skateboard eNVGmOIKNII_2 skateboard eNYeXwUr7rY_0 skateboard eNbwp7DEy6A_0 dog eNbwp7DEy6A_1 dog eNlXrdcWYPA_0 person eNllsU_utBs_0 giraffe eN0ufEmLTDM_0 person 
eN3a3uFzNxw_0 person eOJorgJNcl4_1 car eOMSAOLQMc0_0 person eOMro57lp5o_0 bicycle eON5oS1ddkA_2 knife eOXMKiuur7c_0 person eOZ2mMo0l60_0 person eOe9DskHw1g_4 airplane eOe9DskHw1g_3 airplane eOhLZkf2gyQ_0 person eOj2KctQDKQ_1 bear eO0M1RCeWaA_0 dog eO9s3APOXdI_0 bear ePDBmIR0Mnk_1 bear ePEoVXrSERQ_0 person ePPnXOa8FII_0 motorcycle ePWPPUSuctk_0 horse ePWPPUSuctk_2 horse ePWPPUSuctk_3 horse ePaqZZz_gtY_1 horse ePgL4a_1DcI_0 person ePgqzaxKKo8_0 person ePhchRaBs-k_1 airplane ePhchRaBs-k_2 airplane ePjAF53eBSA_0 person ePkzyffCJhs_0 person ePli_zXbgF4_5 bear ePli_zXbgF4_1 bear ePli_zXbgF4_2 bear ePli_zXbgF4_3 bear ePli_zXbgF4_4 bear ePoC0Pj8xLA_2 person ePo6J3guHBw_0 person eQA0KwcbJlQ_0 person eQI72zFfl34_0 cow eQI72zFfl34_2 cow eQMmOyBJUaA_0 person eQOqA8LeUOU_1 truck eQOqA8LeUOU_2 truck eQOqA8LeUOU_8 truck eQS3V0HV61g_0 person eQTlUSSbOyY_0 person eQWRQaVSPT8_0 skateboard eQXSsw2MJGk_0 horse eQZEFoxVGuY_2 person eQZOAGlSYBc_0 person eQcocP3auyk_0 car eQfbBM_c96I_0 knife eQfbBM_c96I_1 knife eQi8AZ4DQO4_0 airplane eQjFi5iBL-c_0 skateboard eQl0Q82jNOY_0 cat eQmSzg2ZEpw_0 person eQmSzg2ZEpw_1 person eQoRdZR8_q8_0 person eQpbjnMSNLE_1 bus eQ1R5EruVgo_0 bird eQ1R5EruVgo_1 bird eQ2eWzgVggo_0 person eRAZ8LnDRN4_0 person eRBc8OmROx4_0 cat eRCMzS-dM8o_0 person eREzhoz4UA8_0 bicycle FZieBxFsZO4_4 bird FZieBxFsZO4_7 bird FZieBxFsZO4_8 bird FZieBxFsZO4_11 bird FZsDQUdCBiE_0 person FaINra3PYko_0 bus FaINra3PYko_2 bus FaINra3PYko_1 bus FanmFyCIvSc_1 skateboard Faxr0F1n4lk_0 person Fa8JS9CCs60_0 person FbC6M7cRN1k_0 person FbLE0CqDZ_I_0 person FbN-_RdBAoA_0 person FbRfH2tJCZg_0 train FbUasHXeVXg_1 person FbVrmfwHLD8_1 car Fba1mHso_c8_0 person Fbcl3O89qPI_0 person Fbryy4ItyRo_0 motorcycle FbsxP5HIH-w_0 person FbtbQbo3w6A_0 person FbtbQbo3w6A_1 person FbtbQbo3w6A_2 person FbzdX2M1spw_0 person Fb9GVgZUQkk_1 bird Fb-bT-5HFvo_1 person Fb-bT-5HFvo_0 person FcAKq2q6WuI_0 person FcGoc7P1MnA_0 airplane FcHZFDzsW6U_0 person FcI2xE1s0tE_0 person FcJofbjqKR0_0 person FcNTnULQ914_0 train 
FcPxUMks1f8_0 airplane FcQ9ypCnsnM_3 elephant FcdE5l-9Cl4_0 person Fcfkxe_EegE_3 skateboard FckxSGw75TA_8 elephant FckxSGw75TA_1 elephant FckxSGw75TA_3 elephant FckxSGw75TA_4 elephant FckxSGw75TA_6 elephant Fcmq6FVlPrs_2 horse FcyT7NFOtOU_1 truck FczLlZB8PPQ_0 horse FdBcdDQa2Yc_0 person FdG3QrZtdYo_0 person FdM1BVOZnpc_0 person FdM1BVOZnpc_1 person FdYZH48B1gQ_0 giraffe FdYpikKc6Rk_0 person FdcxQx4sFow_0 person FdgWx-kasEQ_0 car Fdgw87Au0kg_0 person Fdp1t1Kk42s_0 person FdvgBe0Ix0A_0 person FdvgBe0Ix0A_1 person FdviMb1gxkI_0 elephant FdyA9CQ40Xo_0 cat Fd1Rn6HvibQ_0 bus Fd1ZmuLPSNA_2 truck Fd1ZmuLPSNA_0 truck Fd1ZmuLPSNA_1 truck Fd1ySlMqOEk_0 horse Fd6kpMD00LI_0 person FeAmji-BcLE_0 skateboard FeHGwC6UYlQ_1 person FeHGwC6UYlQ_0 person FefqZU-M3NQ_0 dog FeioRbELmKY_0 cat Fel-MqoIa98_0 person FenJI9gPekk_0 person FevOpclGxX8_0 person Fe0XVxKTD10_0 person Fe1ne3adKqs_0 person Fe1o0fdRyjk_0 person Fe1o0fdRyjk_1 person Fe1o0fdRyjk_2 person Fe_r1BcuOm8_0 airplane FfCfKve9svg_0 elephant FfGzM6IRg6I_0 person FfTyXxo_JLY_0 horse FfWtRI5MlvQ_1 person FfWtRI5MlvQ_0 person FfddIx2fdDE_0 person FfkcxMLN90Q_1 person FfkcxMLN90Q_0 person FfpScNxcfaE_0 person FfpuED53W2w_0 person Ff3kCsp4dss_0 horse Ff37VadXulw_0 person Ff37VadXulw_1 person Ff-s3k4nzl0_0 cow FgAW1wm55t4_0 umbrella FgBAfHhZDtY_0 cow FgCkJ9L956k_2 horse FgHkoen3Fbs_0 person FgHkoen3Fbs_1 person FgHkoen3Fbs_2 person FgK205YdiNI_0 zebra FgaH6B8Im-s_1 person Fgh-oweWR10_1 truck Fgh-oweWR10_2 truck Fgh-oweWR10_5 truck FgkgjnYWuvc_0 motorcycle FglWoBFeCGs_0 boat Fgqe5FVDM7w_1 bus Fgqe5FVDM7w_3 bus FgtxhgrL-1s_0 bicycle FhAkQ-D6j7M_1 person FhNe0p3NvAk_0 person FhS2OrbfOqA_0 horse FhTIUIB4MQk_0 person Fhdb7UXlKgw_0 person FhhQQi3XBRs_0 person Fhim9zq_3dc_0 dog Fhtl-JSkWvY_1 skateboard Fh1QSbERb_I_0 person Fh1jlYGKYy8_0 cow Fh2wm1SuBlM_0 person Fh5hapK4iY0_0 horse Fh-e1BaovqE_0 person FiAj5FRP_QI_0 bear FiAj5FRP_QI_1 bear FiAj5FRP_QI_2 bear FiGZEZ8BFeg_0 person FiLeL7fMtKI_0 person FiMl9o33Uaw_0 person FiQbZpev_LA_0 person 
Fim4ZNdANXI_0 horse FipIgAA0lFk_0 bicycle FirrKl6H41c_0 person FirrKl6H41c_1 person FivrGIBKDvo_1 elephant Fiz1rnLi2OM_0 person Fi4kJfnwDFc_1 bicycle Fi4kJfnwDFc_0 bicycle Fi7LPQxqu14_0 person Fi9uLLmtWaQ_0 person Fi_IAiAUqaU_1 horse Fi_IAiAUqaU_0 horse FjBRf4S85bg_0 elephant FjBRf4S85bg_1 elephant FjCz86a5wp4_0 person FjF5nRRKjKc_0 person FjMslXNPmHo_0 airplane FjRDB5KtmZk_0 cow FjUvDc65QJo_0 person FjZltjNG2NU_0 skateboard FjfP5wdsmM0_0 cat Fjo3Q6r1Unc_0 cow FjsVcnD_MIg_0 motorcycle FjvoIjZBqfU_1 person FjvoIjZBqfU_0 person Fj98ZrblH1g_0 umbrella FkAQLLdAAbk_0 elephant FkAQLLdAAbk_1 elephant FkFAVoUYxPc_1 skateboard FkOkAlvY34U_0 cow FkSrQgrkwxM_0 person FkSrQgrkwxM_1 person FkZy3LGoN9I_0 dog FkkUslZGIbg_0 bear FkvcJknwKuY_0 person FkzewHxki8o_0 skateboard Fk4XzK5XI6A_0 bus Fk4XzK5XI6A_1 bus FlD1RAiVpek_0 person FlD1RAiVpek_2 person FlD1RAiVpek_1 person FlEhS-F3ygQ_0 umbrella FlGO6UYJUzE_0 horse FlNEteNmUhc_0 person FlR1fAhH2Xo_0 dog FlYY0RaMPNY_0 person FlgN1oA45yM_0 bear Fl2yqFTps4E_0 person Fl6OhW0-1w0_0 person Fl9EhNo7Keg_0 person FmDFcSMFeno_0 person FmDFcSMFeno_1 person FmDOHRJspxI_0 person FmMYoani5Vg_0 person FmOLwdbHDxQ_0 person FmOfXWRFoXQ_2 bird FmUhkvEy_7s_0 person FmUhkvEy_7s_1 person FmVDxGIS5zk_5 train FmVDxGIS5zk_7 train FmVDxGIS5zk_8 train FmVDxGIS5zk_9 train FmVDxGIS5zk_10 train FmVDxGIS5zk_1 train FmVDxGIS5zk_2 train Fmc6udEpldU_0 cat Fme4Abd5nUA_2 bird Fme4Abd5nUA_1 bird FmoAxj0I_HE_0 person FmqOvCWa7zg_0 person FmrozJZpKR8_0 train FmsAY671mqQ_7 knife FmuPNtoqS2E_0 elephant Fm1Depfmi_k_1 person Fm5EMiek6AE_0 person Fm6Hq8f2Qxk_1 airplane Fm6Hq8f2Qxk_2 airplane FnEnQ8PP_eE_0 skateboard FnEnQ8PP_eE_1 skateboard FnGScEGhwDA_0 person FnKvuj-emb4_0 person FnKvuj-emb4_1 person FnMl1BAE_jc_0 bear FnMl1BAE_jc_4 bear FnNceIdqZ3w_0 person FnNceIdqZ3w_1 person FnTofG0IZf0_0 person Fnb6xihA7ck_0 person FncXKaqIxJo_0 person FncXKaqIxJo_3 person FniMTwzxRZQ_0 person Fnv6GlZeZ98_2 airplane FnwZm6-uVkU_0 person Fn6j8CspFw4_5 horse Fn6j8CspFw4_2 horse 
Fn6j8CspFw4_3 horse Fn6j8CspFw4_4 horse Fn7CPx1Df1I_0 dog eRGlFEYZ74g_0 person eRQQ8fY6DVA_0 person eRToPN2xDdI_1 horse eRToPN2xDdI_2 horse eRVbBhT_bcs_0 person eRXcoQINrwY_0 cow eRa3aIGemkw_0 person eRiOVczmKs0_0 person eRk0k7ru0C0_0 person eRlVo64o3EE_0 horse eRn_VZZAhDc_0 bird eRpQzm5PYXw_0 person eRvRu0q-GoE_0 dog eR2L8Yeikhc_0 person eR2s4XgNo7o_2 dog eR6IwGLaa1M_0 bicycle eR7y-Ei3DLg_0 person eSGBtfzFobI_0 train eSId-3VXvKk_3 dog eSIwAUMyFgU_0 person eSKH9cYOKk8_0 horse eSPrJOSU8AM_0 train eSa1vsOaz1c_0 knife eSiLV8rS59E_0 person eSiLV8rS59E_1 person eSljhVPS-Ik_2 person eSljhVPS-Ik_0 person eSpAsKZSmiA_0 airplane eTDKrXMMrQ0_0 cow eTKPoRwNChU_0 person eTKPoRwNChU_1 person eTKSWSWvAyw_1 person eTNf-Cqbbro_1 person eTQF3UDg8qc_0 truck eTTKvmF97nI_0 elephant eTUWLCcJU2k_2 bus eTU8LeMW9qA_0 person eTc1z6mbb50_0 truck eTdIp3O6Gdc_0 bear eTkYJ5e2d6g_0 person eTkbZ2QtHvw_0 train eTkbZ2QtHvw_1 train eTpyN9lx8_4_0 horse eTsE0jLxU3w_0 truck eTsE0jLxU3w_2 truck eT3B8Dicg34_1 person eT5K9fPU-0g_0 person eUGoobFpS4s_0 person eUKe6XaWIfA_0 motorcycle eUQjLdCSTbY_0 person eUQ4P2JG1yg_0 bus eURPg0TbtFI_0 person eUU0KJ-w2bc_0 person eUVgOxQT_-8_0 cow eUbEHnOzRA8_0 person eUbEHnOzRA8_1 person eUbEHnOzRA8_2 person eUe_Rayk8X8_0 person eUyzGl0--ms_1 person eU6G8jITD_Y_0 airplane eVJOOrHqc34_1 skateboard eVL1UQ_nteE_0 car eVNGBAn5Oxc_0 cat eVPABDrI9js_0 bird eVYydWvg5Go_1 person eVcLRosJZew_0 person eVhB8QJJogM_1 knife eVn8akHyS64_0 airplane eVn8akHyS64_2 airplane eVn8akHyS64_3 airplane eVn8akHyS64_6 airplane eVuy4uctm28_0 person eVu1gME4-Qs_0 elephant eVu1gME4-Qs_1 elephant eVywFyCLwko_0 person eVzfhyg8qFU_0 person eV2KIbTSnH4_1 train eV4pA62ABv8_1 train eV6nRsgY8PQ_0 person eV64Qw4Zebk_0 person eV-VIypuuNY_1 bird eWHnCpVoKhw_0 truck eWbvhqFVvXk_0 boat eWlQOgHQT7g_0 airplane eWpIepmfRus_0 person eWpIepmfRus_1 person eWsle8FxRvY_0 person eWyDiulNMGo_0 motorcycle eW6l7xJBq-Q_1 boat eW6o2X8qAtQ_0 car eXDegroOl34_0 person eXECAC_iXPc_0 person eXLLe0Z-fJk_0 person eXUIt5B2NQc_0 
person eXYniqUW4z8_0 bicycle eXYniqUW4z8_2 bicycle eXaCA1qL7uY_0 person eXeifN6Jv8c_0 elephant eXeifN6Jv8c_1 elephant eXeifN6Jv8c_3 elephant eXeifN6Jv8c_4 elephant eXeifN6Jv8c_7 elephant eXfkthdw2L4_0 person eXixQXmPyYw_0 elephant eXoF6xS_5u4_3 knife eXuelMqu_1M_0 knife eXveKyc2TQg_0 horse eXxAlPRFiqs_0 person eXxAlPRFiqs_1 person eXxAlPRFiqs_2 person eX3bd4kHxuc_9 airplane eYDpQFJpz7k_0 person eYJe2k1E0XQ_0 bus eYY-Mz3L_Ac_1 elephant eYeHu-IftM0_0 person eYnlQEvgHVc_0 cat eYqlHj6MSc0_7 bicycle eYyGqoW9Q3c_0 bus eYyri5GAJDE_0 person eZEN_5rnTLM_0 person eZL3Ew4O7YI_0 person eZXS_3nTpdo_1 motorcycle eZXS_3nTpdo_2 motorcycle eZZb5rnc1iA_0 bus eZf-Rsr1aNs_1 train eZgo_XfmmO0_0 person eZgo_XfmmO0_1 person eZl_FRsZx3o_0 person eZym_LkJnpY_1 knife eZ2Y_Qtg0VU_0 horse eZ2Y_Qtg0VU_1 horse eZ4N2Y737ss_0 person eZ_peGgPSDE_0 person eaHXGY8ImzY_0 person eaOqHSeEVG0_0 elephant eaR-dFaZRGc_2 giraffe eaTX3J2X23g_1 person eaTX3J2X23g_0 person eaalMrdHsQ0_0 horse earUgdES0lk_0 person eaxPmkwGK5U_0 bird ea1EeKBBjxk_0 umbrella ea1YcZPjbxU_2 truck ea4saeRZ0_M_0 person ea8mbQn2kv0_2 dog ea8mbQn2kv0_1 dog ebFgEyNciRc_0 cow ebMZJ-lUhbw_2 bicycle ebMZJ-lUhbw_3 bicycle ebMZJ-lUhbw_1 bicycle ebOubiwIUC0_0 person ebV9mcxICDs_0 dog ebY5nNOPdN0_0 person ebY52fJyTPs_1 person ebY52fJyTPs_0 person ebagV2pOV20_0 boat ebhnTUXh7Pc_0 cat ebh7xOXlO7Y_1 person eboXP28MlOE_0 airplane ebt0_AWnuyM_3 bear ebyMEAOqPhQ_0 skateboard ebz4umtEYag_1 motorcycle eb0UO8Y5r5A_0 car eb1-qD5D7Us_0 person eb5d4XIDSqs_0 car ecDEmZdWz8Q_0 person ecGOS5ZO0Tw_0 skateboard ecGOS5ZO0Tw_1 skateboard ecJIf9dcDHk_0 person ecKMZLATsNg_0 person ecKst7suEZo_1 motorcycle ecPynengjhg_0 person ecUmR_974l4_0 bear eccbjuLjCr0_0 bicycle ecex13DrS00_1 bus ecgqb4spDo0_0 cow eclnV3fwFVg_0 car ecndV9N-b9M_0 boat ecrgwn6gB7c_2 person ecrgwn6gB7c_0 person ecrgwn6gB7c_1 person ec0L5W9HzYQ_0 person ec0zPF4t8jM_0 person ec10-YUa1PE_0 person ec4Mjwm2hyQ_0 person ec4ya7ogbFU_0 person ec59VG2krTI_0 knife ec7hzm4ZgOM_0 bus ec8daVdUMW8_0 elephant 
edErePLiFl4_0 truck edFb7FxjVPc_1 person edOvHaEGfM0_0 train edO7Q7znUJA_0 cat edPmPMqUt4c_1 person edPmPMqUt4c_2 person edS79MnRXwE_1 person edYcGdD4UGI_0 person edd8R4oDMdg_0 person edlAlkitTfg_0 bird edlAlkitTfg_1 bird edq1Zw1FWGY_0 person edrtSs6UdCI_0 boat edtqJ_N0258_0 person ed0O35MjM6Q_0 cow ed5jfyH6JyI_0 person eeEjRmROBZs_0 train eeEjRmROBZs_1 train eeJDVUC0bio_0 bird eeV0a3p0uz8_1 dog eeYr-ujfh4Y_1 person eeYtwUSuQzY_1 airplane eeYtwUSuQzY_2 airplane eeYtwUSuQzY_0 airplane eeZyIsjtgj0_0 train eeahFaPbx5M_0 skateboard eea6uRdJLL4_1 bird eee-1I8uLeU_0 cow eefTfPIGkq4_1 person eef-qkyU0jY_0 boat eepn_UxMI5o_0 skateboard FoFA-VOPhV8_0 zebra FoIc9MjzbBk_0 person FoSynLz7aJ8_0 horse FoSynLz7aJ8_1 horse FoUqmWxXlNU_0 person FoUqmWxXlNU_1 person FobAHnW_q6s_0 person Fog-McdMlO0_0 person FomH9b8uRKs_2 knife Fot4m5WU4Aw_1 person Fot4m5WU4Aw_0 person FouVJvkYyPs_0 person FpCdNHknwMQ_3 car FpCdNHknwMQ_5 car FpEzn8x46OE_0 bird FpGO4RTCIuk_6 bicycle FpGO4RTCIuk_0 bicycle FpGO4RTCIuk_2 bicycle FpGyjKY-NIk_0 motorcycle FpGzMvzCvKo_0 person FpI0Do5LaU8_0 person FpTdRnuOS8M_0 person FpTdRnuOS8M_1 person Fpaob2f1sqE_1 person Fpaob2f1sqE_0 person Fpev0w7vGO4_0 person FprxIVYXUL4_0 horse FpzpuYeDf6M_0 bus Fp1vbL5guA0_0 person Fp2HgWZlr2k_0 person Fp7RJqXwz6c_0 person Fp-TG2XDrC4_4 car Fp-TG2XDrC4_0 car Fp-TG2XDrC4_1 car Fp-TG2XDrC4_2 car Fp-TG2XDrC4_3 car Fp_5yBxyvR4_0 umbrella FqFhpogmR2s_0 cat FqHStgmNnKA_0 bicycle FqHStgmNnKA_1 bicycle FqTHQ5KBbaY_0 elephant FqjhuAhttZw_2 train FqjhuAhttZw_1 train FquAMi_ikSA_0 truck FqxWiT-6dLM_0 person FqxZmvVkHIA_0 giraffe FqxZmvVkHIA_2 giraffe Fqx-wOpqzZo_0 airplane FqzYUW3X9pc_0 person FqzYUW3X9pc_1 person Fq_esHSu_sk_0 person FrC2HuRBsYA_0 person FrC-Gp1GmVw_0 cow FrC-Gp1GmVw_1 cow FrIO6gNGeao_0 person FrUCytgm6sM_2 horse FrViqM6fVR0_0 dog FrVxG6x7tj0_0 knife FrVxG6x7tj0_2 knife FrgvokGeeds_0 person Frk0tcM1o_w_0 person Frm5N8YRz_E_0 person FrpsbU7nO00_0 person FrxIGKawDiA_0 person FrzgyfVukw4_0 person Frz8huGrR4M_2 motorcycle 
Fr0K__Q_Kv4_1 bird Fr2qdnHURF4_0 boat FsHjWJUILr4_0 person FsScYp1HNk0_0 person FsXYM3nf7O4_0 horse FsZyoaRLGfw_1 person FskWl7cTGUU_4 motorcycle FslFjbzL4rY_0 train FsuA_2-7e1w_0 elephant FsuA_2-7e1w_1 elephant FsvwyL1hLDU_0 bus FswGt3qhUXE_1 horse Fs6Lk0xDsWk_0 dog Fs6Vua80iU4_1 bus Fs-DmOC6Ksw_0 person FtAgz58w2vs_0 person FtC0Y3Dca60_0 dog FtD8uBgTi3E_0 cat FtJ8y0gIpKg_0 dog FtJ8y0gIpKg_3 dog FtJ8y0gIpKg_5 dog FtJ8y0gIpKg_1 dog FtJ8y0gIpKg_4 dog FtMshKheG8Q_0 elephant Ftet3EW_gR0_0 skateboard Ftet3EW_gR0_1 skateboard Ftj_1qTEwE8_0 cow FtqLCjhRQgQ_1 person FtqLCjhRQgQ_0 person FtwMaVMlLbM_0 person FtwZasadNWo_1 person FtwZasadNWo_0 person Ft3Xr78g1jg_0 dog Ft4RUB75d64_0 horse Ft5ZV3L5LV4_0 person Ft8VPp_VNJs_0 knife FuCuNV5vL-8_0 person FuIIvsD7qyY_1 person FuMf00RPDmg_0 bear FuNvDTe7cAM_0 bird FuR3p7f2R30_0 person FuTf8iiIHWI_0 motorcycle FuVQuZfX71w_1 elephant Fuc49AUfyaA_1 train FufG8eRehvk_0 person FuoKMOMcl0I_0 bus Fu3A7S4V26Q_0 person Fu4p4U9AqY4_0 train Fu5TDXXdHyc_3 train Fu5TDXXdHyc_0 train Fu5TDXXdHyc_1 train Fu5TDXXdHyc_2 train FvB0FA24g0c_0 motorcycle FvD-5pXN6B4_0 person FvF8CGSAVBw_0 horse FvIqBpjD4A4_0 person FvKJQTsxS6o_0 bus FvNiWF5wWJA_0 truck FvN6HD0c3I8_1 bicycle FvQ8wYSFAhA_0 bird FvZ_lMA5MYE_0 person FvZ_lMA5MYE_2 person FvZ_lMA5MYE_1 person FvcxD9PJ1-g_0 bear FvcxD9PJ1-g_1 bear FviKCn2JGbY_0 person FvksDxENves_0 bird FvksDxENves_1 bird FvmW4A9wN1c_0 person FvslrkU6Ii8_1 skateboard FvslrkU6Ii8_5 skateboard FvslrkU6Ii8_4 skateboard FvuJoToFsZ0_0 skateboard Fv2LjW2C5SU_0 knife Fv2LjW2C5SU_2 knife Fv2SAN8CNlg_0 horse Fv6OQz_y5V0_0 person Fv80QjBLyXw_3 train Fv80QjBLyXw_4 train FwBCZ90I_aw_0 cat FwIN5LlmnSA_0 person FwMy9UR3xJA_0 person FwMy9UR3xJA_1 person FwNHDlUxkVE_0 person FwSQA6A_bWE_0 person FwZzzptQg0s_0 person FwZzzptQg0s_1 person Fwf5SGfOguQ_0 bird Fwf_1L-RQB4_0 cow FwhmGtqpt5s_1 skateboard FwrkNuHACuE_0 person Fwtyj6Ut62E_2 dog Fw8NHywJSJw_7 airplane Fw8NHywJSJw_8 airplane FxHZCFGlLk8_0 truck FxI0-u_zPQQ_1 skateboard FxI0-u_zPQQ_0 
skateboard FxJg66y6Vj4_0 person FxJ0douRc4s_0 person FxMnA-aNvVI_0 knife FxXVgnAjOCs_0 person FxXVgnAjOCs_1 person FxitbyLzBbw_0 person FxmfshFrhyg_0 person FxmfshFrhyg_1 person Fxp_EDLEylo_1 bear FxxuVRsJiCQ_7 bird FxxuVRsJiCQ_9 bird FxxuVRsJiCQ_11 bird Fx74SXbZiUI_0 boat Fx-8EgSEaDg_0 person FyFea2NifCo_0 elephant FyKB3iEKNlg_0 person FyO1UliwWNQ_0 skateboard FyQulDaVp8I_0 person FyTFrxalrzY_2 bicycle Fyb5_PxuzrI_1 airplane FyjgIZnRT0A_0 person FylDI9Ssx18_0 dog FyqooE73pSs_0 train FyuLo6pvAxk_0 person FyuLo6pvAxk_1 person Fy6UODQTxBw_0 dog Fy8cULzM424_0 person FzJOOqEWb48_0 person FzP8vDH_ynM_0 bicycle FzV_56qru4c_1 person FzV_56qru4c_0 person FzaaAJ_dGjI_1 dog Fzc4L1eWvQ0_0 knife FzeiG746wec_0 person FzoJlCfL5bc_0 person FzpV3zrU7w0_0 cat FzufL9SIDZ4_2 person FzvLoCiUbCU_0 person Fz4RMW4ONrQ_0 skateboard F0Ekv-HAlnk_0 airplane F0G64yaBMBM_0 person F0G64yaBMBM_1 person F0I59IAm-vo_0 person F0Qk5fG3X-M_0 person F0Q9zBIa4vg_0 knife F0Q9zBIa4vg_1 knife F0Q_-7qxWws_3 elephant F0UBtRxGNhA_5 bird F0XjqeFLlgU_1 bird F0ZAshDVPxg_0 person F0c4qnJQtDU_0 bear F0gFV3Zl1ew_0 train F0gFV3Zl1ew_2 train F0hx5kgZ3go_0 elephant F0mBUyvb90Y_0 person F0qXU9y4p-Q_0 person F0z1cmfnPsQ_1 bicycle F1B_Y1twDK0_0 cow F1CZ2DPXJ9M_4 bicycle F1CZ2DPXJ9M_1 bicycle F1KHVI6XeVo_0 person F1eNAhwM5Pc_0 horse F1jGg9828BI_0 person F1j27LEBSpI_1 car F1qXLHQywDc_1 elephant F1sQlUVWZLM_0 person F15XLgp6ED4_0 skateboard F2Bb2pFQRyU_1 person F2EV6W4vdT8_0 bus F2GhztG-3ZM_0 cat F2HupbPd4Rc_0 person F2JDbaIJXuM_0 person F2JeBrL43Kg_0 person F2JnnpLll3c_1 horse F2Kd_wTgfHc_0 bird F2N-fmDDyCs_0 train F2an_w-D4WM_0 dog F2bbT3y10lk_0 person F2dx02YK1MY_0 cat F2kBHcrY7Ck_0 person F2nvlBMOvGc_0 boat F2nvlBMOvGc_2 boat F2nvlBMOvGc_3 boat F2nvlBMOvGc_4 boat F2yvXHbr1Us_6 bird F2yvXHbr1Us_7 bird F20W1m4x2Ys_0 person F20_Ihwr_1Y_0 elephant F21R2kQ-je4_0 person F2244CO9Fuo_0 airplane F250PqK5Gb4_1 airplane F3AMItpIJlI_0 dog F3AMItpIJlI_4 dog F3FUBdTgY7c_0 car F3FUBdTgY7c_1 car F3Lz3rnQ-7A_0 person F3Lz3rnQ-7A_1 
person F3NneLgyZiU_0 person F3RkQzIQjeU_2 bicycle F3XFJeSjPDU_0 bird F3XFJeSjPDU_3 bird F3gY7oCc-j8_0 cat F3j318NP2P0_0 person F3j318NP2P0_1 person F3oP1Se_HdQ_0 motorcycle F35JtGCIiCo_0 dog F377W3trtdg_2 dog F4DJmxH-fuw_0 skateboard F4FXVb3DdJE_0 person F4FXVb3DdJE_1 person F4HgVMHEiVQ_1 bird F4Ja9TDp5eg_0 person F4R1rt0I4Ik_1 person F4R1rt0I4Ik_0 person F4WWEXEO6Cw_0 airplane F4hUo05eI2s_0 person F4hVb1AsJ9M_0 umbrella F4hVb1AsJ9M_1 umbrella F4hp-2UBFcI_0 person F4l8U4NGPMU_1 elephant F4rQJlBkGa8_0 person F4tzOjT91r0_2 elephant F41NWCYabpM_0 person F44j0JHVdfU_2 bicycle F44z7XXoIZk_0 cow F4-R6x6hSno_0 airplane F4-R6x6hSno_3 airplane F5IEcbmSBiU_0 person F5UiBt9FiQ4_1 truck F5brWxznDYA_0 bicycle F5drV0qDFvU_0 person F5pSgana5Ds_0 person F5pwABHMaZM_1 skateboard F5y_lQCCiYk_0 person F51aHL_AuQ8_2 person F51aHL_AuQ8_0 person F51aHL_AuQ8_1 person F54NzXjey4Q_1 person F6AkwJu9acQ_0 person F6BUhbvKAY0_3 bear F6I3hGIdHBM_0 airplane F6L1DckOdFs_0 person F6L1DckOdFs_1 person F6L1DckOdFs_2 person F6UTU1zVfY0_0 person F6X-PDReV8U_0 skateboard F6uVxnnSkQg_0 cat F63FWqs6n6A_1 person F63OB46zw20_0 person F66U-dCKTVs_5 elephant F67kQb83GEo_0 person F7Aw74QT7I8_0 motorcycle F7D1ccHfWQM_0 train F7GYFMuRxr8_0 person F7MruF3gqRk_0 person F7MruF3gqRk_1 person F7M2n9Irv10_0 person F7adrDrejOI_0 bicycle F7adrDrejOI_3 bicycle F7adrDrejOI_7 bicycle F7iFGXShjIg_1 knife F7lmwAhsTVE_1 cat F7lmwAhsTVE_0 cat F7wyUoc1ELM_1 person F7wyUoc1ELM_0 person F72e40LPG8g_2 airplane F72e40LPG8g_3 airplane F72yH9hRoS0_0 person F77I6mkMOmM_0 person F77I6mkMOmM_1 person F77WzfDD-Ac_0 person F77WzfDD-Ac_1 person F8VZcw3-DMg_0 person F8XbiaxQYFA_0 car F8kTGPYH29o_0 airplane F8sVrU5FfZw_0 person F8vyo42LQM0_0 airplane F9KIXBo3lNI_0 bird F9KIXBo3lNI_1 bird F9WnfUhb8A4_0 boat F9hhOJk3fdY_0 person F9jiY40SX4g_0 person F9kDOaogdPA_0 train F9kDOaogdPA_1 train F9nirQJj4wc_0 motorcycle F9qYvrO4nMM_1 person F9qYvrO4nMM_0 person F942FTRne2Q_0 person F95fIsG0A7U_0 horse F98XVAomn1s_0 person F-AROt5V1zQ_0 airplane 
F-L2byRMMEI_0 truck F-QpXlvCAdw_0 giraffe F-RVugkjZ1k_0 person F-RVugkjZ1k_1 person F-dxzMmjOT0_0 person F-dxzMmjOT0_3 person F-dxzMmjOT0_1 person F-poowwxrxU_0 person F-3G1FhnsdY_2 cow F-3G1FhnsdY_3 cow F-7EAK7rTI8_0 bird F_AoZsBu8j8_0 person F_AoZsBu8j8_1 person F_BBB0J-9tQ_0 motorcycle F_CsG_jIxC8_1 truck F_I4rwh1mtE_0 person F_JJmqKJBnY_0 person F_Kw8qyfgjU_0 person F_WtOi2ZeSE_0 umbrella F_oxJfyCUrw_0 person F_wVAS7hR9E_0 cat F_5NdFCcCrQ_0 airplane F_59LD9YnAU_2 person F_8qVC7MHM0_0 person GABXImD8qwM_3 dog GADBGhd7Hbc_0 horse GAF3BbJqKos_0 person GAF3BbJqKos_1 person GAGFuwQyn2A_1 person GAVdXzEftIU_1 person GAaPJd_iVeU_0 train GAb6ZqG64o4_0 person GAb9NG_JnoU_0 cow GAe7SnwoPQQ_1 airplane GAg-aVsz7AI_1 person GAinaDnPPO0_0 elephant GAnYrNhN90c_1 person GAoDRtFNSeQ_0 bird GAoaBt8kfHQ_0 train GApyoyRTlPk_0 person GArUrBTpgzk_4 airplane GArUrBTpgzk_1 airplane GArUrBTpgzk_3 airplane GAzsUwyCRAI_0 cow GBF7wVda328_0 dog GBLwQswYGpQ_0 dog GBUiAfFHr8o_0 person GBYAc4swbr8_0 person GBYFzcFWKtI_0 skateboard GBYeOSgHxaw_1 person GBhV-vm_cDs_0 motorcycle GBhV-vm_cDs_1 motorcycle GBhV-vm_cDs_2 motorcycle GBhV-vm_cDs_3 motorcycle GBjWoHEvi24_0 truck GBnf-AAsQts_0 person GBvWcmiB_zQ_0 person GBv60Rpf6hA_0 person GBwqR6gIUJk_0 person GBwqR6gIUJk_1 person GB0RUQ72TDU_1 motorcycle GB0RUQ72TDU_2 motorcycle GB0RUQ72TDU_4 motorcycle GB1A1gXLxF8_1 umbrella GB1A1gXLxF8_0 umbrella GB2Z9Zd9kCM_0 cow GB3M7jlJvZo_0 umbrella GB3dD_Sz5yA_0 cow GCECUCM275I_0 truck GCECUCM275I_3 truck GCECUCM275I_4 truck GCECUCM275I_1 truck GCECUCM275I_2 truck GCHyhn505e4_0 person GCL5aSCyDAQ_1 horse GCR8piyI8to_0 person GCdYlCKelqg_2 bird GCf79ImcoV4_0 truck GCiR2DBKEUo_3 umbrella GCiR2DBKEUo_0 umbrella GCyZCLCX4jI_1 bus GC5X3-Zi5fo_0 bear GC_4PRhWwy0_1 person GDBvvswiioY_0 horse GDErDO6sQxg_0 person GDHukw9i8AE_0 bear GDPBufHJ6pE_0 person GDVxjq335kg_0 person GDVxjq335kg_1 person GDW_ebhUmXg_0 person GDeoeNk-jj8_1 train GDgRHR5rt5g_0 dog GDhVskUd-i0_0 truck GDkTfXax1EI_1 person GDr1CfMsWCo_0 knife 
GDyR3j6e9uU_0 bear GD0qZhFYMtE_1 bear GD5H2vUIQUM_0 bird GD7nVz18opA_0 cow GEC16HE9LPs_0 skateboard GEK0W7Soe5I_0 person GEOILdSs_m4_0 person GEXtPkuLXV4_0 person GElPgxFGsYM_0 person GEmM96O2bm0_0 person GEoAqEILC5I_0 bicycle ee4MHg5K9xo_0 person ee4MHg5K9xo_1 person efANTTg0s7E_0 person efD7irKhsjg_1 zebra efFDVTrJnI0_0 person efQ-zUFNN-U_2 airplane efQ-zUFNN-U_3 airplane efQ-zUFNN-U_0 airplane efQ-zUFNN-U_1 airplane efUVmXxR3pI_0 person efXikRhGmrs_0 person efdHHLZ3g1Q_0 motorcycle effHbT0DhsY_1 horse effHbT0DhsY_2 horse effHbT0DhsY_3 horse efj0ZypW97U_0 person efl9qpSfN9o_0 skateboard efo_cgnnucQ_4 knife efqCl5PWA5Y_2 bear ef6fQWU1KdY_0 person ef9zPCUJ5uQ_0 boat egByT16s_54_0 person egByT16s_54_1 person egHnmalt3d8_0 horse egQiifLgKHE_0 person egVsaW3pIR8_0 bus egotrU2sxIs_1 cow egotrU2sxIs_0 cow egymuz3YUjw_0 person eg0xHA2KO2M_0 car eg0xHA2KO2M_1 car ehAg6V-5Puk_0 airplane ehB-VoBE8As_0 person ehFoBFIrRho_0 person ehFvz7g6tcc_1 person ehFvz7g6tcc_0 person ehF--LpGjPU_0 person ehI3hX4P2gg_0 bus ehSU0TuduDM_9 boat ehSU0TuduDM_0 boat ehSU0TuduDM_3 boat ehSU0TuduDM_7 boat ehSU0TuduDM_8 boat ehTOHuz8De4_0 horse ehhoOXi21uc_0 person ehhzn87_kyY_0 knife ehpsJCYWhMo_0 dog eh0-hoyeQv4_0 person eh383O3j2o8_0 train eh8ClQx55Pk_0 elephant eh8ClQx55Pk_3 elephant eh8ClQx55Pk_1 elephant eh-Hpgj7SPM_0 bird eiIxHOvvvog_0 person eiKfZPTeN-M_0 person eiMVAVfFk50_1 giraffe eiNlPbSqaQM_2 bear eiOC7H2_I7E_0 motorcycle eiYV7UFe9_4_0 person eiZm5CglnLc_0 person eiirsESzuHs_0 bicycle eim8NPBqZXg_1 person eis2vlxPtf4_1 person eivFKGFBySc_0 person eivMnaQyUKU_0 person ei0PFx0qNIQ_1 person ei0PFx0qNIQ_0 person ei4Yn0KXnAM_0 person ejDpzIUHAMk_0 person ejD4KjqrkFo_0 cat ejIMw0_a1Zo_0 person ejIMw0_a1Zo_1 person ejVKT8cDDTY_2 motorcycle ejoDQZqi4DU_0 person ejsflVtvinE_0 dog ejzqfqBU2XY_2 horse ejzqfqBU2XY_0 horse ejzqfqBU2XY_1 horse ej5D22-gpzY_0 person ekBhYo1n09M_0 person ekGn7Al_5S0_0 person ekOQkNLi9gA_0 person ekPQmhXqsJs_0 cow ekQPPxQDQrA_0 bird ekYErFjRBcY_0 person ekaQzIhIz6U_0 
person ekhId7QWajE_0 person ekw22HGT0TY_0 person ek6F1Yy6r4g_1 person ek6F1Yy6r4g_0 person ek9m3wFRD78_0 motorcycle elAJmgZ3uV8_1 person elIopJ6sLS8_0 motorcycle elS7CV83kDQ_0 cat elbH9USSXbU_1 person ele_x5If5RM_0 cat elfDIDNaxO8_0 bicycle elfDIDNaxO8_1 bicycle elk9Eg_zAzA_0 horse elwOqTHVPb4_0 car el_1tnvsCAY_0 elephant emAlGe0D2Ro_0 car emBk5WfF9MA_0 person emFvwwYH0Dk_0 person emLp02HobE4_0 person emO2DsNKmTw_0 train emVjapACNME_0 person emWHcaPL5H0_0 person emXkTzHEyT4_0 boat emhCPyXIbNk_0 person emqrQO4JZsU_1 skateboard emxIavKneZw_0 person emzfRpng4hM_0 bicycle em3XyVBpKCc_0 train enA3HVeW4MM_1 person enCpXewY40c_0 truck enCpXewY40c_1 truck enR0OQhVBwE_0 person enWAeU6n9LQ_0 person enXS9AGUoow_0 motorcycle enY96p1ZALE_0 knife enfPrTim6AU_0 cow engcDIwacLg_1 person engcDIwacLg_0 person en06DIx0cz0_1 person en06DIx0cz0_0 person en6AOaqCY1s_0 truck en9gUgAJoek_0 person eoFFf1yMhOg_0 person eoauVNDdle8_0 person eodvToXk2OQ_1 cow eodvToXk2OQ_0 cow eohpHQHPoXo_0 dog eovUEztTVZ4_0 person eoyj6UfwM1c_0 airplane epIcFi7yUZg_3 cow epK_YUgNzQI_0 cat epUTWEmTW1o_0 bus epXYWAgJeJM_0 person epZSAxAzWRs_0 person epeLK68bI3k_0 person epeLK68bI3k_1 person eph8ACa_bv4_0 person eph8ACa_bv4_2 person epis0oQPudE_1 person epu8oDLyhBw_0 cow epxbwMupoU0_0 truck epxxfkiUpVQ_0 person ep15pnX1AxU_0 truck ep4od2aZYv8_0 dog eqAMk_GzwUg_0 truck eqMRouLMQI0_0 person eqPXFnE2SxE_0 person eqTdm4-YomY_0 train eqWb0eTMl98_0 cow eqiPG6XAei8_1 person eqiVR6aa8XA_0 person eqnF1_Lwa94_0 motorcycle eqswu7XtVeE_0 boat eqswu7XtVeE_1 boat eqvu61eQ-D0_0 person eqwZeHPEjT0_0 bus eq2VUeTEEGM_0 elephant eq2VUeTEEGM_1 elephant eq2-yJIiWyA_0 skateboard eq7fzAhOZEo_0 person eq8-99wqpC4_0 motorcycle eq-XVpUOFlQ_0 cow erDb15O0GYM_0 person erIMuEor6gc_0 person erJzcEpQ-sA_0 person erKEWcCPgjU_0 person erKRZXMcCzQ_0 bus erLW6pBgIrE_0 person erLW6pBgIrE_1 person erWerfoGejo_1 dog erZ0-WmkPj8_0 person erfJrdfPp8M_0 truck eri-jOmjJ5U_0 person erprzr0GCa0_0 person errX-c_luf8_0 horse erwHbfRwbDc_0 train 
eryYeuoNAdw_0 person esEKixC0bi0_0 motorcycle esFUx8MS7FU_0 person esFUx8MS7FU_1 person esHEHZv3XAw_0 person esdMTvdz7G8_0 person esd9prHEDmY_0 cat esnr6cTpfQI_0 skateboard esnr6cTpfQI_1 skateboard esrkVh27SSg_0 giraffe esr3dKZtZ9I_1 person estRADheTso_0 person esxEV1BYf8g_0 dog es0lurDiGrM_0 truck etCrz_vcvJI_0 zebra etFtHhL2hac_6 bicycle etHjccaFHjw_0 person etZXvy6wqZM_0 cat etZjkcz1NXE_1 person etfOefeQ0NA_2 knife etgjVXNON5k_0 person ethiyhktDW0_0 train etrQY3yeg8M_1 person etrQY3yeg8M_0 person etu6chaT_o0_0 motorcycle etu6chaT_o0_1 motorcycle etu6chaT_o0_2 motorcycle euNO4mGjpL4_0 person euS2rEsG-jA_0 person euaiFpmh6SU_1 person GEuy-JvOFBM_0 horse GEwLV10zHSM_0 person GEwYE_QVNHE_0 boat GE061if8j60_0 horse GE8D0jEjasg_1 bird GFCN_4akSi4_0 person GFMwf7Ly_Sc_2 person GFMwf7Ly_Sc_0 person GFN08ryY-U0_2 knife GFTwQgse_Lk_4 knife GFXh14V5BN0_0 cow GFkCQFowcfs_0 person GFkCQFowcfs_1 person GFlTNatYs1E_2 horse GFlTNatYs1E_0 horse GFmBVLxS0W4_0 person GFsVA4Rxqv0_0 cow GFtZEmPze30_0 person GFytNaOS7eE_0 boat GF28RuK9Mio_0 person GF28RuK9Mio_1 person GF29WU5hVFU_1 umbrella GF29WU5hVFU_2 umbrella GF4b86WLzWE_0 person GF-zdmzb4zY_0 bus GGBhXIkXN-U_0 dog GGCSOyr8iNg_0 cat GGNkUcwxgU0_1 airplane GGX2r0RT9h4_0 bird GGY5BDDn5LE_0 person GGtf7t-SVb0_0 person GGytoCC23B4_0 dog GG2kiaUm9pg_0 person GG_CxOFs69U_0 bicycle GHAR-041e4w_0 person GHF_00q4fw0_0 person GHN9eBe1Bp8_0 knife GHWPuquucrM_0 cow GHZjWHKMwyw_1 truck GHqedSEAQ9k_1 person GHqmzbJnjVg_0 person GHu-Q-Jbh6E_0 umbrella GH_-l0dCs1A_0 truck GINmKyxk55E_0 person GIOByl4-GaE_0 person GIQcZHeI0rA_1 knife GIRWosek2kk_0 person GIesL1NmKrU_0 airplane GIiKoRSDN-Q_0 skateboard GIiKoRSDN-Q_1 skateboard GItE5rGj_-g_0 person GI0iwCtSgJY_0 person GI7YeWGyVRM_0 horse GJAe8ctAWb0_0 person GJHbNDEY178_1 person GJHbNDEY178_0 person GJIPOsnsWAg_0 person GJIPOsnsWAg_1 person GJL8p4_PeKo_0 person GJMk0Meedm0_0 person GJbtzWK_dYk_0 person GJpkQJ1A6Gw_1 cow GJy5Zhvk6lE_0 person GJ1O_aGTN94_0 motorcycle GJ4kWS7SklQ_0 person 
GJ7mp6eUiPg_0 car GJ9641JuJGs_1 person GJ9641JuJGs_0 person GKCr5DPt-O4_0 car GKC9zObtOMM_0 person GKEhy910De4_0 train GKWJ0lgaDCg_0 umbrella GKWJ0lgaDCg_2 umbrella GKewJtAM0mQ_1 person GKewJtAM0mQ_0 person GKhEkZ-cdNQ_0 train GKlP0uncbyg_0 person GKlP0uncbyg_1 person GKlP0uncbyg_2 person GKlP0uncbyg_4 person GKmEvD6kEV0_0 bicycle GKn-IcumftE_0 person GKpcLh6EzTI_0 truck GKs6SswOMow_0 skateboard GKyR_cV3NzE_0 bird GK1HKUicpqc_0 person GK7khWET2AA_0 person GLBHzmRhRXw_0 person GLCLinUtVWM_0 person GLJJdMPYSaY_0 person GLLgtpj5VIc_2 elephant GLLkz3ew2Cw_0 person GLN48vyNNE8_0 person GLOfyCC7cpg_1 person GLOfyCC7cpg_0 person GLTbuhg3c9c_0 cow GLTcmtEP3PQ_6 person GLTcmtEP3PQ_0 person GLTcmtEP3PQ_1 person GLTcmtEP3PQ_2 person GLTcmtEP3PQ_4 person GLT0qdbJFmE_0 person GLYc7lsUKvQ_0 cow GLemLQ7Taz4_0 dog GLiiNf5XBGw_1 person GLnBX7vZMds_0 car GLncyVpSovs_0 person GLonpYW6Yi8_0 person GLsxpYW-07A_0 person GLy3RuBdLZ4_0 giraffe GL2K160VZnM_0 airplane GL5i6mrfwJQ_0 person GL6eTReYh8E_0 giraffe GL7g579uon4_0 bus GL_EwiiBm1A_1 person GL_EwiiBm1A_0 person GMCQFxoF1UE_0 bear GMJi6djWGYg_0 elephant GMLP7F_Da2w_0 person GMVqWicQ2d4_0 motorcycle GMeN9Z1A9X4_0 car GMj9b1A2R98_0 bus GM3BiiUS2Xw_0 cat GM31sVP8NMA_0 elephant GNJ088XwXpI_2 skateboard GNLzZ4OPnHc_0 boat GNLzZ4OPnHc_1 boat GNN-BevC79g_0 knife GNRZ4AjoiSE_0 airplane GNawMpiTEFs_0 person GNnrNuC9zGU_2 person GNnrNuC9zGU_1 person GNqCvE7d9mE_0 person GNr1nF-F-40_2 boat GNvEs3KBgRw_0 person GN97F0ERx8k_1 person GN97F0ERx8k_0 person GOE3QOj97xk_0 person GOLZ7CWDXjk_1 person GON778LYTqk_0 person GOQICMUoGL8_2 person GOWRiwkZo2U_0 person GOW84-_w-LQ_0 bicycle GOZwEuPDmzc_0 person GOb0e4ojb3c_0 airplane GOkeNGfFi8Q_0 person GOkeNGfFi8Q_1 person GOpAs6aca30_1 person GOpAs6aca30_2 person GOrO-A4yd5c_0 person GO0RyAWdVQA_0 person GO1tmJmOjZU_0 cow GO9YRVC_2SA_3 elephant GO9YRVC_2SA_4 elephant GO98cqZbP2o_0 car GO98cqZbP2o_1 car GPABD8HFpQU_0 skateboard GPCArlk4udc_0 bird GPHwY1J1u04_1 cat GPLKI0foxxc_0 person GPUUqd1IyNA_0 dog 
GPUdCDtaGOQ_2 boat GPViSMkz1ds_1 horse GPViSMkz1ds_0 horse GPZznxc87vA_0 cow GPlHiCxNeIU_0 person GPnO7jt_-JI_0 person GPn2JSguaBI_4 umbrella GPn2JSguaBI_0 umbrella GPn2JSguaBI_1 umbrella GPtN0Kb9qZs_0 train GPzwYc908OM_0 bicycle GP2YaQXsf0s_0 umbrella GQJu2FlmC0A_0 knife GQRDl6gw-n8_2 bear GQRDl6gw-n8_3 bear GQV1QfplpXU_0 person GQ6mrqpELDs_0 person GQ99sfZjwTo_0 person GRMv9irLuQw_0 motorcycle GRQUwn0jA8Q_0 person GRRXv9O7hNk_0 motorcycle GRRullNXQUY_3 skateboard GRTcBPmHWPU_0 motorcycle GRjf8G-WDvc_0 person GRk94EZiwO8_0 skateboard GRo9Bmi4ghA_0 cat GRwCcOF0NyI_0 train GRwCcOF0NyI_3 train GRwCcOF0NyI_1 train GRwCcOF0NyI_2 train GRwvd8Xl-l0_0 bird GR5qTAjCnB4_0 cow GSD3hdUWKNg_0 person GSD_Asi3tsA_0 elephant GSD_Asi3tsA_6 elephant GSIFRlloCGA_0 cow GSMYNBUuI74_1 motorcycle GSb8ilGRCd8_0 umbrella GSkpDZZFQd4_0 boat GSmR-G7zCN0_0 airplane GSqatXKKzUU_1 boat GS1El_XLryU_3 bird GTaW87cQCZk_0 bird GTegSO4BiDY_0 person GTgztSxvdzw_0 horse GTg35QGB0bQ_1 person GTg35QGB0bQ_0 person GTjqtTiUFFA_0 person GTkZ7eZIV5I_0 skateboard GTpF9CW8Kyo_2 cow GTpF9CW8Kyo_3 cow GTpF9CW8Kyo_0 cow GTpF9CW8Kyo_1 cow GTt9sqczKqg_0 person GTuP3gwjf70_0 person GT4askC-EmE_0 skateboard GT4askC-EmE_2 skateboard GT6Ta63CfGc_0 bus GT7pB1SoSWQ_0 horse GUA64cJx_1s_0 person GUG7toTLyt4_0 bear GURTVjQ25hM_0 airplane eufhHTT-6cc_0 person eujtr13Kbtg_0 cow eutsycO_2Zw_0 umbrella eu0WWqOzPNI_1 boat eu07YiPAVxk_0 truck eu6zY6HpY1M_0 person evA7SzcjAkU_2 knife evA7SzcjAkU_3 knife evA7SzcjAkU_0 knife evDr0RJRRV8_0 horse evMMyqn2S94_0 person evRaMSC7xlI_0 train evVOgDU7DsE_6 truck evcE8ru07G8_0 umbrella evcWn6cN50A_0 umbrella evhP2M5P0rM_1 person evksM4sehcQ_0 cat evtk4IiqjkM_1 person evw-tqTTtQ8_0 horse ev1ATOeJPxY_0 person ev1ATOeJPxY_1 person ev53NALjp3I_0 person ev7a6Z-ZOv4_0 person ev-fVsUuvfA_0 person ewB46nb-ZFI_0 bird ewFZmQCCZm0_0 truck ewFZmQCCZm0_2 truck ewOgoCimrdA_0 elephant ewUWpmdjLHA_0 bicycle ewUWpmdjLHA_2 bicycle ewgdEY7GtsQ_1 airplane ewkBRzmoZzo_1 train ewkBRzmoZzo_2 train ewkeB8zzSVE_2 
dog ewkeB8zzSVE_3 dog ewkeB8zzSVE_1 dog ewoUjWEEJS4_0 dog ew9rbdv73TA_0 umbrella exR3lT_G3Yk_0 knife exZF88kJoP8_0 person exjWaQ0ssbM_3 airplane exjWaQ0ssbM_0 airplane exjWaQ0ssbM_1 airplane exn-_MfEP6Q_0 person exoNfV0vU_Q_1 person exoNfV0vU_Q_0 person exw_qJh1qp8_0 cat ex6Il_1Ielw_0 motorcycle ex7mPB9cYwc_0 person ex7mPB9cYwc_1 person ex-yo1W_s34_0 skateboard eyAxkbxVdHA_0 person eyAxkbxVdHA_1 person eyNJXyldIhM_0 person eySeJsY8tZU_0 horse eyZeTi4-udw_0 boat eycvZhhuzOI_0 person eyd3cO1cRyw_0 person eyg_dFAAJ_c_0 umbrella eyi_kSPelbM_0 person eyo2iTfyALs_0 cat ey49lNbkqdQ_0 person ey7evH7qmFA_1 person ey9CIllx21w_2 truck ey9CIllx21w_5 truck ey9CIllx21w_8 truck ezOxb6H18Dk_0 person ezX_8NsARn4_1 person ezYCeDV1Aew_0 bicycle ezam_iANUkY_0 motorcycle ezdehi1wmW4_0 cow ezktd-PtOQo_2 horse ezktd-PtOQo_3 horse ezrNhnjWp-s_0 person ezrNhnjWp-s_1 person ezu6OcJjjLk_1 person ezvAmpvi364_1 person ezyLlrEVZRU_1 train ez4u6-2yh8U_1 person ez7mJtg4aoU_0 cow e0Al-yQwL8w_1 bear e0C174hEUpI_0 person e0HCj6FnKMo_0 person e0HrgDMAL5c_0 boat e0K-Wc2SGSk_0 person e0V--elE2Dc_3 boat e0V--elE2Dc_0 boat e0XejLvBbTw_0 motorcycle e0dXS2okSxo_0 train e0jUh6hQykw_0 person e0jUh6hQykw_2 person e0kJTvItoXc_1 person e0kJTvItoXc_0 person e0qJxStHuGA_1 skateboard e0rXPv5Q8ac_0 person e1KQ3rXcBVg_0 airplane e1KQ3rXcBVg_2 airplane e1KQ3rXcBVg_1 airplane e1S7tY6zlBs_0 bus e1ZNGYPt280_0 cow e1a0tLtZdm8_0 person e1dAdTW0-s8_0 person e1guDr5Lq88_0 person e1iYijyYnIc_0 person e1iYijyYnIc_1 person e1v5-Vy3ikU_0 motorcycle e11u2SRsMQk_0 umbrella e110Ssoc3rc_0 horse e2Biqc_Y8fI_0 boat e2Biqc_Y8fI_1 boat e2C6vpxx1BQ_1 person e2C6vpxx1BQ_0 person e2DeceLJ4QU_1 elephant e2DeceLJ4QU_0 elephant e2DmJ2nN-bM_0 person e2DmJ2nN-bM_1 person e2IXk3LUK0k_1 truck e2Jc499uBac_0 bus e2MbvKCUxBQ_0 skateboard e2oWEimFUeM_0 boat e2oWEimFUeM_6 boat e26M0NUTUcs_0 person e29Si0sk8Vs_0 person e3Ep8F-TVbQ_1 bicycle e3Ep8F-TVbQ_0 bicycle e3MrKt1yh3E_0 airplane e3ezeG4Gm80_1 knife e3fz03vzrmQ_0 person e3pGW6uqeQA_0 cat 
e3tP581aZ0Q_0 person e34jQApS9Bw_0 person e3_zIH1Jrf0_0 person e4R8Aj-X5iA_1 horse e4ZrrwoRRXc_0 bear e4c8OdRhAyA_0 knife e4c8OdRhAyA_3 knife e4iZ27N3agg_0 person e4rO9AJXQzY_1 person e4yT58KhTcs_1 airplane e4yT58KhTcs_2 airplane e4zdJYlc4z8_0 person e47QRGUx_Hs_0 truck e47QRGUx_Hs_1 truck e48A0CBQct8_0 person e5CFfGS4B1s_0 person e5DZWu7GqG4_3 bicycle e5MbNYLt7wU_0 person e5MbNYLt7wU_1 person e5RlRpaBXnE_0 dog e5UjJAZHaBc_0 person e5VUEXqXFTM_0 umbrella e5kfPy-MIGw_0 elephant e5lFDgi4EIs_0 cow e5-Pz_Q8VUA_0 person e6F88LQJoLc_0 person e6G0gHixPGE_0 boat e6IQ-jfygns_0 person e6IQ-jfygns_1 person e6T5hbKQwAs_0 person e6aWxOF189s_0 person e6hz-jEGxsg_0 person e6muu75RFmg_0 bus e6s13mZyuYY_0 skateboard e6s13mZyuYY_2 skateboard e6s13mZyuYY_3 skateboard e6xT3S6wuwE_0 person e64lVlYKNYs_0 horse e7IeNjbA7ms_0 motorcycle e7JZ2C-e9_w_1 skateboard e7Q3z9gbUw8_0 skateboard e7TKWwysO8Q_0 elephant e7W79Xp4qxI_0 person e7aF0fG2O2U_0 bear e7aF0fG2O2U_1 bear e7eZQb8WjmQ_0 person e7xAzZCvd_Y_0 truck e70XtlB-Au8_0 truck e70XtlB-Au8_1 truck e70XtlB-Au8_2 truck e70XtlB-Au8_3 truck e70XtlB-Au8_7 truck e70jqVThihE_3 knife e70jqVThihE_1 knife e72VJJ7jkoI_2 airplane e76gr0pJMLg_0 boat e8BQbcBgcjc_0 person e8VeeESy9Xc_0 horse e8XzpXJnucs_0 motorcycle e8XzpXJnucs_1 motorcycle e8XzpXJnucs_2 motorcycle e8Y4hXyFPDY_0 person e8ZFu6n4mg8_0 person e8b7eo56B5Y_1 person e8b7eo56B5Y_0 person e8mSJe1G9U4_0 horse e8mSJe1G9U4_1 horse e8mSJe1G9U4_3 horse e8mSJe1G9U4_4 horse e804z6ehgWE_0 train e836XbTclWA_0 person e86xkdgTdTA_0 person e873uWjeaPU_0 person e88X3OKvqTI_0 cow e9Ceg407V2o_1 bird e9GSzFiQj8I_0 person e9GoxfmycMQ_0 person e9MugXot7JI_0 elephant e9MugXot7JI_2 elephant e9MugXot7JI_1 elephant e9Y8BHEdYpg_1 person e9Y8BHEdYpg_0 person e9Z237Wup_E_0 boat e9aADbJBMmQ_1 boat GUY72Rg_9g4_3 airplane GUY72Rg_9g4_0 airplane GUY72Rg_9g4_1 airplane GUY72Rg_9g4_2 airplane GUcZWh6tol4_0 cow GUq5xrqphew_0 cow GVCJZzVnGUQ_2 person GVCJZzVnGUQ_0 person GVCJZzVnGUQ_1 person GVG_dHMt7eA_0 truck GVRLfBtpGgA_0 
person GVeNt6hXwK4_0 person GWCwYIRE8YU_0 person GWIAU4GsgZM_0 person GWQD6FxWwpk_0 boat GWckuI3sTHA_0 bear GWmOpSmpGmg_0 car GWmOpSmpGmg_1 car GWmOpSmpGmg_2 car GWsXKIAM9yY_1 cat GWsXKIAM9yY_0 cat GWygvbszdUs_1 train GXS6axKBr7A_0 person GXX1pJeR1HE_0 elephant GXX1pJeR1HE_1 elephant GXZ3IXi7YXk_0 person GXcbgDsx_Zc_0 person GXfsYdVEMeA_10 elephant GXfsYdVEMeA_0 elephant GXfsYdVEMeA_5 elephant GXfsYdVEMeA_6 elephant GXfsYdVEMeA_8 elephant GXgoAnrkdVg_0 person GXiDQ52vcoY_0 person GXoA1zfvnOA_0 car GXrzW-OHh_Q_0 cow GXtA9dxzvII_0 person GXyeuhOYX2k_0 truck GXyeuhOYX2k_1 truck GX1v3ymtHtc_0 person GX-3aTTy4lM_0 person GX-3aTTy4lM_1 person GX-3aTTy4lM_2 person GYA-3PblNaU_0 person GYHWtVM2x6c_0 person GYTD79P3b8w_1 person GYT5Cq1tl2Q_0 cat GYWNYnWPaeE_0 person GYY-ElZl7ZM_0 dog GYldHkVSD_A_3 airplane GYmeM7epDjY_0 person GYmeM7epDjY_1 person GYoXwAkvJns_0 person GYsx_49_O1U_0 truck GYuIsHEGV6o_0 person GYuMuXQgLPI_0 person GY0HVEiAPvo_0 person GY3D9bb9kLY_0 airplane GY65ShkktrM_1 person GY9iCFFBA20_0 person GY-carc6vxw_2 horse GY-carc6vxw_3 horse GY-carc6vxw_4 horse GY-dmOLQNH4_0 truck GZIpKCyb0bU_0 airplane GZLsv-Y_aRw_0 person GZM5nvvMeNo_1 airplane GZOUGcF_xaM_2 train GZThnpa-8Ak_0 train GZUk3BlrK7k_0 person GZWH1bUqm9U_0 person GZYSkuRZwGE_2 skateboard GZb9G8sVRz4_0 person GZb9G8sVRz4_1 person GZgL3ZQI9nM_0 cow GZhuCclpFuk_0 elephant GZq8tIKR9b4_5 bus GZsP_n7aFMo_0 person GZxvpxqvHFs_1 airplane GZ0bYvVD_us_1 bird GZ1aL_iE5a8_1 person GZ6PRvVVeZk_0 person GaAL3IYDUgM_0 skateboard GaD4QsNCcik_0 person GaF_t9Af1hg_3 umbrella GaJvFxg_lFY_0 person GaJ7Bu5UrgQ_1 bus GaJ7Bu5UrgQ_2 bus GaVmURUD-i8_0 person GaYAyNs2FDI_1 person Gad1St-JBls_0 dog GaeWhfSP3EA_2 knife GagCDetg0dg_0 bicycle Gai7qgVSFc8_1 cat GangZBQawtQ_0 person Gax9nZtMs7M_0 person Gayl2EVJTkw_0 dog Ga3YHyqOqYY_1 person Ga3YHyqOqYY_0 person Ga_Oju23T9s_0 person GbBl5CcJgeE_14 elephant GbBl5CcJgeE_6 elephant GbBl5CcJgeE_8 elephant GbBl5CcJgeE_9 elephant GbBl5CcJgeE_10 elephant GbC0DAAn-XU_3 bear GbC0DAAn-XU_12 
bear GbC0DAAn-XU_14 bear GbE-oXaNVBA_0 elephant GbE-oXaNVBA_3 elephant GbE-oXaNVBA_5 elephant GbE-oXaNVBA_6 elephant GbE-oXaNVBA_7 elephant GbE-oXaNVBA_8 elephant GbE-oXaNVBA_9 elephant GbE-oXaNVBA_12 elephant GbGEC5pQ9f8_1 cow GbHLET097K8_0 boat GbN_zMz1D6o_0 person GbOK07Tq7mA_0 boat GbVDftpuPMo_1 person GbW-55xLUnQ_0 airplane GbY3uHcC3ys_0 truck Gbbhlv2Obsc_0 person Gbbhlv2Obsc_1 person Gbd1-rm9Oyw_0 truck GbmEMxbMtCI_0 bicycle Gbs4s3pX3H0_5 knife Gbs4s3pX3H0_0 knife Gbs4s3pX3H0_1 knife Gbs4s3pX3H0_2 knife Gbs4s3pX3H0_3 knife GbulfCx1hwo_0 person Gb_YkJHLgns_0 train Gb_YkJHLgns_1 train GcCQF52Ok14_5 person GcCQF52Ok14_1 person GcCQF52Ok14_3 person GcCQF52Ok14_4 person GcEgsdqMiBg_1 person GcEsDxUkr00_5 elephant GcEsDxUkr00_1 elephant GcRRhnk4ynk_0 person GcnVDv6bIAk_0 person GctFFbsebBs_0 person GcwS7IyeG5Y_0 motorcycle Gc0lgXRlxGE_1 person Gc0lgXRlxGE_0 person Gc3iNFz3s-o_0 cow Gc5OyOM0VxI_1 person Gc5OyOM0VxI_0 person GdI2CnryrFQ_2 car GdNJ-VDNc3k_1 person GdQuxx_RXvs_2 bear GdbphRsxpKU_5 horse GdbphRsxpKU_3 horse GdfyxcmHHOQ_0 person GdiGBeJ9m_k_0 person GdiGBeJ9m_k_1 person GdsJ0QHb83w_1 person GdsJ0QHb83w_2 person GduwjeptozQ_0 person Gd5qUjEeqZ4_0 motorcycle GeHV-tf-ZGA_0 bus GeUECF6hDkg_0 airplane Geb74PkjTYY_1 person GehgPYVYwDs_0 person Gek3IJfBaU0_0 train GeuYAXldbbg_4 airplane GeuYAXldbbg_1 airplane GeuYAXldbbg_2 airplane GeuYAXldbbg_3 airplane GewTJtB97l8_2 knife Ge2suMLyOTY_0 cow Ge4SjOnEYWs_1 person Ge4SjOnEYWs_0 person Ge8RWLzmrE0_0 person Ge8RWLzmrE0_2 horse Ge9uJatNWuw_0 person Ge9uJatNWuw_1 person Ge-VfDpriPY_1 person Ge-VfDpriPY_0 person GfCjURNr9T4_0 person GfLxzlZxHic_0 person GfbcHsH3DKI_0 person GfeXUZVyvL4_0 person GfefENTSQOI_0 person GfkX7I9bclY_0 cow GfqA0SZPeXU_2 horse GfqA0SZPeXU_3 horse GfxwasnA0Ao_0 bird GfxwasnA0Ao_3 bird GfyBiJNU7bY_0 car Gf50aWojLhk_1 airplane GgV4eSmNyaA_1 elephant GgV4eSmNyaA_0 elephant GgcoCmlTlbc_0 person GgfESlKFIkU_0 dog GgkncqtrgPI_0 person GgsFohIKlpw_0 dog GgyOGY2q9xE_0 skateboard Gg9uDi7KjJ0_0 person 
GhBPvHC15BE_0 person GhHPtGuUtRY_0 person GhI4uqxOQpc_0 horse GhLdswZDYMs_0 bicycle GhLdswZDYMs_1 bicycle GhMC34aeHnU_2 person GhMC34aeHnU_0 person GhMC34aeHnU_1 person GhQRZOseJfY_0 truck GhbtO__NASs_0 person GhbtO__NASs_1 person Ghbt5lVT3dk_0 truck GhiVm-6oFyg_0 train GhwtPgHjLvg_0 dog GhxWr3HvvXA_1 person GiRzA3Fe1-s_0 person Gijruln92tk_0 truck Gik59IGJFLo_0 bird GioAI9XlGGg_0 bird GioEMsI07Jw_0 person e9ihaIQuVMU_0 knife e9ihaIQuVMU_2 knife e9iolRKSwBw_0 person e9mOqKDBOVg_0 person e9nH--aGWDM_0 person e90GV6rl3NE_0 person e9-w67QSEBs_0 person e9-w67QSEBs_1 person e9_LqDqVkGs_0 person e9_LqDqVkGs_1 person e9_LqDqVkGs_2 person e-PcZyfAPZ4_0 person e-R-FxrDQao_0 person e-dVHSE1qXI_0 person e-gU8I2kZyY_1 bicycle e-n0pRU6uSk_0 bus e-n0pRU6uSk_1 bus e-qbVMLqnEw_0 person e-siUblegSA_0 dog e-siUblegSA_1 dog e-v2yWUGKiU_1 boat e-zbkYroVUk_0 person e-43rdp3psc_0 person e--Qr92yhBo_2 horse e--vN-5QX-E_0 person e-_nLPye6sc_0 person e_APlM8VSiw_1 person e_APlM8VSiw_0 person e_FyX6iUBZk_1 person e_GD2rN9Jcg_0 person e_SYVD0TY14_0 airplane e_UwPkRMD74_0 person e_aHtRh2PpI_0 cat e_b_4zlKmdo_0 giraffe e_qdDAeerKQ_1 bird e_-SOM0hufo_0 truck fAHFZWyNZQ4_0 bird fAHFZWyNZQ4_2 bird fAJAQb5tzFA_0 dog fAJ939SI_YI_0 person fAKXvHREf8E_0 bird fAMkbedQ0GI_1 person fAQoNDLgds4_0 bear fAUG8-TdflE_0 person fAjj5137yKM_0 bicycle fAm_6grpTOI_0 person fAyBUKM7898_0 person fAz2ecihxEU_0 person fA5ArJS7ScI_0 car fA6XfSl7pqY_0 person fA_OWAI_8kc_0 person fBH6rLEukMU_0 person fBIh-CAYfy0_0 person fBLrr2zYnRw_1 person fBLvIU3Q7Rw_0 horse fBPjBSdwz1o_0 elephant fBPjBSdwz1o_1 elephant fBP3dZYp3sM_0 person fBT1cNog4Lw_0 person fBkDTXhVYCs_0 giraffe fBmp8URVoB4_0 car fBsQegHOF8Y_0 person fBtfkn4uDKE_0 cow fBvAf66603Q_0 person fBwrgO05rqo_0 truck fByljFegqK4_0 person fCADagfWgSU_1 elephant fCK_OirKTO4_0 person fCMJnkyFS5c_0 person fCMJnkyFS5c_1 person fCPVsi1S2jM_0 cat fCTNp-hiUkQ_0 person fCTNp-hiUkQ_1 person fCT0UeuTcQk_0 person fCUZclkgF-c_3 car fCUZclkgF-c_4 car fCUZclkgF-c_5 car fCVoLETgca4_0 
bicycle fCW56GByDs0_1 person fCW56GByDs0_0 person fCX_8Q_OAos_1 dog fCZXrHFimHM_0 person fCbvdNQUcRE_0 cat fCdlrWXZ7kY_0 person fCiWi1Dk-yE_1 person fCkgtao7rJk_0 motorcycle fCmwPCLYVXE_0 skateboard fCmwPCLYVXE_1 skateboard fCm-8YmQfoY_1 giraffe fCoXLMBzqTc_0 cat fCohGx6PWyM_0 person fCr-fmsVVWE_0 person fCsSoErwvfw_2 skateboard fCsSoErwvfw_0 skateboard fCsSoErwvfw_1 skateboard fCtyUxRaSdQ_0 skateboard fCwicNYDKmo_0 person fCzWVcZvGuk_1 motorcycle fC6O_2ljm_c_1 person fC6O_2ljm_c_2 person fC6O_2ljm_c_0 person fC8FUnipL3M_0 bird fDBgRd9yK8Q_5 airplane fDBgRd9yK8Q_1 airplane fDBgRd9yK8Q_4 airplane fDCK-s1gX18_0 skateboard fDCadv28EEo_1 person fDCadv28EEo_0 person fDFpsal4hHo_0 person fDIVkvMCQ9I_1 cow fDJjIhw4XBI_2 person fDJjIhw4XBI_1 person fDLBxom0wgI_1 cat fDVesIz_ON0_1 person fDe30IPiQ0Y_1 horse fDuiW9_sHcQ_1 person fDyXAhF761Q_0 person fD89z8ycv7U_0 person fD89z8ycv7U_1 person fD89z8ycv7U_2 person fEDj20Gce80_0 boat fEK6hdzjG5E_0 cow fESV3o1vc1A_1 bird fES_1kR2d8o_0 person fEVLKYBuE7k_0 truck fEXq69B6L0s_0 giraffe fEZ5cqJWg0A_0 bicycle fEdlpwoza6o_0 person fEdlpwoza6o_1 person fEdlpwoza6o_2 person fEgqRE0XOMM_0 person fEh5hyz4LCU_0 skateboard fEiWI60P4XI_0 bicycle fElOryAiN0s_0 person fEmh4mfGsCA_0 person fEupHSTMXLk_0 knife fE0raHY_nY8_0 cat fE_sSvVFvZU_0 dog fFBkKrJlobs_0 cow fFEDu-fiUUM_0 person fFGmvl4E9QI_0 bird fFImZECw1c0_0 skateboard fFImZECw1c0_1 skateboard fFOTZMvg0n0_0 horse fFRp0dBucFA_0 bus fFTJuANVr2I_0 person fFWU4PNTKDo_0 person fFWU4PNTKDo_1 person fFaJ5epORzQ_0 person fFd91uPKDVA_0 person fFksYDaR-NI_1 elephant fFmCHQgzMRc_1 person fFmCHQgzMRc_2 person fFmhW2ygNKw_0 person fFncU3kR5qw_0 car fFogpyIr-Ic_0 person fFq0hnzgGSw_2 bicycle fF0RlMrKBFo_0 bicycle fF1S-952IOU_0 horse fF3WOuwnvrA_3 elephant fF3WOuwnvrA_5 elephant fF3pBoS7xFg_1 person fF3pBoS7xFg_0 person fF34g3sNiHo_0 person fF7snD5S5Q4_0 car fF_BanWRtKo_1 skateboard fF_BanWRtKo_0 skateboard fGGJnSDPzUI_0 person fGI6_U9U_zc_1 person fGPsR0YiVaE_0 train fGgJ0VACAo4_0 umbrella 
fGlnCmVPzIs_0 person fGrC6VCXVL4_0 person fG1NOqIRoLA_0 person fG6uSVeocMo_0 person fG-4n3Gy1fk_0 person fHO3g6Q_bNE_0 person fHUjlWalvJQ_0 person fHVJzD_AvV8_0 person fHepRAiQQ04_0 cow fHlfVMMfXNg_0 person fHm5WgSYk2Y_0 bus fHoBjwC8H50_0 dog fHoBjwC8H50_3 dog fHsaxiTw0dI_0 motorcycle fHzSK8AEv5U_0 person fHzzixV1xyg_1 cow fH5U2jXbkEg_1 knife fH8PS8Fjvbg_1 cow fH8PS8Fjvbg_2 cow fIABVBcluZ0_0 skateboard fIABVBcluZ0_1 skateboard fIFMCt78hmI_0 truck fILyoB3Pgrg_1 dog fIM7jmsq_FE_0 person fIN8z4lkdyA_0 car fIN8z4lkdyA_2 car fIN8z4lkdyA_3 car fIPXE6MOZp0_0 airplane fIT1bTlW3UQ_0 person fIVT3rTMptI_1 truck fIXFrPFEL0w_0 giraffe fIlXSJxnKD8_0 person fInEVgREyyY_0 dog fInYB8sD7tM_0 person fIrb5Y93wjw_0 train fIvUwaa2ziY_0 person fIyrHecb8SQ_0 elephant fI0VoDDN2lE_2 person fI0VoDDN2lE_0 person fI0VoDDN2lE_1 person fI5fnVs_kWg_0 motorcycle fI8DySScPWU_0 skateboard fJGPTgv8EUs_0 person fJJBGybbnH4_1 knife fJJX9D4siG4_0 cat fJTeqi3aqRc_0 car fJYGkMT9c6U_0 truck fJY5zGaYs8s_0 person fJdWgbIMXZ0_5 train fJdWgbIMXZ0_0 train fJdWgbIMXZ0_2 train fJpRqXhL3wE_0 skateboard fJp4DAu46Yg_1 person fJxbRDMY46o_0 person fJyBgU7rZvE_0 person fJ71o3Q-oVE_1 cat fKDRpRcSnrw_0 cat fKHs2FNZk6M_0 person fKLJqhEdsTY_0 cow fKLJqhEdsTY_1 cow fKLS0DAexvw_1 boat fKLS0DAexvw_2 boat fKLS0DAexvw_3 boat fKRZ4PPWgg8_1 person fKcOtlmf6r0_3 boat fKcOtlmf6r0_2 boat fKgpRiyDlvc_0 person fKhENDvpnmA_0 boat fKhe37bCgeA_1 horse fKp-Lvw2bUM_2 elephant fKp-Lvw2bUM_3 elephant fKp-Lvw2bUM_4 elephant fKrxRvMxZqM_0 person fKxBpYS29uM_0 dog fKyPRwF5y6s_0 person fKzFEc6hR-c_2 person fK89Z2AwlCg_3 bus GiuUBGsdiqI_0 person GizeLrnWRmk_1 person GizeLrnWRmk_0 person Gi--TM8Xz3I_0 person GjCs_s2EnpE_0 person GjFr4qO_LX4_0 dog GjJFQButa0w_0 bear GjJk6U2crcw_0 skateboard GjJp-yqt7xk_0 airplane GjZDPTKpIdE_0 person GjZP-buSAG8_0 person Gjdyi0kf79Y_0 truck GjfhgZMeHAA_0 person Gjgu3OFbWKI_0 bear GjkrI0adkJk_0 person GjmNPrYyCwg_0 person Gj87GZKvhdo_0 horse GkCXvg93pAA_0 cow GkGG1F5by14_0 person GkddmkbGSAc_0 cat Gkfp-yV9e94_0 
person GklwzbjOzYQ_0 person GkmRFBuktnQ_0 person Gkxkfi_wHeA_1 motorcycle Gkxkfi_wHeA_0 motorcycle Gk6IzYQADXg_1 skateboard Gk6IzYQADXg_0 skateboard Gk9v8ABOPNw_1 elephant GlLzIn-6ouU_1 bicycle GlLzIn-6ouU_2 bicycle GlPdixjfu44_0 cat GletqIQ8irw_0 motorcycle GlsMcq1cM2c_1 bird GlxEVs7z_7Y_0 person Gl7S2JNezLg_0 boat Gl7S2JNezLg_3 boat Gl9cy66E4FQ_2 knife Gl_UMssuTWU_0 person GmI47tbiNQ0_0 person GmKT2rhDILU_1 knife GmQX3sIhhqo_0 cow GmS0yrU3Hcw_0 person GmUFocQWPTo_1 boat Gmdxq1glmKY_1 dog GmeGRg8XZ5M_0 person GmvKmbIHKHM_1 person GmvKmbIHKHM_0 person Gmww9V50JtU_0 dog Gm9BnQSZlxk_1 person Gm9kb3zHsLA_0 cat GnFoElm_rrw_0 dog GnGd8Q_cSHU_0 person GnGd8Q_cSHU_1 person GnO2sxJNWjk_0 elephant GnRp7QHoAr4_0 train GnkSrEpnmRo_1 person GnmgLr5p-r8_0 bus Gno0JyFsjGk_5 knife Gn0av9LV5FU_0 elephant Gn3AqY6vUyU_0 elephant Gn7B_MiLuhA_0 skateboard GoEBr-GbeCk_0 elephant GoEcYxqxcZ8_1 bus GoEy1J3s8Xs_0 cow GoRGaOgttBU_0 horse GoUjZ5wJ2do_0 car GoWyqQorqOY_0 cat GoXlqK766lk_0 person GolDzhH16vg_0 train GorfZ7y-Jw8_0 skateboard GosFitiV7as_0 person GotzQ9ecvkM_0 person GoubTEJzKUI_0 person Go16BKYvDSs_0 horse Go5M-oyC28A_0 elephant Go8BM-B0ML4_0 skateboard GpCjTjkSw3k_0 train GpCjTjkSw3k_5 train GpCjTjkSw3k_3 train GpCjTjkSw3k_4 train GpCjTjkSw3k_2 train GpDilZGSveI_0 person GpJmJforKzo_0 person GpPbMduP_3Y_0 cow GpProJiVxa4_0 bear GpTPDl3MzZw_0 cat GpVy_gD1slw_0 dog GpY4Nw8LLy4_0 bird GpkftB3rq5g_0 dog Gpn_kF1lXuc_0 bicycle Gpn_kF1lXuc_8 bicycle Gpn_kF1lXuc_13 bicycle Gpn_kF1lXuc_14 bicycle GpzE4RQTM1Y_0 airplane Gp3g6UYBBzw_0 person Gp3g6UYBBzw_1 person Gp70TnjZRfU_1 train Gp70TnjZRfU_2 train Gp70TnjZRfU_0 train GqZeX-EEEL8_0 person Gqc_LkQvKak_2 horse GqjVd_dRiB8_0 person GqjVd_dRiB8_1 person GqjoBpwsgUc_0 person GqjoBpwsgUc_1 person Gqntj1GoicU_0 bus GqzN0dyl5p4_4 truck Gq-mMFeLCyo_0 person GrG-ipHg_4w_0 person GrK4qEJjeKE_0 airplane GrNDwiO4kdI_0 airplane GrQ0zJbkeXE_0 person GrXOOtPiIGw_0 zebra GrYsw9-Skqg_0 person GrZvWtxffXE_0 person GrpvM1_CRqI_0 train GruxXrzWzjk_0 
airplane GruxXrzWzjk_2 airplane GruxXrzWzjk_3 airplane GruxXrzWzjk_5 airplane GrzyUDtV-Ug_0 person Gr6be_D6d9Q_2 skateboard GsFDHyoPppk_0 person GsGHB19iuE4_0 person GsKJMkVSeV4_2 airplane GsL7VYYWhu0_0 person GsOgw9XtlWc_0 airplane GsOgw9XtlWc_1 airplane GsTlT_7Zb1Y_0 train GsVvc55IHn0_0 skateboard GshXL9V-lrM_1 person Gsj4aXqBPHM_0 truck Gsn06D15nmk_0 motorcycle GsrSyK5ymQo_0 boat GsrenPacLW0_1 person Gs67R7prarI_1 motorcycle Gs7J9Yo-uF0_0 cow Gs7J9Yo-uF0_1 cow Gs79ZsyWm74_0 person GtAKWYvc9kY_0 elephant GtCbEqqQgqY_0 person GtCbEqqQgqY_1 person GtD2m1EXxjc_1 bicycle GtKaIcQJZcc_1 person GtLYNeredOY_0 boat GtVrmoeEcMM_0 knife GtZPw5ftw88_0 person GtZSRodviU8_0 person Gta1hcIAAE0_0 elephant GtiiYqVQ2Kw_0 person Gtmp8y8APfQ_1 skateboard Gtnqm4SnEXo_0 horse Gtnqm4SnEXo_1 horse Gtnqm4SnEXo_2 horse Gtnqm4SnEXo_3 horse Gtnqm4SnEXo_4 horse Gtqcx01NTTw_0 knife Gtsvc9lA7hs_0 airplane Gt33VfmFDWw_0 person Gt6q9b3QUvE_0 bicycle Gt6q9b3QUvE_2 bicycle Gt7thmVY6aQ_0 person GuQvGMFuhu4_1 car GuQvGMFuhu4_3 car GuXelRN3wMo_4 bear GuaD24NfCe0_0 person GuawwNMbfBI_0 person Gue43DvNTGc_1 train Guf15LHosg8_0 person GugU0nZdPJU_0 bus GuhfGduN9v0_0 person GulmsZq-VsU_6 boat GulmsZq-VsU_0 boat GulmsZq-VsU_3 boat GulmsZq-VsU_4 boat GulmsZq-VsU_5 boat GusEs8RA4_o_0 motorcycle GuwTG6RtcFI_0 person Gu4MWCc2Wws_0 bicycle Gu-vFv_w9Vo_0 person GvFmkdxnKyI_0 horse GvIj2sMkJwM_0 person GvNhgCGtUOQ_0 truck GvQvyfTNykM_0 truck GvRM_UnjJoE_2 horse GvdMRPX4KR4_0 train GvdMRPX4KR4_1 train GvdMRPX4KR4_5 train GvoIcT-hFek_0 person Gv9mTaerVLc_0 person GwFrSa-YwfI_0 bear GwFrSa-YwfI_1 bear GwIn1NaaEwE_0 bus GwbpMG2B14Y_0 truck GwgaNLd1f7s_0 truck GwlNXPuUvXM_0 person GwnBP9a07RE_0 person GwnBP9a07RE_3 person GwnBP9a07RE_4 person GwnBP9a07RE_1 person GwnBP9a07RE_2 person Gwx1ad4lW1Q_2 person Gwyl7djxZkg_0 cow Gwy4ODXAAU8_0 person Gw5YyHT1Nt8_0 person Gw9Vi_Io9DM_0 person Gw_Tiv72jms_1 horse GxANCkxq7Ng_0 motorcycle fLCd0DDhfBk_0 person fLEUT0rTkv0_0 bird fLJniCJFPTg_3 elephant fLPHwVvk6K4_0 person 
fLPHwVvk6K4_1 person fLWW1YWO26Y_0 bird fLdMmSIfseM_2 person fLdMmSIfseM_0 person fLe279fKywo_0 dog fLsDTJxlsW8_0 person fLwrxElzLZs_0 person fLyNbq9v6kg_0 person fL1w15qwbqE_0 person fMOnb4P7tww_1 person fMOnb4P7tww_0 person fMO1J7ojQqk_0 dog fMTosfHKy2I_0 dog fMi6lVyCOHw_0 boat fMwCpOTv9RY_0 bus fM-puV4uyzs_0 person fNAZ9IDLZy0_0 person fND_OguW0MM_1 elephant fNIdPhAsjiM_0 cat fNJSPU5r3sc_0 person fNO_o1D0kvY_0 person fNdRm3HWQmo_1 motorcycle fNgr2EBEDCQ_0 car fNgr2EBEDCQ_1 car fNg3y0FHjgg_0 person fNhDT1fwzKM_0 person fNhDT1fwzKM_1 person fNh54BNEJBQ_0 cat fNw9dDcM4ms_0 bear fN-FYknWOSk_1 person fN-FYknWOSk_2 person fN-43XPvLwg_0 motorcycle fOLR2dvBtqo_0 cow fOO1pHvrPWQ_0 person fOatLQK_AyQ_3 bicycle fOcPVX4sAxg_0 horse fOjKgQf86dk_0 horse fOkrLuGKDvk_0 person fOkrLuGKDvk_1 person fOkrLuGKDvk_2 person fOsd2aWzfBo_0 cow fOtnatCU7_Q_0 person fOuV2101nEo_0 bear fOv8ocd2xhA_2 knife fO30fgQYdT4_0 bus fO8Do_0RQXU_0 person fO9GgD7GqE0_2 bus fPBIIZV6fuU_0 person fPMNtuJztSA_0 person fPVn9Wxf_HQ_0 person fPVn9Wxf_HQ_1 person fPrhiYslRjA_0 person fPzDDdztZNk_0 horse fPzQyo7caqU_0 person fPzqpL90owQ_6 bear fP5AyxuGIS8_0 person fP8x_x2_k5g_0 person fP-DMm3u5n4_0 cat fQEGEb4W3IE_0 person fQNyLEXwnn0_0 person fQOjoYB5hPQ_0 person fQOjoYB5hPQ_1 person fQOymYsdTtU_0 person fQdA_-549Dk_0 dog fQh5RtZzYzo_0 bicycle fQlChBB42M0_0 person fQoJWcmQmsU_1 person fQo0G2i1QjY_0 person fQt3g_9u1RQ_0 airplane fQyE_yIAu_0_1 skateboard fQ26oO2Y5NM_0 bicycle fQ4H6UmTepU_5 giraffe fREDiuJlBf8_0 person fREDiuJlBf8_1 person fRFF0xtrWhI_0 elephant fROdeQpu88o_1 knife fRS5rhYP7LM_0 person fRXDSh8gr0c_1 person fRZ7Wze7ATs_3 knife fRcegyxH0Is_0 car fRhNtVu6anA_0 dog fRjCbO3MyU8_0 person fRmnBvuwZlU_0 dog fRmnBvuwZlU_1 dog fRrLguORoeU_1 umbrella fRrLguORoeU_2 umbrella fRrd-Z2R-Gs_0 person fRtzYh_gGgI_1 cow fRwzMPH6Kvw_0 person fR1zDIeBHFg_0 person fR6FrFNXUxY_0 person fR-JNy5hccc_0 umbrella fSA7T5svJ-o_0 bus fSBe_a8ZkZU_0 cat fSey4VJgLM0_0 person fSfKYTVt7V8_2 bird fSfX4Z6SR2U_0 horse fSj-h8lAhWw_0 
cat fSoqM6oq2AA_0 train fSoqM6oq2AA_2 train fS0098HnnhM_0 person fS3KL3nj7FY_0 person fS73PiHaNi8_0 person fS8_byjM-1M_3 zebra fS8_byjM-1M_0 zebra fS_6fgFOiPU_3 train fTFLfGUcgMs_0 elephant fTFLfGUcgMs_3 elephant fTFVwPKxUHE_2 elephant fTP9YgSJZg8_2 knife fTVb5uxWnsI_0 person fTVb5uxWnsI_1 person fTgirzB_QLU_0 person fThV1JtaTJg_0 person fTkIm1nb6qg_1 bird fTkIm1nb6qg_2 bird fTnnG_WcLYY_3 knife fTnnG_WcLYY_4 knife fTwiavhNzxs_0 person fUB-cH8rjW4_1 person fUB-cH8rjW4_0 person fUF__EdDFVs_0 skateboard fUISEtXSRYM_0 person fUU4R6RP4ek_0 motorcycle fUXpqgf4jUA_0 bus fUd8LjmonBM_0 person fUetaCH3tZk_0 person fUg6JULdTnU_0 person fUonzpmV18o_3 bird fUqVKgWVVNY_1 person fUqVKgWVVNY_2 person fUwzXH9i0yQ_0 person fUx60fl9UkU_0 person fUzsVWD48bA_0 person fU3o6Frqdww_0 truck fU4DzirdCVE_1 airplane fVAmI93Yb6E_0 cat fVAsOuag4vY_1 giraffe fVHZEHosow0_2 person fVH3n0aghP4_1 person fVH3n0aghP4_0 person fVH7PpDqlPE_0 boat fVIVas1R1tk_0 cow fVOy449KQlY_0 person fVX7qR-o-9I_0 cat fVZfWzDBb-c_0 person fVZ_9hWIGpA_2 truck fVdrMKHN9WY_1 cow fVq7Of0Tr-s_0 person fVr3XVUzJaA_0 train fVv5EqFYsAY_0 person fV80H_L3AN8_1 motorcycle fWLqbV7Z7Go_1 person fWLqbV7Z7Go_0 person fWb_-8hhubg_0 person fWmJ9tUUCwg_0 person fWpdcmgr5r4_0 horse fWxgjNDC4OQ_0 car fWxgjNDC4OQ_1 car fWxsOgW3P6U_0 person fW1Z_Mx1RaA_0 person fW4fh_WBiMY_0 train fW7yPljMFRc_0 person fW7yPljMFRc_1 person fW_HPaNBsDE_0 cat fXCFktk2xdc_0 person fXLB02IH0G4_0 person fXLB02IH0G4_1 person fXOdZ0uKuBc_1 dog fXWqvRfBWto_0 person fXX7K6CQfBw_0 airplane fXYn01Cgmqs_0 dog fXY7h0cc6tw_0 cow fXbnEKMaIoM_1 boat fXbnEKMaIoM_0 boat fXka5y708fI_1 person fXowuJDXhhU_0 person fXyBm7_EDVc_0 skateboard fXzIQASqygY_0 bird fX-kSrf_K8w_0 horse fYDgPdRtmjU_0 train fYLtnvuW_VI_0 motorcycle fYMA0fLN8sI_0 horse fYN5ZIicl_k_0 car fYmfHE2mONE_1 person fYnsIFGQfT8_0 person fYql4FiApLQ_0 horse fYtm_pGBWkU_0 person fYu5ChRgapY_0 motorcycle fYw5KVCsg_4_0 person fYyI8x0tNAA_1 bear fY4-6vsjmD8_0 person fY82KLfOpbk_0 person fY82KLfOpbk_1 person fZCdkf9VQzU_2 
cow fZEFEAYBlGE_0 cat fZFYdgZbSBg_0 person fZFYdgZbSBg_1 person fZJOS8BlA-w_0 person fZOtury_J_w_0 person fZTIKbSjOhk_0 airplane fZTJH_9Pqvg_0 person fZTJH_9Pqvg_1 person fZWP75nltcM_0 bird fZXzEYFmZ_8_0 person fZXzEYFmZ_8_1 person fZiiYH3WfD8_0 skateboard fZnbOFaSEQc_0 person fZnbOFaSEQc_1 person fZp_UgW_xZU_1 motorcycle fZp_UgW_xZU_0 person fZu7wEVEuX8_0 person GxHmm60dKvc_0 skateboard GxLI4BFLrps_0 person GxPYf4SAQvE_0 person GxPYf4SAQvE_1 person GxWuAfBV300_0 person Gxg0Pt_9bIE_0 person GxwwTXW-DdQ_2 train Gx1zPI3b2oc_0 person Gx3xtKPwlz0_1 horse Gx4ryd6AGl4_1 train Gx4ryd6AGl4_2 train Gx4ryd6AGl4_3 train Gx4ryd6AGl4_0 train GyGdlCtDdJc_0 person GyIKdb5KDHk_1 train GyPRnKI78iA_0 person GyU8x9urAxE_0 motorcycle GyVDsnuS5jU_0 person GyXlgRxQ1jo_0 train GyXlgRxQ1jo_1 train GyZHiIEOBos_0 cat Gya_TrOGXpo_0 person GyhjyC5aJ8U_0 bus Gyjb_P1W7TA_2 bus Gyn_wSuRB3w_1 truck Gyzaf_gaIYY_0 motorcycle Gy9JueTT4XU_0 person Gy_XuBCvbUc_1 dog Gy_XuBCvbUc_2 dog GzB9OTV44PA_0 person GzHy2xjKB_8_0 person GzLmftr6tl8_0 person GzRkvFxVlx0_0 person GzTDLPCsgSM_0 person GzVj8bI0bSk_0 skateboard GzVj8bI0bSk_1 skateboard GzcgYGEqOlY_1 horse GzesZ0laH2w_0 motorcycle GzizYdL25ZY_0 person GzjkTrnmEnU_0 airplane GzjkTrnmEnU_1 airplane GznFDBDT2c0_0 truck GznFDBDT2c0_2 truck Gzrgq_nWH_Q_0 horse GzujCDTak_4_0 horse GzujCDTak_4_2 horse Gzy_PnFtEpM_0 person Gz3Np50b9q4_0 truck G0DQ6VdMp-U_7 car G0DQ6VdMp-U_0 car G0DQ6VdMp-U_1 car G0DQ6VdMp-U_2 car G0DQ6VdMp-U_4 car G0DQ6VdMp-U_5 car G0DQ6VdMp-U_6 car G0FSe53KN-w_0 person G0WsFATo9RQ_0 person G0dXxEbeJnM_1 person G0d44YoKXX4_0 person G0kDhLojiI4_0 giraffe G0leBoTgEx4_0 person G0rwWyFSsYE_0 train G0r2tR6EcF8_1 person G0urH-9ytbc_0 horse G01Xi8VMxgQ_0 person G03JTuHY_RM_0 knife G1AIHF-KITc_0 person G1AtN7CvCXw_0 person G1EnmuHlxig_0 person G1P_XnEL4dc_1 person G1P_XnEL4dc_0 person G1TS-PvdREA_0 person G1TS-PvdREA_1 person G1ThERK4a8E_4 airplane G1ThERK4a8E_0 airplane G1UoN56m5DM_0 person G1YNrrT9-z8_0 bird G1YNrrT9-z8_1 bird G1cY71JK5_E_0 
motorcycle G1c0-CTyZ3I_0 person G1dKhZZARDk_0 airplane G1z6RMtKkbM_0 bird G1z6RMtKkbM_1 bird G11cHAnx17E_0 horse G13ARgckI9w_0 person G17Kpx1bgXM_0 horse G1_R_EJpLZU_0 cow G2FXcVDezv4_0 truck G2HOmWxj5gg_0 person G2LNQIwbLHE_0 person G2S4rwP6qJY_0 bicycle G2V6wliL2AA_0 knife G2g4Z-Syzi8_1 dog G2lFYYEolz4_0 train G2lFYYEolz4_2 train G2x5gACWSwA_0 cow G2z7yjdCUuI_0 airplane G23Q_C35Uqs_0 bear G24yJOgl9t0_1 person G25iisvOYhA_0 cat G2-v9IBlnTs_0 person G3AuCS7s68w_0 bird G3IID08lWos_0 person G3P-Vvra2GU_0 horse G3SowFCFa0g_0 person G3VeVH6pbdE_1 person G3a0EYtnqHA_0 person G3cazaory7w_0 person G3f8bIoGGZ0_0 dog G3kNB0zhHQc_0 person G3pT4MJrpDI_5 umbrella G3pT4MJrpDI_6 umbrella G3pT4MJrpDI_4 umbrella G3vP7_U6yXU_1 cow G37Dm4oy794_0 bicycle G38EbyEOITE_0 horse G38SrxcVYWs_1 person G39ryVtNnhQ_3 elephant G39ryVtNnhQ_8 elephant G39ryVtNnhQ_9 elephant G39ryVtNnhQ_11 elephant G4PD_RAK48Y_0 person G4VPBDOgq54_1 skateboard G4VpcUuXgRs_0 person G4VpcUuXgRs_1 person G4ckSGXUGts_0 person G4fbkcKiZVg_0 person G4nRZ4PHvC4_0 dog G4rJejZ9FIM_0 car G4r0UJvtDXs_0 cow G4xFWKKoN0M_0 motorcycle G47wnMA6RVE_0 bus G4_xR7lZIPo_3 bear G5D1cAo2D6s_1 person G5JwolS0D1M_5 elephant G5QgL60_yfc_0 knife G5SlrQeATlc_0 bus G5SlrQeATlc_2 bus G5hG8j0KxBI_0 person G5ixkqq66VA_0 person G5rBbx_kODY_0 person G5ztukDN_Qg_0 zebra G51fdi_hG_0_0 train G52uuPWcC3M_0 umbrella G553b8ZAd3Q_0 person G58FuwBYL-0_0 skateboard G5_UJ1wEKh4_0 person G6OttGznP9E_0 person G6OttGznP9E_1 person G6QMME1QbK8_2 car G6Qmm4T-cd0_0 bus G6WiR4W4WWk_0 person G6b9lySVCCY_0 person G6eAvUHoDkc_0 person G6fvYSH13nI_2 train G6iVTjyPM04_1 horse G6sFOs8MgGU_0 bird G6sFOs8MgGU_3 bird G6sFOs8MgGU_6 bird G66e5ltBFoI_0 person G7DhRPK7pwc_1 bicycle G7F-ufxEXPY_0 knife G7H7fQ_Q1Ec_0 person G7H7fQ_Q1Ec_1 person G7ID9RdMSkE_0 person G7MvPG8Qv84_0 giraffe G7TezoE9Cmo_0 person G7WblvVQPF0_0 person G7Z01jmMzlI_0 bird G7krBQa_KLc_0 person G7p90FBQk_0_0 truck G7slUshqPvY_0 elephant G74HXSqYO-A_0 motorcycle G75uQAEuUkE_0 person G766vinfuBw_5 bicycle 
G766vinfuBw_9 bicycle G77KKnCpwWY_3 skateboard G8EC6svgwKU_0 person G8NIqmq7YdE_2 bear G8V2UsTc1Ik_0 cat G8V33bTVNII_14 bicycle G8V33bTVNII_1 bicycle G8V33bTVNII_2 bicycle G8V33bTVNII_6 bicycle G8V33bTVNII_9 bicycle G8XX8bkx6Ek_0 person G8hStuDYwH0_2 airplane G8kDZAPbUe8_0 person G8kDZAPbUe8_1 person G8k84FwnW2k_0 motorcycle G8lDrK3u3r0_2 elephant G8lfwRN3Iew_12 boat G8lfwRN3Iew_0 boat G8lfwRN3Iew_8 boat G8lfwRN3Iew_9 boat G8lfwRN3Iew_11 boat G8sDCWad2Bg_0 cat G8s2n3jAKW8_0 cow G8tbj2R0iso_0 person G80DOuBBH_Y_3 airplane G8--2JpJa6g_0 person G9DdsOO1mZo_0 horse G9FQJdIxjsk_0 bird G9YPEOrV5UU_0 person G9YPEOrV5UU_1 person G9YPEOrV5UU_2 person G9ZKH_DS9DU_0 person G9gsnqhd_Sw_0 cat G9hPaEx7Ci0_1 knife G9i66tUOspc_0 dog G9juxPad3zY_0 person G9nlPUwJQB0_0 person G9nvXjuig6s_0 person G9qCl1NZelo_0 cow G9rxIfeUWVo_0 airplane G9vDsElCKAY_0 dog G9zd0G8dIt0_0 person G93PAKTtVpM_0 horse G97UC0qtVDw_0 person G97YtHMd2hw_0 person G99rEXOdlC8_0 horse G9_TgGWQQi8_0 person G-Sr-qmWZNo_0 cow G-YYtvCU7qY_0 dog G-d6o3nTBFA_0 zebra G-nFiFb0Xos_1 knife G-nbiqZuFdc_2 horse G-qCe2DK3Tk_0 motorcycle G-u_ThqhoJE_0 train G-yCRlVSs6w_0 person G-3kOsn1fPY_1 person G_ADLUKVq8Y_0 boat G_LtPKO6be4_0 horse fZ1GVGZmTRA_0 person faJuqm4umTQ_0 person faSv8ijeKeE_0 person faVBgge6xkE_0 person faW2tWwuCMg_1 person faW2tWwuCMg_0 person fahs60oGhLU_0 train fatTPMeG5Pc_1 bear fa-rHhFEloA_1 truck fa--elcQpd4_0 elephant fbDYKST2P-I_0 motorcycle fbFVM0UM5V0_0 person fbM5MhIve5s_0 dog fbM5MhIve5s_1 dog fbiXTCkCkqY_0 skateboard fbmZZXaRkak_5 horse fbmZZXaRkak_6 horse fbmnWcE_64U_0 skateboard fbsyvHQPZZk_1 dog fb3Iq9yQ1VY_0 person fb3WxEfe8l8_0 motorcycle fcCb2W4HMLk_0 person fcD6n99azfw_0 person fcGNPf6n7Ws_0 bear fcWegrm8wCE_0 person fcbcnvGoWLs_0 car fchtQi7-OD4_0 horse fclxNO1L-rY_0 cow fcpGNeDgpDI_0 person fc1qNL5u2wg_0 person fdCTLMd6wEY_0 cat fdQaoSZKA_s_0 person fdRULl8YSnU_0 cow fdYvCuft5zQ_4 elephant fdYvCuft5zQ_5 elephant fdYvCuft5zQ_1 elephant fdYvCuft5zQ_2 elephant fdZBeWyKON0_0 person 
fdbvWvUoFW8_1 bird fdbvWvUoFW8_2 bird fdbvWvUoFW8_3 bird fdkrZ9uL854_0 person fdlDkbbDniw_1 elephant fdmV18YEDKM_0 cat fdnBDcIwPBA_0 person fd3ea86gmJI_0 motorcycle fd3ea86gmJI_1 motorcycle fd8Ba2cZgxI_2 bear feAexE1IYq8_0 person fePU3BlF4Zc_0 person fePU3BlF4Zc_1 person feQX_1dqh9g_9 bicycle feQX_1dqh9g_1 bicycle feQX_1dqh9g_3 bicycle feZfxIunWHo_0 person feZoXB7I6wE_0 person fedmeW-WImw_0 train fegJtwcNo5c_0 bicycle feh4XVzjQdI_0 cat felt48AIbIs_1 person fenYF-k-y4c_0 skateboard feqLG8n4nDE_1 person fe05wKXl2cI_0 person fe05wKXl2cI_1 skateboard fe5_49oxMwc_0 person ffIQZZ_P3ck_0 cat ffOeGlw8_C8_1 cow ffZoY75S_-k_1 bird ffZoY75S_-k_0 bird ffbSaNikNF4_1 elephant ffeYBfcgF3s_0 person fftSD6UfvEA_1 person ffttXyArNGc_1 knife ffvXiSjPp6c_0 horse ffwk_8ycQiA_0 person ff1PHzfARZk_0 person ff5MH6QQuJk_6 knife ff5MH6QQuJk_2 knife ff5SaJnQg5M_0 person fgEpQHGYIjc_0 person fgFy8l-b1iI_0 motorcycle fgJJxPEHVZQ_0 person fgPShysxuQM_0 cat fgQE-9shdmQ_0 elephant fgUjCKe_e_Y_0 person fgWtwTKCtMQ_0 person fgfizI4AnVs_0 person fggT4HM2Uy4_0 person fgsaC375d38_1 bird fgvUj1mCqio_0 train fg1ISXcyb10_1 dog fg5mCaScLE4_10 umbrella fg5mCaScLE4_0 umbrella fg5mCaScLE4_3 umbrella fg5mCaScLE4_4 umbrella fg5mCaScLE4_6 umbrella fg5mCaScLE4_7 umbrella fhHLCLuQAdE_0 bird fhHLCLuQAdE_3 bird fhHLCLuQAdE_4 bird fhHLCLuQAdE_1 bird fhHLCLuQAdE_2 bird fhQN_vhNmgo_0 cow fhan95LbdqQ_1 knife fhmsHcZfBC4_0 person fhutr5rLQN0_0 person fh5lB6U-7Wk_0 person fiGa0nIEYbw_0 person fiKecNhAgFU_0 motorcycle fiS0pY80kkU_0 dog fiWtkuDUFvM_0 elephant fiZAhg2twZs_0 person figjWJDEn1c_0 person fijO0rB1rfY_0 airplane finRU64JVRU_1 bus fi2s2k_aamk_0 person fi46OpYa89I_3 bicycle fi46OpYa89I_10 bicycle fi46OpYa89I_2 bicycle fi6gdEVUAUc_0 cat fi8YGUm_6x0_0 person fi9GleMDHIc_0 person fjF31Mh-tNQ_0 person fjKXALm76kI_0 bus fjXufPzimEQ_0 person fjZ4J-BZX2U_0 person fjaHYcaE7-w_0 person fjaHYcaE7-w_1 person fjnR81fSTeI_0 umbrella fjnxqBnMZzs_0 person fjtn0lRVX_4_0 truck fjwgdNBSCFc_0 person fjwgdNBSCFc_1 person 
fj29rB34ea8_0 person fkERi_ma2UE_0 person fkERi_ma2UE_1 person fkHiDyuUaWA_0 person fkIfLHGu_CQ_0 person fkQEEtG6Tbg_0 person fkSf5a3q6oY_0 boat fkSf5a3q6oY_3 boat fkUDB0V3UXc_0 horse fkUDB0V3UXc_1 horse fkVSILZPyXg_0 bear fkaKyYrWPpQ_0 person fkfnbZ2MSXk_4 bicycle fkfnbZ2MSXk_0 bicycle fkfnbZ2MSXk_6 bicycle fkx0e2gvPYA_0 truck fkyM4LNUCck_0 person fk0v7vZDpgU_0 person fk10mtIF_Hs_0 horse fk8yMMO1gRA_0 person fk8yMMO1gRA_1 person flADy--Uwx8_0 truck flERyzHjhzQ_0 skateboard flMijcdhRAU_0 person flgTyT4DB7E_0 bear flgaLcoSjb4_0 bear fluEronPyZk_0 cow fl6-NRwVy10_0 person fl7Q9yxFoOs_2 person fl95IAyDN-s_0 skateboard fmERtylbqN4_0 person fmGJj0qYc6g_1 person fmGJj0qYc6g_2 person fmLKgz4DQhQ_0 airplane fmL66yeOiI8_0 person fmRfUvIIvT8_0 person fmYELQL9Cs0_0 bus fmbEAdugI3Q_0 person fmbb6SQ6qiI_0 person fmbb6SQ6qiI_1 person fmbu89zGN4Y_0 person fmdem4Z9BHI_0 bird fmfg5yyhjkA_1 person fmiq_EhaURY_1 person fmiq_EhaURY_0 person fmtIa6nxUd4_0 train fmuzrZHZYis_0 skateboard fmwC1khd3BU_2 person fm3zFVlJw4k_1 person fm-ScTLdSL8_1 bus fm_bcsJYhu4_0 dog fnAGderLxPg_0 elephant fnAGderLxPg_3 elephant fnDP4B5jpSY_0 person fnFMQ2VFlEc_0 person fnOL3ZL61u0_0 person fnOkwsmzdaI_0 horse fnRq5X91IV0_0 person fnZR6FD_eZ8_0 boat fnZR6FD_eZ8_1 boat fnbSgwO8v0c_1 boat fnbsAmTQJOs_0 bicycle fnbsAmTQJOs_1 bicycle fniJ36z0_Pc_0 cow fnj1YtAaztU_0 person fnkHdQf9H3w_0 knife fnmuFbydHek_0 person fnpjkwiPkSY_0 skateboard fntRlkYDiD0_1 person fntZVzkwhz4_1 person fnvst-Sk4MU_0 umbrella fnvst-Sk4MU_1 umbrella fnz6gTPuInQ_0 dog fnz6gTPuInQ_1 dog foAoOCF4rE4_0 car foI1jEbg9uA_0 train foJs0wXX1O8_0 truck foaFgrzsPOY_0 person fobJTCY7ifQ_0 bus fodsoLtLzqI_1 cat fojRgMUsu3c_0 person G_RgJ0t0Cbo_0 person G_aU-_2ZiSw_0 dog G_lOQAV6xWs_0 cat G_poofS7HD0_1 person G_poofS7HD0_0 person G__VTazZtp0_0 elephant HARRnedV05U_0 car HAVUursfTOI_1 zebra HAtu6frOH1k_0 person HA1TDbNot8E_0 person HA-iE7bcfT0_0 car HA-iE7bcfT0_1 car HBI13CpuAmI_0 knife HBLJbCs1mSg_0 truck HBMah_r3E1g_0 person HBOqQBe7rhE_0 person 
HBO6G57uhXA_0 person HBY4_6b_sRY_0 cat HBiSuZWtb4E_0 boat HBmaJJ0nTAo_0 person HBwjWdXrpPA_0 dog HBzYVphfmRQ_0 person HCA4jkg9HTY_1 person HCA4jkg9HTY_0 person HCEjNJewxbw_0 person HCJ1EYfF8qg_0 elephant HCKZ7kihdaM_2 airplane HCMBgpQ2z18_0 cow HCSbzHGXxmA_0 cat HCczjWUmlW0_1 truck HCczjWUmlW0_0 truck HCg0k7LnfkY_1 cow HCg0k7LnfkY_0 cow HCiRQdh20qg_0 dog HCm-B3JjzhY_0 cow HCpxRBja8lE_0 person HCp6gYC9NFE_0 cow HC72_Yrigik_0 person HDN4DqO_KLg_0 dog HDQEWwETuU4_0 person HDRKiYaoEnA_0 person HDSw0KM8cSs_0 person HDkI156rPRA_0 person HDmK6y86kYM_0 person HDmK6y86kYM_1 person HDnYEdh7xG8_0 person HDqUvaFm_R0_0 skateboard HDr5if6Mb_4_0 person HDziFGwpXmg_1 car HDziFGwpXmg_2 car HDziFGwpXmg_3 car HDziFGwpXmg_7 car HD1tKnKT1Dc_0 motorcycle HD7QKzuFNas_1 person HD7QKzuFNas_0 person HD_alEnCVhM_0 truck HD_alEnCVhM_1 truck HD_wYO2_O8k_0 person HD_4ZJr68p8_1 horse HEIjtOJze90_0 person HEfIJ3wMKRI_1 person HEmv-biWoEA_0 airplane HErkHysJd-M_0 person HEr_leMW1zE_0 bear HEr_leMW1zE_3 bear HEr_leMW1zE_1 bear HEyY4zEX-no_0 person HE-4YEdBwuw_0 dog HE-4YEdBwuw_1 dog HFDK_y7kibQ_0 knife HFE9ujNILoA_0 cat HFQFlm1jWiE_0 person HFQFlm1jWiE_1 person HFRCZSouOn4_0 bird HFWQl2JJfic_2 person HFa18pRSsXU_0 train HFlanXHBGHg_0 person HFuw8C2bQ6g_0 person HF07qDRPgrw_0 horse HF1xhyTtWLk_0 motorcycle HF3Nn3KqXOk_0 person HF3Nn3KqXOk_1 person HF4PefI86r0_0 person HGFcsJmjWHs_0 elephant HGFcsJmjWHs_9 elephant HGFcsJmjWHs_4 elephant HGFcsJmjWHs_5 elephant HGFcsJmjWHs_7 elephant HGLC_YFRxPY_0 skateboard HGLLnmQiCU0_0 person HGLLnmQiCU0_2 person HGLLnmQiCU0_1 person HGLdrgf2e2c_0 person HGVNoha70iA_0 truck HGZDROOjAY4_1 person HGZDROOjAY4_0 person HGeCBN48g9o_0 person HGm4OftDlT8_2 horse HGnIxotAPOU_0 person HGnegc2CRTM_0 person HGvXva6SUvE_0 person HGw4URr4QUs_0 person HG1zQzSX2rU_0 person HG8oY2Ac4-M_0 person HG_JAnXBzJQ_0 skateboard HHGq5gd6w1g_0 skateboard HHPW65GVeoA_0 person HHRUnCEVnAo_0 cat HHc5mD1TxGQ_1 knife HHe9m9BOi3A_0 person HHgC0pkNiIA_0 person HHgC0pkNiIA_1 person HHi26rWtC38_0 
person HHx5E8VfnkY_0 person HH0OILx6PKY_0 person HH1JApHMx2I_0 dog HH148v63a5o_0 person HH9wMNMJ2sE_0 elephant HIBd79qG-XQ_0 person HICJGOFvwoc_2 bird HIHX1rpDx_I_0 cat HIIQ917jPqg_0 train HIJGcmgyEcg_0 knife HIJGcmgyEcg_1 knife HIKyhRtWQ4c_2 horse HIK-Z8wXFug_0 person HISWMgqg80E_0 skateboard HITf8extnnk_0 person HIXuU8Z0N9o_1 motorcycle HIgiF2bkOys_0 person HIgiF2bkOys_1 person HIiu2EVu5H8_0 person HIqhXDkhHsc_0 person HIqr0-BB8Xo_1 knife HIrcAjP1fDs_2 bird HIz27dqnl20_0 bus HI3L38NCy0A_1 boat HI3L38NCy0A_0 boat HI_h7HfFDVw_0 boat HJGPBeom3y4_1 umbrella HJSiTzkFpHk_0 person HJVpMFJT2LU_0 person HJVpMFJT2LU_1 person HJg7wtoy2vk_0 person HJhZhn0zf1s_0 person HJi1L5HxuLo_0 skateboard HJi1L5HxuLo_1 skateboard HJi1L5HxuLo_2 skateboard HJq4kVvdeRg_1 skateboard HJrd3kpvjh0_0 person HJr5BOgO9XY_0 person HJ6BZjeSHTY_0 boat HKFJzdCsRfA_0 person HKGK0FLN9vA_2 zebra HKGK0FLN9vA_3 zebra HKIwynmyQp4_0 person HKWELXwIVvI_0 person HKqHmDjxF6Y_1 person HKsVn1IWaas_0 person HK28Vb__IfY_0 person HLAEqFEcR90_4 horse HLAEqFEcR90_0 horse HLAEqFEcR90_2 horse HLAEqFEcR90_3 horse HLBgSJD-3lg_0 bicycle HLL_j-CQKqQ_0 umbrella HLaiRkL4gFA_0 motorcycle HLhbGKVR4mE_3 dog HLy3UUDhaJY_4 giraffe HL06bx_HNg0_0 cat HL6dNcrAEoM_0 person HL8fh6O6iUA_1 train HL9F68y-0kY_0 horse HL9F68y-0kY_1 person HL9o2Vs9d8s_1 person HMF0KrAf0iI_0 person HMIGIwIcNq8_0 person HMJerOjZn4I_0 person HMQQrRvzwiM_0 boat HMUBbUP6Ko8_2 boat HMV7H81wz84_0 train HMb-pPTMZ5I_0 umbrella HMxMledcSVE_0 person HMyUpcpZGdM_1 bird HM4hJE0Db2Q_0 person HM4zY3uzwOQ_0 person HM7sD8YClkI_0 person HM_3ck6yooo_0 person HNGh3Rvn6Sw_2 knife HNGh3Rvn6Sw_3 knife HNRwM8zXMTM_0 person HNXQ_dkhX-Y_0 truck HNdRITK9TGE_0 person HNeVOXPyunw_2 person fo9SmkQa35Y_0 motorcycle fo9SmkQa35Y_1 motorcycle fpM1eiK3iok_0 truck fpNLFTgOciY_1 umbrella fpRq9BsaPzs_1 horse fpRq9BsaPzs_2 horse fpVZYKlsFsU_0 boat fpdUwZ8Gnd8_1 cow fpeYfCUzvDY_0 cat fpkxYBJDTtI_0 person fpkxYBJDTtI_1 person fpmtNez1u0o_0 bus fpnTZF4bvk8_0 person fpomSxrdTyE_0 person fpo2kf1idyo_0 
person fpp_41AxRNI_5 giraffe fpp_41AxRNI_1 giraffe fpp_41AxRNI_4 giraffe fqQL3QPq-lo_0 train fqXvzEGxSak_0 bus fqcie5yyOxA_0 cat fqfHWT5hjkY_0 cat fqkVB4qZbgw_0 person fqlWb2OJg3Y_0 bus fqnioIm10xY_1 train fqpMhE5qOKk_1 person fqxGN6r9oIY_0 zebra fq5Zh2Lo9GQ_0 elephant fq959dAMasM_0 truck frFSlwby-0k_0 train frFrggXiJZY_1 person frItg4I9oEQ_0 person frItg4I9oEQ_1 person frJtciauQQw_0 person frRHj0FPzVQ_1 person frW5BpQ3-Fw_0 person frXxZevI11c_0 person frXxZevI11c_1 person frY6tIPR-Co_0 bicycle freW9Vk3GhU_1 person frfLZ70XIXI_1 dog frgCmAtYao4_1 boat frh4LMyWaQw_0 person frn-rfqmGVs_0 person frx5Uv7-1zw_0 person fr3S3gEtDS0_1 person fr616yExbeg_0 knife fsD7pYdfrpg_0 person fsE0DlVODpY_1 person fsFtKjirvM4_1 person fsFtKjirvM4_0 person fsOoFz6I_js_1 person fsOoFz6I_js_0 person fsVlTdh13Lk_0 person fsXVGaRpUNg_0 person fsd-DhcH5gE_0 person fsd-DhcH5gE_1 person fsh-wcyuPM0_0 person fs3oXXx75XA_0 person fs6L5bmf4pQ_1 person fs6Rgfl4CtI_0 boat fs6p-qaLswQ_0 cow fs7RdtNY3Ck_0 elephant fs9uDpde9ig_1 elephant ftG2YflDq_E_0 knife ftH3_awR5ZA_0 person ftIp5PyaGNc_1 knife ftNSK_rSs98_1 airplane ftSUBEOhdck_0 cat ftX9ErOmiAE_0 car ftX9ErOmiAE_1 car ftcnCvd4yeU_0 person ftlmGO0CnHk_0 truck fuHAM8D3ros_3 bicycle fuO2QMXiDMU_0 motorcycle fuPtCtdvowQ_0 person fuSxdcdxe70_1 person fuSxdcdxe70_0 person fuh4-mC5fvg_0 car fuklviv_MRE_0 truck funKReksXEQ_4 horse fur41mRCURs_0 cow futBuKCP9zw_0 umbrella fu5d7x7pORY_0 horse fu_f4n_bYPU_0 person fvAislzoQVU_0 person fvDUF-aukF4_0 person fvH1bolPY2U_0 person fvKg6ReEigA_14 bicycle fvKg6ReEigA_2 bicycle fvKg6ReEigA_3 bicycle fvKg6ReEigA_4 bicycle fvKg6ReEigA_5 bicycle fvKg6ReEigA_8 bicycle fvKg6ReEigA_11 bicycle fvKg6ReEigA_15 bicycle fvKg6ReEigA_16 bicycle fvKg6ReEigA_17 bicycle fvKg6ReEigA_19 bicycle fvLauezWx5g_1 skateboard fvLkNgA4N0k_1 person fvZYmQ6SJrQ_0 person fvcIpyJFuQA_0 person fvdoipKMj4g_0 person fvfb_kQCs-I_0 horse fvhVuqonUHg_0 person fvhVuqonUHg_1 person fvlGWjjirUQ_0 person fvqWMyJJqog_0 person fvqWMyJJqog_1 person 
fvtTggVCkFk_0 person fvzbC9c98ik_0 dog fv42-nzlEsY_0 train fv8F7gjL7Js_0 airplane fwCUjUa0cHQ_0 person fwG8C9CEISw_0 person fwLL8mlHf0I_0 bicycle fwL9zu2j3rk_0 person fwQMFtFdERs_0 horse fwQMFtFdERs_1 horse fwTB5tDP4cU_0 person fwT-VIjQCa8_0 person fwop4msktdA_0 cow fwv2gGVEi6g_0 person fwwOICMutXc_0 dog fxFzCD192K4_1 bird fxHZn2FXRGk_0 horse fxHZn2FXRGk_1 horse fxQYhMoNR9I_0 person fxQY5tnybxQ_0 skateboard fxWwYiT8yXk_0 person fxWyDyUmxuY_0 horse fxbNI1vTtq0_0 train fxbjh88g3Vw_0 person fxcDLsblNhs_1 bird fxdVSYuYJOE_0 person fxhuSOpUuGs_0 person fxr4HpTRNS0_0 dog fxxjK3mjCF0_1 person fxyg5GQk8H8_0 airplane fxyg5GQk8H8_2 airplane fxyg5GQk8H8_3 airplane fxyg5GQk8H8_4 airplane fx07mGL1WQY_1 train fx2_nahpAfE_0 person fx4HT1nuEg4_1 person fx4HT1nuEg4_0 person fx9TwmuIYCY_0 skateboard fx9fckiExps_0 person fx_zN3FWeJ0_1 bus fx_zN3FWeJ0_3 bus fx_zN3FWeJ0_0 bus fyE4_usnxHc_0 person fyE4_usnxHc_2 person fyOZZ_u9Jm0_0 person fyOxr6iISdI_0 elephant fyRO8_b4wJU_0 person fyTzI2wuC0M_0 person fybHaZZmAzE_1 train fydZoAN9JpI_0 person fydZoAN9JpI_1 person fydZoAN9JpI_3 person fyhSoeveW3I_0 train fyyLjISjzvM_0 person fyztN8okJkU_0 person fyztN8okJkU_1 person fy5GdRFHsLs_0 cat fzFR54WdDEU_0 person fzV_Z79golE_1 truck fzaNjkWQtW0_1 skateboard fze3woUbt0w_0 dog fzh-lO5lQhQ_1 bird fzoZsW3AMTU_0 bird fzp3cT3c5Wg_0 person fzp3cT3c5Wg_1 person fzp3cT3c5Wg_2 person fzqX7N7ICQw_1 person fzqX7N7ICQw_0 person fzrGdIi_J9k_0 person fzr9mWLJM6E_1 person fzr9mWLJM6E_0 person fzvrWQX908c_0 person fz1PTzziIcg_0 person fz1kPSLo_p8_1 train fz8emqnbleQ_1 boat f0BJ56Dn3D0_0 cat f0E5mPnVSSU_1 person f0JOvKbLwTQ_0 person f0LbneUbWUk_0 cow f0TYLMAZLpA_0 person f0XZTHcpmZY_4 elephant f0XZTHcpmZY_2 elephant f0XpDJO5Tw0_0 person f0XpDJO5Tw0_1 person f0Z8cmobjWs_0 truck f0Z8cmobjWs_4 truck f0Z8cmobjWs_7 truck f0Z8cmobjWs_8 truck f0mYYISWwxo_1 person f0mYYISWwxo_0 person f0o0SmB2JAE_1 cow f0o0SmB2JAE_0 cow f03_N__tWuI_2 elephant f1ASjw4-yL8_0 person f1Da4qa1SIw_1 person f1EKnOQEf5g_0 boat f1GkfW2mOlE_0 
person f1G2DlbJqyI_0 person f1HKyLr8nL0_0 person f1JCS5F-LuU_0 person f1KEvGLqqwI_1 umbrella f1O6FYMq5zk_0 person f1XB0uA4Dvo_0 bus f1Z1HedJzos_0 skateboard f1fEuZwBkDQ_0 person f1nxCdtYwdQ_0 horse f1sTzp9ahWM_1 person f1sTzp9ahWM_0 person f1uaPSveXCI_0 person f2ADBeQ0Vys_0 person f2ADBeQ0Vys_1 person f2EbBSZ8osI_0 zebra f2EbBSZ8osI_1 zebra f2HKs4L6fwE_0 person f2HKs4L6fwE_2 person f2HKs4L6fwE_1 person f2MDAAk-Euo_1 person f2ULSb7lIAo_0 cow f2ULSb7lIAo_1 cow f2ULSb7lIAo_3 cow f2hfKAL0ZoA_0 umbrella f2hfKAL0ZoA_4 umbrella f2hhMTSObNY_0 skateboard f2p2YcmHn8c_1 bicycle f2s4nNZ_qew_0 boat f2ypHkP1WUg_0 person f3EOdxK13SU_0 giraffe f3HU85Jx7m0_0 cow f3JkzQkcdVM_0 horse f3Kxw7yBcW0_2 person f3Kxw7yBcW0_1 person f3Np8rGlxOE_1 person f3VJKfFdBW0_1 truck f3aufQBTMME_0 boat f3bk60UZpqE_0 truck f3bk60UZpqE_5 truck f3bk60UZpqE_9 truck f3kQ_6EG8cM_0 person f3spBT1AGyw_0 person f31ePv3WlNc_0 person f33OpHIFMWA_1 elephant f33OpHIFMWA_3 elephant f33OpHIFMWA_0 elephant f33OpHIFMWA_2 elephant f35syqOsqSo_0 boat f38P7AlhP5g_0 person f39rc-7_QQc_0 person HNr7Ed0_pQY_1 bus HNtUUtLCSDY_0 giraffe HNtojLNWnKQ_0 person HN6XGq0aRx4_0 person HN84N_vu_hw_1 person HOAbQ4r1tzM_1 knife HOA47mRJ9B8_0 person HOOwNsMTi9g_0 person HOSMm-4fUVM_0 dog HOZcbA0OPF0_0 person HOkS1ljUX4s_0 bear HOmzECHFah4_0 dog HOxzSXuj0O0_0 elephant HOxzSXuj0O0_3 elephant HO6yeFgs7Hs_1 bicycle HO7Uf5Enr1U_1 person HPAa3KI1Z30_1 dog HPDws9wJu40_0 train HPIdRNu7STU_0 dog HPIxVE3OLG4_0 person HPPTr0Mpe0A_0 bicycle HPRp9F-4ts4_0 dog HPSJZXcOiEc_0 person HPjcp8hS6vs_0 person HPjcp8hS6vs_1 person HP0RUfuvfx4_0 person HP4O8FbEpEg_0 bus HP6ROW7ahtU_0 person HP6YRIGqiI4_0 horse HP62suxiDNw_0 bicycle HP62suxiDNw_2 bicycle HP62suxiDNw_3 bicycle HP62suxiDNw_1 bicycle HP9u4FmRvbw_1 bear HQBhagraDwo_0 cat HQIxUlu7xSY_0 person HQKVBNWD_ls_0 person HQM9aDN7Tf0_0 person HQZVUknJ0lw_1 person HQZVUknJ0lw_0 person HQePQ1mfzKw_0 person HQePQ1mfzKw_1 person HQhnj0h9OyA_0 person HQhnj0h9OyA_1 person HQjXFK_0sFo_0 person HQxihmm6sSs_0 person 
HQz_At1F0Yk_2 bicycle HQ4ZWia0f1E_2 cow HQ9gmrJ6Bm4_3 airplane HQ9gmrJ6Bm4_4 airplane HQ9gmrJ6Bm4_5 airplane HQ9gmrJ6Bm4_1 airplane HRCOvhALHv0_0 train HRUX75Ve2aQ_0 person HRVMd5SmF8Y_0 umbrella HRl1VhUfhok_0 person HR1wffFOaEw_0 elephant HR4ExP8Ompc_0 horse HSKpu2UmvBo_0 person HSKpu2UmvBo_1 person HSN6tO3rh-c_0 person HSVWpwFagLg_1 person HSdyrMzM64w_0 cow HS3WVWEFHm8_1 person HS3WVWEFHm8_0 person HTAnAeW5Bhs_0 bird HTS20hgMcFQ_0 bicycle HTTz78R4i0c_0 person HTehrgCQAPo_0 person HTgldgqci04_0 person HUFGafskCjw_0 person HULASsoz03U_0 person HULASsoz03U_1 person HUPxNiCgjn0_0 knife HUfwe7j7IBE_0 person HUgX2V1AkVw_0 person HUiMyxUEC_A_0 person HUv2tT_n5Bo_0 person HUy4cHFX-04_0 person HUz7znJTRNg_1 umbrella HU_HuNQ4TDw_0 cow HU_HuNQ4TDw_1 cow HU_HuNQ4TDw_2 cow HVEmUm86PBo_0 motorcycle HVI1w93kCfo_0 person HVOWKezX_bo_0 horse HVOWKezX_bo_2 horse HVYf36PFglw_0 dog HVY9hWgMujc_1 truck HVeqzrLyVtk_0 person HVkFV2q27S0_1 person HVkQkPaQbrw_0 person HWAW-J3ZpIs_0 cow HWA45moBwMo_0 horse HWEI24n2tHY_0 person HWItJuo6DSM_0 bus HWXgDvYdlHE_1 person HWZSmtWVH54_0 person HWZenKFJqkY_0 person HWZenKFJqkY_1 person HWZenKFJqkY_2 person HWfpkRSnZp8_0 train HWfpkRSnZp8_2 train HWjaeLf99dU_0 bear HWr9Kqi0B2A_0 person HWsTMfZok5E_0 person HWtKIjJacjk_0 person HWtyII4CMWg_0 car HWtyII4CMWg_3 car HW7FTNqTKhs_0 train HW7yQK_j65g_0 horse HXARJhNURSs_0 person HXH_F5SX6FU_0 truck HXH_F5SX6FU_3 truck HXH_F5SX6FU_1 truck HXKnqbEGfVw_0 bird HXKnqbEGfVw_6 bird HXKnqbEGfVw_1 bird HXKnqbEGfVw_2 bird HXKnqbEGfVw_3 bird HXLA3nbxgh4_0 person HXWoqdza4oA_0 dog HXaAJtjX1mE_0 bicycle HXaAJtjX1mE_2 bicycle HXaAJtjX1mE_1 bicycle HXa-0NlFTP4_0 person HXcSrTLsF9c_0 train HXhYYfE4uN8_0 person HXvgiezvrYI_0 truck HXx4tRTfGRM_1 dog HX0kjr3XYHI_1 bear HX7P1ipPByA_0 dog HX-gTvdUaOE_2 motorcycle HYLAdzbqvC0_0 person HYWEWmMMrsU_0 cat HYW3dAv02gE_0 cow HYW6VucwAEg_0 person HYXFGMzivds_10 truck HYbuNzqXmyY_0 person HYiN6skKjfY_0 knife HYoonHvZXCc_0 motorcycle HY1aAYxxlQo_0 person HZC5bba_V4Y_0 knife 
HZJ-JQkt590_1 bicycle HZKExvpKLQ8_1 person HZLdGfto2mI_0 car HZSPPN3TMx8_0 bird HZZadt4SIl0_0 dog HZceU_BV2GM_0 person HZceU_BV2GM_1 person HZceU_BV2GM_2 person HZd4rCCsNMs_0 skateboard HZd4rCCsNMs_1 skateboard HZd4rCCsNMs_2 skateboard HZkmrVeoUV4_0 person HZscUISrdww_0 person HZ-tGW__JOI_0 cat HaE1N8Q1b7s_1 train HaMpIMApSi8_0 person HaO3z-4gcBs_2 train HaRliuOtm7s_1 person HaiLotzzEXk_1 elephant HaiLotzzEXk_2 elephant HaiLotzzEXk_0 elephant HarW34izH-M_1 person HauA239AM7I_0 dog HavxbX8tng0_0 person HayoEz1x5Ks_0 person HayoEz1x5Ks_1 person Hay4Nx9S5-k_4 bicycle Hay4Nx9S5-k_1 bicycle Ha8XGRvxQxs_0 person Ha_OuYxLXIs_0 person Ha_w-xJsHAY_0 zebra HbBCtCXKIEE_0 person HbH7DpR0WUw_0 person HbJufGCjdSE_1 person HbKh31cncOI_0 bird HbLoxqqdYsQ_0 cow HbQu1mfGg4c_2 elephant HbQu1mfGg4c_3 elephant HbQu1mfGg4c_0 elephant HbQu1mfGg4c_1 elephant HbcyjRGbMBY_0 dog HbcyjRGbMBY_1 dog HbhmBauZqxE_0 horse Hbq35QImz2w_0 person Hbq35QImz2w_1 person HbuCy2fsJk8_2 knife HbyKQdGpxhA_0 boat Hb3INTcuOVk_0 person Hb5zCzD4J_E_1 train Hb5zCzD4J_E_2 train Hb5zCzD4J_E_3 train Hb5zCzD4J_E_6 train Hb5zCzD4J_E_7 train Hb5zCzD4J_E_8 train Hb5zCzD4J_E_11 train HcBQXS22BDs_0 person HcJTaK6Q9P8_0 person HcXN4Pwnaeg_0 horse Hcfxwdbwk8c_0 person Hchet3FQwII_0 person HcxL3_INS_0_0 person Hc5ZM6UWTbY_0 person f4OI46BYh08_0 skateboard f4Oj9uMeFdI_0 elephant f4PgAt4YpfE_0 cat f4P-R7h_gTU_0 person f4QyVWC6yrw_0 person f4XkIcezAd8_0 person f4XkIcezAd8_1 person f4Y2tjwOV2k_0 cow f4Y2tjwOV2k_1 cow f4bys9o_Z2M_2 bird f4s0cImpNBM_1 cow f4xLPprxm30_4 knife f49BXPlU-iI_0 knife f4_Mfc9Ccg8_0 truck f5BIXG_nLok_0 bus f5HsrI3Codk_0 bird f5J7yrE24eY_0 person f5LuupUslCU_0 person f5Q2iD7VUx8_0 skateboard f5W37dv91tU_0 person f5apNjAecEc_0 person f5bVoAXze0Q_0 motorcycle f5d1IXK1Tz0_0 bird f5rzpIRd4wA_0 train f5wHsLucnf8_0 person f5zEWaDr1jg_0 cat f50eMXA_-bM_1 person f50eMXA_-bM_0 person f53Jmsa7Jkc_0 person f6AcbdJ77A4_1 train f6E2ODGGF28_1 person f6Px5vjTeRI_1 elephant f6Px5vjTeRI_0 elephant f6Px5vjTeRI_3 elephant 
f6Px5vjTeRI_5 elephant f6UBVcEIt3I_1 person f6cXiuO-MvQ_1 truck f6dVANLzPTY_0 person f6dVANLzPTY_1 person f6o6ukW_Qog_2 bear f65c6sEDtkE_0 person f7A6AOC8fOg_0 person f7A6AOC8fOg_1 person f7ExsvPto-E_1 motorcycle f7Fs7-jGglk_1 bear f7GJgMh9xt4_0 person f7WvltLziTI_0 boat f7cI-B4pJso_0 cow f7kLnCuNTQo_0 cow f7lmZQGcfBA_3 elephant f7lmZQGcfBA_4 elephant f7lmZQGcfBA_0 elephant f7lmZQGcfBA_1 elephant f7oBEoL94vw_0 person f7pnt1rB9kI_0 person f7x074oihas_0 person f73BEqi2_DM_0 person f7-S_iQAyKU_0 car f7-htlH5qd4_0 bird f7-htlH5qd4_2 bird f8A1o9Nbs64_0 skateboard f8BXIJnggCI_1 boat f8BXIJnggCI_3 boat f8BXIJnggCI_4 boat f8Dp8Yvyr_0_0 person f8PVrlhAIV4_0 person f8T4DHNu6MY_1 truck f8ZxXHSqC_8_0 boat f8cW6kw6240_0 person f8mzzGhPBaw_1 car f8q3fKwf5PY_0 knife f8yFyIwDCQ4_4 giraffe f8zLCa1oGOE_0 horse f8z83D9vGPo_2 knife f80hjE6vabs_0 person f80hjE6vabs_1 person f84ypk41ULc_0 elephant f9H0LrBLc9Y_0 person f9H0LrBLc9Y_1 person f9H0LrBLc9Y_3 person f9H1bUagACA_0 horse f9H6UaPUITk_0 cat f9LOlCLfsJs_0 person f9N4Jxt-kUs_1 knife f9N4Jxt-kUs_2 knife f9TCFTluRIc_1 bus f9e12AC1jXM_0 bear f9oWC3kSP1M_0 motorcycle f9ovukmKaq4_1 person f9sPt8HIN0w_1 skateboard f9sj-0ZFV6E_0 person f9sj-0ZFV6E_1 person f9v2ONFCiwQ_0 person f91XzUXz11U_0 person f96d9EwxAB4_0 person f9-IyW9tVLY_0 person f-FxqFk0TdM_0 person f-JXaNm7TBw_0 person f-J7SQBHRN4_0 truck f-Yei4idfG8_0 airplane f-dhfS-geuI_1 elephant f-dhfS-geuI_2 elephant f-h9L-PN1ZM_1 bird f-iLJUDdrD8_0 person f-niuVrgiIc_1 person f-rp_CghH-E_0 skateboard f-s-4lM4qPA_0 truck f-w51BH60RQ_0 person f-1WVe76te0_0 cow f-4EyKUawVo_0 bear f-7ZEGsCz9U_0 person f_GKi-DGmzM_0 person f_Gf2hpt7y4_0 giraffe f_GudF8uST0_0 person f_NsA6enCZE_0 person f_OOyDOAAOU_7 elephant f_QhMhkyUSY_3 truck f_QhMhkyUSY_1 truck f_QhMhkyUSY_4 truck f_Us8TvJMUQ_0 person f_VwDCt9HTc_0 dog f_WQIaZ5PjY_0 boat f_bXOtZjzfo_0 person f_b0IaRqtbs_0 person f_jLGz53IpQ_0 person f_jLGz53IpQ_1 person f_mo54sXCc8_1 person f_mo54sXCc8_0 person f_rC1JIAMBU_0 person f_wk-NOqceY_0 horse 
f_yMF9tkk70_1 car f_yvJuTzFHc_0 motorcycle f_yvJuTzFHc_2 motorcycle f_2I0S-EYu8_0 dog f_3x9qJXCjA_0 person f_49EFLQ02I_0 person f_8S2hHC2rc_0 bicycle f__fXHkVh5E_1 cow gAKFUl9e_kg_0 person gAQ92hISW6g_0 person gARNWQDyaYM_0 boat gAYbqApcfGs_0 person gAdIZN7_0SM_1 airplane gAeHmfC6t5s_0 cat gAetQXcftXM_2 dog gAnOylz1kDY_0 person gAnmF0EFcB4_2 elephant gAorjWC_59o_0 cat gAo9Rsd6xwg_0 cow gA2FDYNulg8_1 person gA22uEcTAuY_1 dog gA84cp5Keqk_0 horse gA_a2Ajm7B8_1 horse gBFsvbfVaLg_0 person gBJgWZcXu9o_0 person gBK7NwUcSoY_1 person gBOpan7nm6M_0 horse gBOpan7nm6M_1 horse gBPipHCII3M_0 bus gBRc8zqsL78_0 dog gBUOzZPs_o4_2 person gBUOzZPs_o4_0 person gBYqrtFnN_Y_0 person gBYqrtFnN_Y_2 person gBeaBC0u9cQ_0 person gBhKhiEJUCM_0 horse gBiq_BH15FM_0 dog gBoebgAjbVw_0 person gBoebgAjbVw_1 person gBs3hPLJTGs_1 horse gBwCej92lKg_1 person gB0wConR2VI_1 skateboard gB2QHXkiiHs_2 elephant gCDBnQV_G3c_0 cat gCDBnQV_G3c_1 cat gCGtBmntCiI_1 motorcycle gCHegjuq0os_0 person gCHegjuq0os_1 person gCI1E3Hezdo_2 cow gCI1E3Hezdo_1 cow gCTp3CdMHCo_0 person gCT0VAdPm98_0 cat gCuOoA6aZ5U_0 cat gC7K3OeQFHo_3 bird gC7XtkA9y_Y_0 dog gC-xUbdM-tU_0 person gC-xUbdM-tU_1 person gDAPPFBC9Gw_0 train gDEpD9ek-O8_0 skateboard gDGLrPPl_PU_0 cat gDMsKJ61KPo_1 skateboard gDOGAHsBM_o_0 person gDTs0BOj8Fw_0 cat gDU0hHsqtbU_3 knife gDU0hHsqtbU_5 knife gDU0hHsqtbU_0 knife gDVGs8wTXCQ_0 cat gDkDXOm8z5Q_1 cow gDkDXOm8z5Q_0 cow gDk-zDBsv7g_0 dog gDnSIxaiPzk_0 person gDn3-DCSgNg_0 train gDsBFuJE6D8_2 dog gDvOoWXI3yg_0 person gD2GATPADlA_0 person gD5_x_Bz1z4_0 person gD5_x_Bz1z4_1 person gED4_ImWufA_0 truck gEE_GCrAqF0_0 person gEJi9Jawk2A_0 person gEOxDCDD97k_1 horse gESEn7ZZELM_0 person gESEn7ZZELM_1 person gEai3uMvvFg_0 airplane gEai3uMvvFg_3 airplane gEai3uMvvFg_4 airplane gEhLmQnM720_0 car gEu4mV0DWRQ_0 person gE0ZQD1rCy8_0 person gE0mBxOEwRI_1 skateboard gE0mBxOEwRI_3 skateboard gE8ErAnVuzY_0 bird gE8ErAnVuzY_2 bird gE-GVN9ErhI_0 person gFEnoylVci0_0 person gFac0jUOjCE_0 horse gFcIMdm4qtI_0 train gFdHQTLSmnc_0 
airplane gFfVZSPVYmY_0 person gFiSl9m-w0k_0 person HdBc9ySq76E_1 bird HdCyMGZFJhM_0 person HdFYXjdN5_8_0 person HdO2lmXvENQ_0 horse HdR6VoZEwAU_0 cat HdSXU0fhHbM_0 person HdT_9pXdxuc_1 person HdbZzqJGLo8_1 cow HdcXcqUlgI4_0 skateboard HdhKF0UWx4g_0 person Hdh3nOzwVW8_0 person HdjbDB8UvCY_0 person Hdo3_NQiVKw_0 knife Hd85XlwoOMc_0 person Hd-wT5OTZDE_0 person Hd-wT5OTZDE_1 person HeIrGQnIMOE_0 dog HeLNz5XJe08_0 person HeTGT7JfvB0_0 person HeUD1Hrzswg_0 bird HeYNsU-PKJs_0 cow HeYNsU-PKJs_1 cow HedUVNznPK0_0 car HedUVNznPK0_1 car HeoyKd78htI_0 person HeoyKd78htI_1 person HewdFRJAXH4_0 person He08dewEgbY_3 motorcycle He08dewEgbY_0 motorcycle He1OQxCPk_w_0 person He5cucK-e48_0 person He6bAMDkCss_0 elephant HfDHvE46LYU_1 bird HfDzCPRQ2nw_1 elephant HfEXlJ0dOhU_0 person HfEZYvYqq_Y_0 cow HfHNi93ZHoo_3 cow HfHNi93ZHoo_1 cow HfOcLeLWchM_0 person HfZ871F0xSo_0 cat Hfnnbr4CeTg_3 bus HfqI5BIpp0s_0 person Hfq3_YJ7BpY_0 motorcycle Hfq9JFmquE4_0 person HfvJc2dxUR4_0 boat Hf1Iyyz2DMY_0 person Hf1Iyyz2DMY_1 person Hf8JWsbSYYk_0 person Hf8-8h45g-g_1 elephant Hf8-8h45g-g_0 elephant Hf8-8h45g-g_2 elephant HgDimNCaxF0_1 bear HgFCKM4ndEc_0 car HgMYuCtsOwc_0 person HgMYuCtsOwc_1 person HgO57Npp9Yg_0 train HgexaoNeZJk_0 person HgiYmNrxUzg_1 person HgkeptGXNt4_0 motorcycle HglF9x-ORXU_0 person Hgr5__oevds_0 person Hg2vqnLAc8I_0 dog Hg4DJ-x85Dw_1 elephant Hg-R_RMIEN8_0 airplane HhASNiFpJlw_0 truck HhF6cAtp7Xs_0 knife HhGGJNmwWHk_0 person HhVSLU0A-wk_0 car HhcMy4KZ9mY_0 skateboard HhfSUB2LOTU_0 person HhiUVwHWmwM_1 person HhiUVwHWmwM_2 person HhiUVwHWmwM_0 person HhjGAeK-XWg_0 person HhoRf1Ovlf8_0 person Hhvq-cwBJgo_0 person Hhwzl9x_m34_3 cow HhxV27YhiqI_0 skateboard Hh1xD0M0N8Q_0 person Hh6x850teNQ_5 airplane Hh6x850teNQ_7 airplane Hh6x850teNQ_8 airplane Hh6x850teNQ_9 airplane Hh6x850teNQ_10 airplane HiBUWbOyqcQ_0 person HiGZ2EdJh2o_0 person HiMItbtVHcY_0 cat HiMItbtVHcY_1 cat HiNt0G1AIO4_0 motorcycle HiTE5nqzjBw_0 zebra HiUz61ffgHA_0 person HiZDjdREbmc_0 umbrella Him7gJ7sArU_0 person 
Him7gJ7sArU_1 person HinGUsliCKc_0 truck HirBTVnhNls_0 cow Hi4ITByGP0Q_0 person Hi4mzrYdRBQ_0 horse Hi4mzrYdRBQ_2 horse Hi4mzrYdRBQ_3 horse Hi8Ey0o5mCQ_1 person Hi-7ZtG_JWI_1 person Hi_YHp3Jz48_0 cow HjAtN_MbguE_0 person HjLLTWwaCB8_0 horse HjNfykX021M_0 person HjNfykX021M_1 person HjgdNiVfO9M_0 skateboard HjlX9nu9Vf4_0 person Hjo13y8dFy4_0 motorcycle Hjt_y0CW-dY_0 person Hjt_y0CW-dY_1 person Hjxd2cno65M_0 skateboard Hj0J8FVxBjg_2 person Hj0J8FVxBjg_0 person Hj0J8FVxBjg_1 person HkApyQz8MTY_1 horse HkQ4tzUFCUU_0 truck HkW_wLkAKpg_0 person Hke6h3Sv5bA_1 bicycle HkzYNIDq0q4_0 train Hk45sdCRh9g_1 bear HlEkgK08UfY_1 person HlTQbPXnzu8_0 dog HlWsih27OmA_0 bird HlaPVZM-53c_0 person HlfpirtC6oQ_0 person HlmuHGoCGAI_0 cow HltyUzvtugM_1 bicycle HlurUBv4bh0_1 giraffe HlurUBv4bh0_3 giraffe HlurUBv4bh0_4 giraffe HlwSaYwFLRE_0 horse Hl3qik9GRX4_0 person Hl5MXwWiXWM_0 person HmDDLtJcD5g_0 person HmORePbYJkk_0 skateboard HmPvsdwo_fY_0 dog HmRm2phIiGo_1 bird HmY8zwmIiac_0 cow HmaGylwEFxw_0 person HmbTCfB3Vkg_0 person Hmk4dZnPtRY_0 bus Hmn3xf-zqWI_0 person HmqV_7hAxdw_0 person Hmr0jbygomI_0 giraffe HmwxDK0zo6U_0 person Hmyj1zKgToA_0 person Hm0kxS31F_U_0 person Hm0kxS31F_U_1 person HnNJeASG0-M_3 person HnNJeASG0-M_4 person HnNJeASG0-M_2 person HnNzkYDhWks_1 person HnNzkYDhWks_2 person HnNzkYDhWks_0 person HnP7iXcgg8g_0 truck HnSHJ_iCdi4_3 truck HnSHJ_iCdi4_1 truck HnUrGKpAsOk_0 cat HnbNOJpzYPE_0 person HnjhdtM8qSI_0 skateboard HnptRKjBUF0_2 boat HnwYRWj3fk4_2 knife HnxaJbaAiUI_0 person HoH5exlgIxk_1 skateboard HoLifxKZUpI_0 person HoLifxKZUpI_2 person HoLifxKZUpI_1 person HoNs_4V1pNs_1 bear HoNs_4V1pNs_4 bear HoP_nMgAxAk_0 boat HoeeRkyNozc_0 cow Hon64st5_6g_0 train Ho2ixBE8dzE_0 giraffe Ho5TcUOlb3Q_0 motorcycle Ho5o7aBqNAc_0 person Ho6N0OgD-1M_0 person HpBBda_pbf8_0 motorcycle HpGr16tW9dk_1 person HpQ90KkREGo_0 person HpUPD5_WMYI_0 train HpZ3IzUfsGg_3 bus HpbQsLdUHN4_0 boat HpjyvLHus3Y_1 skateboard HpkTeQdQ03Q_0 skateboard Hprw9lNWGGs_0 person HptcjVcfzgY_0 cow Hpwk73qvroU_1 
elephant HpzTTAS6Qt8_0 person Hp0SQy5w9Q4_0 person Hp-eaTbVfLY_1 bear Hp-2Gb7Fwns_0 cow HqxhhM71S2g_0 horse Hq1KLztJBrE_0 person Hq6tGHLzg4Q_0 person Hq814Tfrblw_1 airplane HrHPBJOnFgg_1 train HrHPBJOnFgg_0 train HrHPBJOnFgg_4 train HrHPBJOnFgg_6 train HrdVu5J3rZQ_0 person Hr-keYNRBhA_0 train HsLZwGFHYUg_1 horse HsNcZZ6iwHQ_0 person HsOiHc1moVk_0 person HsOkCwZLv_w_0 bus HsOkCwZLv_w_3 bus HsOkCwZLv_w_1 bus HsOkCwZLv_w_2 bus HsR2xk4I1as_0 person HsVKw_8AQtM_0 person HsZgeesgCZQ_0 person HsjVUPs3XB4_0 boat HslbDMoiABY_0 car Hslld67XdsY_0 person HswufOfUGyk_0 truck HsyscFWIPZs_0 bus Hs0HRqYcYqA_0 car Hs6bVSOu98U_0 dog Hs_vQr20HdQ_0 skateboard Hs_vQr20HdQ_3 skateboard HtErHV_tZqs_0 elephant HtIbfC8DDos_0 truck HtNaGNO6nnc_0 person HtRiNzzfakk_0 person HtUPhgHKN9c_1 boat Hth8t7jhKPs_4 horse Hth8t7jhKPs_7 horse Hth-I5KYVsI_0 cat Ht054jKgWfE_0 person Ht9C8ABsxrg_0 person Ht_bczKGV-0_0 person HuNIgJEUelo_0 person HuNIgJEUelo_1 person HuOzcY9ybpo_2 dog HuVl7peYYF8_0 person HuVoecmBgpM_2 bird HuVoecmBgpM_1 bird HuZPTuSe7Zw_0 person Hue6Q5JKEKw_0 cow Hun4T6fv3cs_0 person HuqC6CX9uRA_1 person Huyd-7WlWWU_0 person Hu3xpcZqwRg_0 person Hu9DGxLcg2c_0 person Hu-VYy60p64_0 person HvHJi-EkL8c_0 skateboard HvIubGltpPY_0 dog HvLq5xDKM6E_2 bicycle HvP4rcOll6k_0 person HvQGnFuiwtg_0 cat HvTvaPx2hXw_1 train HvhkLhJ4YFQ_0 person HvuLPfhVT3s_0 person HvyIg5RMLbU_1 person HvyIg5RMLbU_0 person HvyzpBvy40o_0 person Hv5sH0eTE_M_0 dog HwSP55CmiCk_0 person HwS3weg4aQc_0 dog HwS3weg4aQc_2 dog HwY6kiQlICc_0 person HwdEYJ2bZkg_0 airplane HwdyzravQpY_0 cat HwfLycybCD0_0 motorcycle HwgmR0Qlm_I_0 person HwipRH29Hr0_0 bus Hwnqezsko-Q_0 person Hwnqezsko-Q_1 person HwxnH--ot8o_0 car Hw0JhQaRYcA_0 cow Hw2Bhz2SkUI_0 person Hw2Bhz2SkUI_1 person HxMniz8r1x4_0 person HxP056QWsGY_0 person HxP056QWsGY_1 person HxaFZyog34E_0 person HxaFZyog34E_1 person HxgU1Dh8wMs_1 person HxiBpvG82Ys_0 motorcycle Hxq1wNRv5Yg_0 person Hxv6y6I4mvE_0 horse Hx19D3w4xGI_0 giraffe Hx_Z9TOIV8U_0 motorcycle HyJgfYNotwk_0 truck 
HyUY7bqdm9Q_7 dog HyUY7bqdm9Q_0 dog HyVLne6RE-A_0 person HyXjUWAQ970_0 skateboard Hygs9OBUgg4_1 person HyuQCu-z558_0 motorcycle HywSTw3dtgs_0 person HywSTw3dtgs_1 person Hy4E2NZEc34_1 train HzAOQnmw_bo_1 elephant HzAOQnmw_bo_2 elephant HzCClfShiwM_0 person HzCClfShiwM_1 person HzDzb9xxc6o_0 person HzESeh3ZV4g_0 person HzHWWeZEU6E_1 skateboard HzJgpBBIk1o_0 cat HzLm3QfIx9w_0 person HzLm3QfIx9w_1 person HzXBY-SJECY_0 horse HzYY4-iAvrk_0 cow HzdSxrJ2oBw_0 skateboard HzkmlCJwvqo_0 horse Hzlcc_lAGVo_2 skateboard HzqIVSJNXAU_1 person HztbwJhPXyk_0 person Hz6I6jLi4NA_0 dog Hz8qayZDGpU_0 person H0Adt_c6kJo_2 elephant H0EEB1bPOjE_0 person H0VjOJvg49Q_0 bicycle H0Ym6NE2ny8_0 cat H0gWl9KRbHo_0 person H0k2WZec6aA_1 train H0k2WZec6aA_3 train H0k2WZec6aA_4 train H0k2WZec6aA_0 train H0u061QsnHw_0 cat H0yhw97jkkY_0 person H0z8VqDW-vg_1 airplane H01F2fhFpr0_0 elephant H097WsXpask_0 person H097WsXpask_1 person H1C2ZZeeVs0_0 cow H1Hd5Japfbc_3 train H1Hd5Japfbc_0 train H1Hd5Japfbc_1 train H1Hd5Japfbc_2 train H1JIvu1dbbk_0 person H1JIvu1dbbk_1 person H1MTfTrQrE0_1 person H1d68B_jDjI_0 person H1hg-0_AS9A_0 cow H1xBJoYM7rE_4 truck H1xBJoYM7rE_5 truck H117IshzypA_0 knife H144B0rpQh0_0 person H144B0rpQh0_1 person H1-_3CvKDzc_0 bird H2Q-46IlKEc_5 truck H2Q-46IlKEc_6 truck H2RoEMwxEAk_1 person H2TqEPsubdM_0 bear H2iTxNLOK1Q_2 motorcycle H2iTxNLOK1Q_0 motorcycle H2iTxNLOK1Q_3 motorcycle H2vkpfO2yqU_0 person H22P5Z4GfkE_0 person H29Xe5gG_-s_0 person H3A2DSw_xNU_1 elephant H3GcVWKTVd4_2 truck H3NrFrjQlfc_0 person H3exbzmmPQY_0 person H3jC0oToDjU_2 person H3jC0oToDjU_3 person H3jC0oToDjU_0 person H3o1VsopVFM_1 bicycle H3o1VsopVFM_2 bicycle H3pifBCagTI_0 person H30IPtBzf_s_5 skateboard H30ifg3HO_I_3 dog H33IRr1Z3-w_1 train H36UOsilz4M_0 person H4Hp-UJYZ_g_0 bicycle H4JiUp8EH3s_0 zebra H4VZD26aqe8_0 skateboard H4VZD26aqe8_1 skateboard H4bN1hcXw9Q_1 person H4dTHFeYa30_0 motorcycle H4eE_LAeWXQ_0 person H4eE_LAeWXQ_1 person H4gxLA7vTo4_0 person H4lBmXOi3Uc_0 dog H40G2dsVha4_1 train H41XJMKpfFM_0 bus 
H42hQSjU97o_0 knife H5NqMNaMEiM_0 bird H5YO56LD_dY_0 elephant H5YO56LD_dY_1 elephant H5iHzuWmtDw_1 dog H5sijKl_Xi4_0 cow H50EXfjT2O0_2 airplane H50EXfjT2O0_0 airplane H50EXfjT2O0_1 airplane H50-_mqAU14_1 cow H55Ru4hgats_2 elephant H55Ru4hgats_3 elephant H6OhYxXS1So_0 cat H6UwkC3sYic_0 cat H6ZHYEOcjCI_0 bicycle H6ZHYEOcjCI_1 bicycle H6Z8sZ34ZGw_0 motorcycle H6dXJIZnH-k_2 train H63oHdGMBAs_0 bird gFunUi36tVM_0 horse gFunUi36tVM_1 horse gFvhLM1k-IY_2 truck gFwCuQBtZiU_1 umbrella gF7IM-CiOdU_7 bicycle gF7IM-CiOdU_0 bicycle gGBEKYXUhbE_0 truck gGMxVO2zmP4_9 bird gGMxVO2zmP4_1 bird gGMxVO2zmP4_2 bird gGMxVO2zmP4_5 bird gGMxVO2zmP4_8 bird gGSCGkm00jM_1 bicycle gGYN2hnw1SQ_1 elephant gGdKtY4p1E0_0 airplane gGt9CVOzJOI_3 knife gGzaN_8PxZw_0 skateboard gG8tfb-eSuo_0 train gHC3HqRbW6g_0 elephant gHF9PM2MVuw_1 train gHvzU7dfBU8_0 giraffe gHyK46CyQtA_0 cow gH0LLPcn-H8_0 elephant gIBZr7Mh05k_0 bird gIMq_fnjtSM_0 cat gISy0wedyW4_0 boat gInHAdlbB60_1 skateboard gIsXFCo7Nt4_1 dog gIxuS1GwPPo_0 train gJV63DGM7Ew_1 car gJa0yNDBFio_3 person gJa0yNDBFio_0 person gJa0yNDBFio_2 cow gJfD9eHnos4_1 elephant gJn5fXk7dCs_0 airplane gJuZGVWuQQ8_2 bicycle gJ-k_oHkqYc_0 cat gKHR68FmKE8_3 airplane gKHR68FmKE8_0 airplane gKHR68FmKE8_4 airplane gKmF78OWCUc_0 motorcycle gKqUwiPYSh8_0 motorcycle gK7dud30V7k_0 giraffe gK_K33gm3SA_1 motorcycle gLQWgnWqQ1Y_0 bicycle gLRU7lXCgNw_1 dog gLRexWYaW_Q_0 skateboard gLbADp0AlZU_0 bird gLtnBhTBpkA_1 boat gL3uBv5NWJU_1 bus gL7JySv9H4I_0 bicycle gMAW4Am5_pc_0 cow gMBTewi9VZg_0 cow gMCCgBzug_U_0 knife gMFgEtqbTXs_0 boat gMJuszEOURk_0 cat gMMJH4UYboM_3 bus gMXt8X-xC_g_0 dog gMlNev_l4Yg_0 bus gMlhd1gczF4_0 airplane gMsGe7w79Hg_1 car gM9tFNvc1xw_0 cow gNDSQ2l9FYg_1 elephant gNMkDmfkZ1E_0 motorcycle gNcGXjn7g9o_0 skateboard gNwKVPIi010_1 skateboard gN2aKPpTpzQ_1 dog gN7-cLfUlt8_4 giraffe gN7-cLfUlt8_6 giraffe gOOB0RZmnUA_0 cow gORdlzUa3nQ_1 bird gO48FZrUm88_0 skateboard gO-8RNI2Puc_1 dog gPhcXlQLLRU_0 horse gPrWvEE7yjw_0 cat gPteWZyyJeo_0 cow gP3SQErTTOg_1 motorcycle 
gQBW4py4GhY_0 skateboard gQEGmIhhEQ4_0 train gQEGmIhhEQ4_1 train gQEGmIhhEQ4_2 train gQFqppfDRRk_0 umbrella gQLZ5H-n0Uk_4 knife gQVlREJXkik_0 knife gQWTTEHj5Hs_0 cat gQeqE3dgZoM_3 airplane gQe5gykuyi4_1 train gQpWY94Fx5E_0 motorcycle gQpuEhphXHk_0 car gQpxfwrF7Sc_0 bus gQ6AUvEXuaQ_0 bicycle gQ9HhxeKI4A_0 motorcycle gQ_SF2MtsUc_0 elephant gRFcteFGpLM_0 skateboard gRJGd_HzC-8_0 knife gRJpf6JwJeU_1 giraffe gRNKgw2D_mE_0 knife gRVrvJioWZ8_1 train gRoGrhv1ebI_0 elephant gRsOR1tKh8U_0 truck gR3ihf3rch0_0 car gSXDTJjj1jk_0 train gSi2fNTUsy8_0 horse gSlT3ALqvTM_0 skateboard gS0DTbVQ2x8_1 knife gS25yLrNO98_0 bear gS2-SAccVh0_0 skateboard gS7U-6Z8M2g_1 knife gS_9D3OWXAk_0 airplane gTqgARR0BBQ_1 boat gT27MQBhatA_0 skateboard gUDoTzwZlso_0 dog gUL0-NbHvuA_0 motorcycle gUMLascwbtU_0 train gUNCDmbzxq8_0 train gUbc_OUTnOs_0 airplane H7ONEeAkBFo_3 motorcycle H7ONEeAkBFo_2 motorcycle H7YUH_GBWdQ_0 train H8B-3STVp6E_0 cat H8LitQV6pNM_0 cat H8SccYIiPs8_0 zebra H8coORJpR80_1 skateboard H8k1E1i7AvQ_0 knife H9AQUC0N1zI_0 horse H9JfwPhdCjg_0 boat H9KjlXZYxJU_0 train H9KjlXZYxJU_8 train H9TUml4LflE_0 cow H9UTvMwaoRg_0 cow H9bbSssKl2o_14 umbrella H9eutGBn3zw_0 motorcycle H-C6EBylvh4_1 cat H-IoiGsEU5Y_0 train H-QKbNwtoH8_1 car H-gh485Om10_0 bus H-gh485Om10_1 bus H-kkRVEs3Bg_0 motorcycle H-uiufHSb3s_0 knife H-uvqjsUCLc_0 dog H-uvqjsUCLc_1 dog H-5Ynjv0dQI_1 train H-62b99sK_s_0 train H-62b99sK_s_1 train H_Ei1gRODpw_0 dog H_KMZLSAxMw_0 train H_iI201Iqws_1 truck H_iYHl4pFuQ_0 horse H_mRfG30Gzo_0 skateboard H_1O-OBZ3BA_0 horse H_6vxd3ckIY_0 cat IADSsAb2KSo_1 umbrella IADSsAb2KSo_2 umbrella IAFApeJ5FvM_1 motorcycle IAOiNYVeqzE_0 bird IAaINtcnO7A_0 bicycle IAcbsZcN_pM_1 motorcycle IAkSntQ2Aso_0 horse IAlz_evs7fU_3 car IApV0rfD9oQ_0 dog IAsXYmK1baI_0 motorcycle IAwKojHnvtU_0 train IBD9tJNb9_o_0 train IBFp5y96q78_0 motorcycle IBFp5y96q78_2 motorcycle IBKLgBXZFzw_0 motorcycle IBYJQU6-nGg_2 cow IBYg-hMbb04_0 knife IBm1C4qJtTg_5 umbrella IBm1C4qJtTg_8 umbrella ICQbVnaJL_0_0 bus ICZ4tinBQZg_1 knife 
ICZ4tinBQZg_2 knife ICZ4tinBQZg_3 knife ICg3W1-Prhk_0 elephant ICnAWjPDzRw_0 cow ICtLhp-qveM_0 boat IDCBO7W7xpo_0 cow IDNvFEra8mc_5 horse IDNvFEra8mc_1 horse IDNvFEra8mc_2 horse IDNvFEra8mc_3 horse IDNvFEra8mc_4 horse IDO6jw3u3_w_1 airplane IDcxChwEqDs_2 horse IDeGA2EV3WY_0 airplane IDeimFOIbVc_0 train IDmwsXLZKUs_0 cow ID1faW2L3rM_0 cat IEOg-ZulFR0_1 bird IEPYJyHfP2E_1 elephant IEYC-aYAQ40_0 boat IE5qZDd7tWw_0 elephant IFGohfPURX4_0 person IFfS7hatV0s_0 truck IFkUMGE7bbc_1 elephant IFkUMGE7bbc_0 elephant IFrHlldbUdQ_0 cow IFvO1O-6vqk_0 truck IHQvg9gYLjw_0 dog IHSCfRs-J38_2 skateboard IHY0eeHfBcY_4 truck IHjI35oW0T4_0 car IHxX0fKU9iM_1 skateboard IH3E7RS6Hn8_0 cat IH9BmEg26Cw_0 person IIBN7FGNNEs_1 train IIBN7FGNNEs_2 train IIBN7FGNNEs_3 train IIBN7FGNNEs_4 train IINTapIzzes_2 skateboard IIw0KKAeBeQ_0 skateboard II0JbbQq-Sg_1 bird II61z65eDCY_2 cow II61z65eDCY_0 cow II94vSsb4Uc_0 car II_okDlDaO0_0 cat gUt0vA8_1Ow_0 airplane gUvZ3RC9tEU_0 knife gU3SNUS1_ng_0 bicycle gU4mBoB-b7k_1 train gVAp7rt84ic_2 bicycle gVCrRXledlU_1 boat gVCrRXledlU_0 boat gVV-5JdLuXk_3 car gVXzT_h1SFI_3 horse gVXzT_h1SFI_4 horse gVXzT_h1SFI_2 horse gVaB7hwBhTA_0 cat gVjL5txcFMI_0 knife gVrTFXdPWJ8_0 elephant gVxqk8tLXL8_0 truck gV27xS9pqNQ_0 train gV3Xmwy3RKo_6 train gV3Xmwy3RKo_13 train gV9A5NfFexQ_0 car gWcacGgcxYU_4 bear gWlmYVY4kW4_1 bicycle gWnhQi-zfEE_0 skateboard gWpNWuo7vio_2 elephant gWpNWuo7vio_3 elephant gWsOR7UiwDs_0 airplane gWz5ZMzC58s_0 car gXBIzdmmHbA_1 bird gXEHUZgPCGg_4 bear gXFmghAzaVg_1 motorcycle gXGvO4k4xQY_0 truck gXHsyuynhso_2 knife gXW33K91X7c_0 bicycle gXn0Y5X5MJE_1 zebra gXn0Y5X5MJE_0 zebra gXt0u16Y6ZY_0 boat gY_Ey8Ps_ZE_0 cow gZhsGXSn5bU_0 motorcycle gZqGyIMgMbs_0 bicycle gZxcxQBlx0s_0 cat gZzmloffFW4_0 bus gZ8kZt451Ww_3 horse gZ92ZDty9wI_0 skateboard gaCEAVQd1-M_1 bird gaS7x3F3gpk_0 bicycle gaS7x3F3gpk_1 bicycle gaS7x3F3gpk_2 bicycle gaS7x3F3gpk_3 bicycle galykATgRC0_0 cow gaqS-4IaQ5c_2 bus gbA3ItatxL8_0 skateboard gbE0vzWpHj0_1 knife gbE0vzWpHj0_4 knife 
gbGl_-TnPjk_0 bird gbI95ZXEUz0_0 knife gbTTJah5oMw_0 elephant gbTTJah5oMw_2 elephant gbgbqiiEKVs_0 giraffe gcBaPcA_1_0_0 train gcExbr9FO94_0 giraffe gcJ7XqXHPwM_0 elephant gcT_dy3neEk_8 bicycle gcXhYL06Acs_5 bicycle gcYBNx0fUg8_0 truck gchz9HDvVDk_0 train gc80cGOHyKM_0 knife gdCpPYwBVlY_0 knife gdEBkAYaDPw_1 elephant gdELg0NrkdA_0 dog gdvUXfsBMIk_0 train gdzzJI7xjBg_0 train gdzzJI7xjBg_1 train gd2O-Z5dOIk_0 airplane gd4r5aA8jeg_0 bird gd4r5aA8jeg_1 bird geBwGOC-lX4_0 train geBwGOC-lX4_1 train geBwGOC-lX4_2 train geBwGOC-lX4_3 train geQCe6Cq5MU_1 elephant geQCe6Cq5MU_2 elephant geWChvEotKU_0 train gefGPLN-abw_0 person gfGsOzQ7gto_0 bear gfS7FJH6Vkk_0 bear gfUC20NWtjU_0 motorcycle gfVlQhN0BBU_0 bicycle gfuVNdXffSs_0 airplane gf1mvdt9kbI_0 horse ggIyqAThI1g_0 bird ggPHtWoCcKs_3 umbrella ggTFLaNIJck_0 train ggVLptkmsys_0 truck ggpz03j1REI_0 bus gg3sG7O2P-g_0 bus ghEfyxUaVGs_1 cat ghIGC_DOfuk_0 horse ghqqgJWnVEU_0 knife ghyp-SKVuC8_0 motorcycle giVGzMF1Yo4_0 skateboard giVGzMF1Yo4_1 skateboard gipHWMPB-W4_3 bear gipHWMPB-W4_1 bear gitOEvGnoYk_0 airplane gi9bnW7uLkE_0 cat gjGlUXCT9A4_1 knife gjK5A6cIEnw_0 dog gjRhqzTAkWw_0 cow IJFaomtLVDE_0 cat IJNUwvacbKY_0 cow IJVUMGoBSQs_4 cow IJXVtb2GeJ4_0 train IJdYiBYP31A_0 motorcycle IJlBmhH72m4_1 cow IJ6g4ZRBksE_0 cat IKLj0LJIMKs_4 airplane IKLj0LJIMKs_5 airplane IKLj0LJIMKs_2 airplane IKftyV_zwkE_0 skateboard IKqmWAu3GF0_0 dog IK7Mnvty4VY_0 person IK8IJWsxg3M_5 airplane IK8IJWsxg3M_6 airplane ILAGhYr9yts_1 motorcycle ILLYlwlFTzA_0 elephant ILmTjHZqkCo_1 truck ILqxie6aqXg_0 bicycle ILqxie6aqXg_1 bicycle ILqxie6aqXg_2 bicycle IL1HokSKOyY_0 cat IL9r35lU8So_0 skateboard IMD3U_DzO3E_0 motorcycle IMD3U_DzO3E_1 motorcycle IMde-053G78_0 horse IMulJdQXZvM_0 train IM7vwh5qua4_0 cow IM8dlwNTjXU_0 cow IM8v82x7ovA_2 train IM8v82x7ovA_1 train INFs2lfikXE_1 knife INULdzdrdys_0 horse INXkuJ9WvIU_0 train INZhGblywrk_0 bus INkhg9y4asY_0 bear INtj4nfjRA0_1 bear IN2TGHJrQEg_2 skateboard IN2TGHJrQEg_0 skateboard IN2TGHJrQEg_1 skateboard 
IOPYEZzmeqg_0 car IOPYEZzmeqg_1 car IOQuWawPM3k_0 bird IOfUvlEkN7g_0 bus IOiqrNof90k_1 knife IO3Z-ebx_f8_5 bus IPI2_GXx1tI_0 bird IPWixEFBDOY_0 horse IPfYf-nFKic_0 airplane IPfYf-nFKic_1 airplane IP1CH8MMir0_0 knife IQOfCy4FW8w_0 skateboard IQXAYnslAnc_0 car IQoVuUTZILY_0 airplane IQsV_hTCyMA_1 bicycle IQwk7Ge6Apk_0 truck IRK6-ixyaVI_0 elephant IRSbjN-mnJI_0 skateboard IRZBnQJoKiU_0 skateboard IRztQZ4bigY_0 car IR9A3u83crI_4 elephant IR-PGdIPgcE_0 skateboard ISAnMprDgCk_0 skateboard ISJW4GuahWg_2 dog ISSTEs8xDWk_0 umbrella ISYwpUKxHJU_1 elephant ISYwpUKxHJU_2 elephant ISYwpUKxHJU_0 elephant ISud5E9hZxU_0 train ISud5E9hZxU_1 train IS9s3kJzTcA_0 airplane ITCcMWC_RW8_0 umbrella ITbwhPVxFv0_0 umbrella ITrisbHlaJw_1 truck ITzBy7T7_fI_1 umbrella IT6TArZww6A_0 cat IT8VqGbdH_A_0 horse IT_zQ44PPOo_0 dog IUH4PYmObvU_0 dog IUO1sDZgGHs_0 bird IUdyfRMOyX8_0 elephant IUdyfRMOyX8_8 elephant IUdyfRMOyX8_1 elephant IUdyfRMOyX8_2 elephant IUdyfRMOyX8_3 elephant IUdyfRMOyX8_4 elephant IUdyfRMOyX8_5 elephant IUdyfRMOyX8_6 elephant IUdyfRMOyX8_7 elephant IUf7a2WuoBw_0 train IUgkMOA3siY_1 bus IUlDlS2KD-k_0 bicycle IUlDlS2KD-k_1 bicycle IUzpvnXep7M_0 bear IU7x7I53cng_0 elephant IVFq204Rr9c_0 airplane IVHx3I13xdQ_0 boat IVSJSu0PlsI_0 train IVVFeaTw6IE_0 bicycle IVjCZS2Fo7k_0 bird IVpmCnL5cE8_1 giraffe IVrBPzhFMi8_1 motorcycle IVrBPzhFMi8_2 motorcycle IVzxeeJEtiY_1 bear IV6EMw4XYco_0 skateboard IV6EMw4XYco_1 skateboard IWCZ1PDW99k_0 motorcycle IWVIIKxipc8_0 motorcycle IWVIIKxipc8_1 motorcycle IWn16DCfLbc_1 knife IWumeAEXWVo_1 boat IWu47p4l06Y_5 umbrella IWu47p4l06Y_6 umbrella IWu47p4l06Y_3 umbrella IWu47p4l06Y_4 umbrella IW1cFMDjPUk_0 bear IW2mFJ8iw6Y_0 bird IW4ZnmQeNtA_1 elephant IW4g0kfA3GE_0 truck IW5Vgh3SE-I_4 elephant IW7TwQ-hY7I_0 motorcycle IW7TwQ-hY7I_1 motorcycle IXTgztKfRQU_0 skateboard IXVCCLG3_cw_0 bird IXyV2vpIEA8_0 dog IXyV2vpIEA8_2 dog gja4H3sGrqQ_0 car gjdlZhmnGbk_0 airplane gjfdI7hO92E_0 bird gjquLAxFRWw_2 umbrella gjx4xu1TyWU_1 cow gj7W2zjQApw_3 knife gkEoTLpAw7g_0 
airplane gkLRnt1OCH4_7 horse gkRqNmGQbPI_0 skateboard gkXKCuc0Moc_0 skateboard gkXKCuc0Moc_1 skateboard gkb4Ya5QW9M_0 bird gkb4Ya5QW9M_1 bird gkf0Bcsuhlc_1 car gkf0Bcsuhlc_3 car gkf0Bcsuhlc_4 car gkiUpdrObXo_1 elephant gkz49y5qcvc_0 horse gkz-LCZcGtc_5 bird gk1x_qYyDl4_0 cat glNWqIolkq8_0 skateboard glOskJOtnTU_0 knife glOskJOtnTU_2 knife glSdaND81E8_0 person gltHxIp_ma8_0 bird gmCT9tUPTB4_1 giraffe gmdxOMQMgnw_0 airplane gmnvPoB2cNY_0 motorcycle gm53_sbr85Q_1 bird gm53_sbr85Q_2 bird gm9M-m4mCZ4_0 car gm9M-m4mCZ4_2 car gnA9QVNkmTU_1 knife gnD6mU9A2oo_0 elephant gnEttGTQqQ4_1 train gnEttGTQqQ4_0 train gnF9YJM1jaE_1 cow gnGvXHS4UDs_0 airplane gnM9SRiFh7M_0 truck gnM9SRiFh7M_1 truck gnPrHGB85WY_0 bus gnTj3krZROI_4 boat gnVo44q-XDI_0 knife gnb1N_MLdcY_2 elephant gnwCzU63_YY_0 person gn2XuCFK-hE_0 truck gn2bME2rmGw_0 truck goIfg0C9kmM_0 dog goOIZE0j6DM_0 bicycle goSyNORcJ00_0 airplane gok9kHQ77dY_0 skateboard gollBTymf8I_1 bus gomnpeJd5zw_0 boat gonzAOezSOQ_1 train gonzAOezSOQ_2 train gosq350N9dI_2 skateboard goyIWrU1Lbo_0 cat gpBoXY6MM5E_0 dog gpEiPRMcPwo_4 bear gpY-o8xPA3w_0 bicycle gpa4WfWCLa0_1 elephant gpa4WfWCLa0_0 elephant gpa9p4XNeKc_3 bear gpbdiDEPd-s_0 skateboard gpjqG97-SyQ_0 horse gpmdLMUX53k_0 bear gp2SDJHMADo_3 horse gp2SDJHMADo_0 horse gp2SDJHMADo_2 horse gp9q0jvTKo0_0 bird gqNgT7LxZSQ_1 bus gqOfm9XTr6M_3 airplane gqOfm9XTr6M_0 airplane gqOfm9XTr6M_1 airplane gqOfm9XTr6M_2 airplane gqbDkeOx0mA_0 motorcycle gqgQpw4DWZA_0 giraffe gqhweewmNn8_0 skateboard gqkLzCkKKtE_0 skateboard gqucExXpPys_0 car gqxvRzuWcrI_0 bird grBVFo1wSjs_1 bird grFPTYaKb7Q_0 bus grI0uf6IwBw_0 bear grNkPqf-ySE_0 dog grWw42izM6M_1 train grWw42izM6M_2 train grWw42izM6M_0 train grbP7mKMX_A_5 airplane grbP7mKMX_A_1 airplane grbP7mKMX_A_4 airplane grdEE264TwM_0 motorcycle grdIYaNewv0_0 motorcycle grhIgcHgpOw_0 bus gsCvhqZCWX0_0 dog gsUrGSN-k00_0 horse gsbJ13WiSvE_1 horse gsfIYIQ1siA_0 skateboard gsvn88OsH_8_3 knife gsv7RJk7dtY_0 dog gs_C12A8Wq4_1 bicycle gtFIMtVrAGk_0 bicycle gtNJSexRjxE_0 
car gtNdVTTd0tg_0 bicycle gtNdVTTd0tg_2 bicycle gtOa6rSatLA_0 cow gtQ_uFTKEck_1 horse gtii5vwjSTY_1 dog gtuj1cOmYSs_1 train gtuj1cOmYSs_3 train gtz5ClHTSVo_0 cat gtz5ClHTSVo_1 cat gt_WHCkauOA_1 knife guVl_gp0sJE_0 bus gugP5f2JRJ0_1 bear gugP5f2JRJ0_0 bear guh1OUkdIGE_0 horse guktzkv1els_0 boat guv5reh2NH4_0 boat guxRXiegac0_4 bird gvNxDnFriAI_0 skateboard gvcioONBIcE_0 train gviQTbs7dIk_1 bird gvjcggbLXRo_0 elephant gvjcggbLXRo_1 elephant gvjcggbLXRo_2 elephant gvk0hzlYu9E_0 umbrella gvraCN0RYko_0 dog gvtY3fwbgdc_0 cow gvuBfR3HXac_0 elephant gv4sQFTuJ-k_0 elephant gv7qY66lOhs_0 giraffe gv8pF9t1zYM_0 elephant gwKq56_M6Kc_0 horse gwN_p_IRuoo_0 horse gwP-6gOPn2c_0 motorcycle gwTc-69C_P4_0 knife gwTyjJwBgRk_0 horse gwy7eePYryM_1 boat gw9MjutMhLs_1 airplane gw9MjutMhLs_3 airplane gw9MjutMhLs_0 airplane gw9MjutMhLs_2 airplane gxKnyBP8_cs_0 elephant gxejG9D0guY_1 person gxgZg6BU3ds_0 dog gxgZg6BU3ds_1 dog IX4HjI_9vLY_2 dog IX4IwgbTdCk_0 dog IYBF45M9nTc_0 skateboard IYBzvotFEYo_0 motorcycle IYZZ-K_Ygpo_0 bicycle IYdXz1cOCWc_0 giraffe IYukRQKxhFI_0 person IYukRQKxhFI_1 motorcycle IZESZPVT0zk_3 bear IZGady38Nh8_0 bird IZIPpBl_h0Q_5 truck IZIPpBl_h0Q_0 truck IZIPpBl_h0Q_6 truck IZJ1PO3Fkuw_0 umbrella IZLMXYU4A-0_0 airplane IZLMXYU4A-0_2 airplane IZTfd31H0AI_0 bicycle IZUO1x0QT1I_1 elephant IZ2nFUgP-Pw_1 elephant IZ2nFUgP-Pw_5 elephant IZ2nFUgP-Pw_6 elephant IZ2nFUgP-Pw_3 elephant IZ2nFUgP-Pw_4 elephant IaAPZOFgclo_1 elephant IaG7siKVlak_0 giraffe IaxZJVx5ptw_0 truck IaxZJVx5ptw_1 truck IaxZJVx5ptw_2 truck IaxZJVx5ptw_3 truck Ia0DjYXcBWc_8 elephant Ia0DjYXcBWc_4 elephant Ia0DjYXcBWc_5 elephant Ia0DjYXcBWc_6 elephant Ia0DjYXcBWc_9 elephant Ia0DjYXcBWc_10 elephant Ia0DjYXcBWc_11 elephant Ia0DjYXcBWc_12 elephant Ia0DjYXcBWc_13 elephant Ia0DjYXcBWc_14 elephant Ia0DjYXcBWc_16 elephant Ia0DjYXcBWc_18 elephant Ia0DjYXcBWc_19 elephant IbEpwiOUFEI_0 dog Ib15GlTvqTQ_2 skateboard Ib2u6u-j2vk_0 skateboard IcEs4vbIcDM_0 umbrella IcSumCpVOy0_0 skateboard IcZ2D-MawSg_0 truck IciJuq7ZY6o_0 
elephant IckUkdfRndY_1 knife Ic1cufihs-0_0 elephant Ic1cufihs-0_1 elephant IdSlvHXTrmE_1 skateboard IdXPNOQD97w_0 motorcycle IdabN3kTjSk_0 skateboard IdrTVVio1U4_0 dog IdvQme2elLk_1 truck IdvQme2elLk_2 truck IdvQme2elLk_3 truck Id6HsaEvZ0k_0 person IeB4Nf3h7T4_0 bus IeENvG3Qtk0_5 elephant IeFUkGY1b4Y_4 elephant IeXb8CHr4ms_0 train IefPtlA5ebA_0 motorcycle IehTemq8EYc_27 bicycle IehTemq8EYc_28 bicycle IehTemq8EYc_0 bicycle IehTemq8EYc_6 bicycle IehTemq8EYc_11 bicycle IehTemq8EYc_15 bicycle IehTemq8EYc_17 bicycle IehTemq8EYc_19 bicycle Iejh8w6egIA_0 umbrella Iek9nAfsymA_0 bus IewJcdqOzCY_0 train IewJcdqOzCY_1 train Ie4Ct_HRDNw_1 dog Ie5lfGQndBs_0 airplane Ie8dc7EO7VI_0 bicycle Ie8dc7EO7VI_1 bicycle Ie8dc7EO7VI_2 bicycle IfBft2ltqqE_0 skateboard IfBft2ltqqE_1 skateboard IfFnkz6EUno_1 horse IfGZXa16ZnQ_0 knife IfTrYE-Ox50_0 cat IfZDLHBP_qk_0 bus Ifpbe7xlKp4_0 truck If4WPZY4LIY_0 elephant If8EotoXQVQ_1 truck IgO9_kN8D5I_0 cat IgRs6nmhv2w_0 cat Ige9Idj8fDw_3 cow Ige9Idj8fDw_2 cow Ig0Luv6UlkE_1 bicycle Ig1JdzucmLI_0 boat Ig9jZPM0n2A_0 car IhR6ePM1wRw_0 bird IhXAXy3VAqA_0 cat IhdGvFfk3Ks_0 bird IhlRPxknT9E_1 motorcycle Ihp3YZGcRjM_0 horse Ihp3YZGcRjM_1 horse Ihsr3gT-u00_0 cow IiBzrow5m9w_0 cat IiH0f7VOXTY_4 airplane IiH0f7VOXTY_2 airplane IiH0f7VOXTY_3 airplane Iie6uM_sdLE_2 truck Iie6uM_sdLE_5 truck IiscR53FEz0_1 airplane Iiy_W2tIOWI_0 boat Ii9URMIXJjc_0 dog IjHqTBt-tzY_0 horse IjMLYR0bH6g_0 cow Ijf2ZMTxDUs_0 cow Ij57BoIbMws_0 train IkOG3ZnCvY4_0 cow IkVifrtYlcI_0 skateboard Iklc7ijgOtA_0 horse Ikl-nlqwUJA_2 train IkqFTjEXf4g_0 motorcycle IkrKcORoFLI_0 cat Ik4UxtlIrw0_5 airplane Ik4UxtlIrw0_13 airplane IlJT6oek8KQ_0 dog IlNV-gFlp3Q_0 umbrella IlshSY2CGU0_0 cow Il1TKTSRPO4_0 train Il7GtfxmBlQ_0 skateboard ImCV4d0kYxY_0 skateboard ImEKl15Aipo_2 bear ImOLHl6gwLE_0 giraffe ImO7oG_YuSU_0 train gyBWGyhFuWg_0 elephant gyBWGyhFuWg_1 elephant gyBWGyhFuWg_2 elephant gyBWGyhFuWg_4 elephant gyBWGyhFuWg_7 elephant gybSfaDRdVA_2 airplane gy3zF39Y7B8_0 airplane gzQrsNwx8MQ_1 truck 
gzTHA0tMocM_0 bird gzUj7KfvRPY_0 train gzVdw-5l3sY_5 bear gzWwT4ufwFY_0 bicycle gzwzd6nOPoI_3 bear gz13nfjIblU_0 skateboard g0Jq0uIY3i0_2 knife g0LufqNJtss_1 elephant g0LufqNJtss_2 elephant g0SdZmm5Mm0_0 horse g0W6U-p-T2c_0 horse g0om0nrfC4w_5 airplane g0om0nrfC4w_1 airplane g0tXovGqqSE_0 cow g0zcJWO1MbU_1 airplane g02OQmAgfo4_0 train g02OQmAgfo4_1 train g04xUjb4z0w_0 bicycle g05TJKB5TL0_0 elephant g05TJKB5TL0_1 elephant g05TJKB5TL0_2 elephant g1HtoWJ3NjA_0 airplane g1UUBEfyzJ4_0 horse g1UUBEfyzJ4_1 horse g1ZtaoEqtjI_0 bus g1j_9A4-PL4_0 cow g1n74kWqKFM_0 truck g1vq3JO3eH0_0 skateboard g12DCVqfKjM_0 airplane g13JzTyNCPY_0 truck g17hrSF1YN8_1 bear g2Hh_97o7jY_0 person g2KeNy_WECo_0 bus g2MUK80Ht8k_0 horse g2MUK80Ht8k_1 horse g2MUK80Ht8k_2 horse g2MUK80Ht8k_3 horse g2MUK80Ht8k_4 horse g2cCr0rRIeo_0 motorcycle g2vRpfpQuNE_1 motorcycle g2-SNBvYdNc_3 car g2-SNBvYdNc_2 car g3DAFznLlXw_0 elephant g3e6vDSvpN4_0 skateboard g3g7M2Xv3JY_0 zebra g3ytRwjgoMI_2 horse g3ytRwjgoMI_3 horse g30xOR9j3_A_0 skateboard g30xOR9j3_A_1 skateboard g38MDXW9ndc_0 elephant g4BX8_C-NeQ_1 dog g4KzjuhixSo_0 motorcycle g4KzjuhixSo_1 motorcycle g4R5jZXlnl4_0 truck g5OKbEXlegI_0 cow g5SIvfoi7tE_2 bird g5S-76eh6vs_0 car g5ztjA03q5k_0 horse g55_MKVNAE8_0 motorcycle g57hZ17etp8_0 skateboard g5-55T7AzUE_1 skateboard g7Qk-cV3IFs_1 car g7YvJasRFj0_0 skateboard g7fZhFRdYJs_3 zebra g7oMLF6ZfT8_0 horse g7oMLF6ZfT8_1 horse g8aScpqmhVU_0 umbrella g8iDSRkz_go_1 boat g80ZYUNhRME_1 dog g9Am-b3OqbI_0 truck g9RM9VSJPIY_0 knife g9WrMIn5AkI_0 skateboard g9sD-4RBa3Y_0 motorcycle g9uOEJm7wdw_0 elephant g9yESRreg5k_0 boat g9zLmd4IZ78_0 bus g91mK1sMiSI_1 elephant g9-6tclIBcc_0 motorcycle g-Dfzs3HQ8w_0 boat g-EVS_QxLxA_0 horse g-F4Eig_Rxc_0 motorcycle g-F4Eig_Rxc_2 motorcycle g-F4Eig_Rxc_1 motorcycle g-JTM0dCFFA_0 cow g-SJXmYYHqI_0 truck g-SlOveVnAs_0 cow g-Z7CA3qr1A_0 skateboard g_B2r70EsjY_2 horse g_XW0YLzND0_0 motorcycle g_XW0YLzND0_1 motorcycle g_XW0YLzND0_2 motorcycle g_dN59QhubM_0 person g_jq8Uy4P2s_0 truck 
hAHsYyTOJoI_0 cow hAIBcR5MAVE_0 boat hAUD4Cy2GiM_0 cow hAUD4Cy2GiM_6 cow hAUD4Cy2GiM_1 cow hAUD4Cy2GiM_3 cow hAUD4Cy2GiM_4 cow hAUD4Cy2GiM_5 cow hAVbFSsRfOY_0 airplane hAcx9u12Rd0_1 cat ImZcOQCdJng_0 skateboard ImiKNikVSsM_0 horse ImqKWexMOEA_0 bird Imuhe4E1pxo_0 car Imy4SpqoC4k_0 cat Im3ooIguQHk_4 train Im3ooIguQHk_5 train Im3ooIguQHk_6 train Im3ooIguQHk_0 train InEZSi4Zz08_4 train InEZSi4Zz08_1 train InEZSi4Zz08_2 train InTq6s23Ygc_0 cat Inn1lo0hbX0_0 train Invv-JPzV-0_0 elephant In_dBFPRoso_1 airplane IobdPoAtEB0_0 bear IoqRCAQzibw_1 skateboard IoqRrAswOwY_5 bear IoqRrAswOwY_4 bear Io5wOOkpkdE_0 skateboard IpOFHasyloc_0 cat IpQQ9QabgiU_0 bear IpVCKTRou10_1 truck IpVCKTRou10_3 truck IpVCKTRou10_4 truck IpYZmcVrqdQ_0 cow IpnTUQCHioc_0 giraffe Ip0c_3xCHRA_2 horse Ip-N_PYIqhA_0 motorcycle Iqv9963BN8w_0 cat IrAxUS0aBTQ_0 bird IrDY9nE1V2I_1 motorcycle Ir4CkmTmSXQ_0 cow IsSCjgdAQiE_0 dog IslqPZDUBHI_0 motorcycle IssSzh7Z-vo_0 umbrella IsxRqs7KcbQ_0 cat Is2E8gFNBWo_4 bear Is2E8gFNBWo_1 bear ItL-C-szpU8_0 truck ItiAXqRQm3A_1 knife ItkxwET4PNc_0 dog ItzhXkBVmEY_0 car ItzlBA8cl3c_2 airplane It_dJluX63g_0 cat IuN_risviek_0 giraffe IuZ6JD-k2nM_0 dog Iuk4W5KJbQ8_0 bus IuwJz5d-8J4_0 umbrella Iuw0f-Y8t6I_0 airplane Iuw0f-Y8t6I_1 airplane Iu26NyEUoGY_2 boat Iu5GqI9oVnk_0 motorcycle IvJLhgaveaw_0 skateboard IvMiQ2e-5hQ_0 bus IvMiQ2e-5hQ_1 bus IvX1MeQN-e0_0 cat IvZSk33MtAc_0 motorcycle IvZqPTK9DEQ_0 motorcycle IvZqPTK9DEQ_4 motorcycle IvZqPTK9DEQ_3 motorcycle IvfWyYn_ifg_0 elephant IvjNeTpV6hs_0 horse IvyftS2bPuo_10 airplane IvyftS2bPuo_0 airplane IvyftS2bPuo_2 airplane IvyftS2bPuo_6 airplane IvyftS2bPuo_7 airplane IvyftS2bPuo_9 airplane IwcC1J_ImAs_0 car Iwd7i4kvS5c_0 car IwfpNUPSvpw_0 motorcycle IwgX5DfmIQo_0 bicycle Iwhf27USDD4_0 motorcycle IwmwVP_e5Ag_0 bus IwxmbUX4fcg_0 cow Iw6-0LYvEmQ_0 bird Iw6-0LYvEmQ_1 bird Iw7zBsW9W5Y_0 train IxLbLqfxrhg_1 boat IxNA0hdkWGg_0 cow IxObyCZ6OfY_4 giraffe Ix8eS24W75g_4 airplane Ix8eS24W75g_5 airplane Ix8eS24W75g_0 airplane Ix8eS24W75g_1 
airplane Ix8eS24W75g_2 airplane Ix8eS24W75g_3 airplane IyNmzxdv8-Q_0 bird IyQlh0wdd9I_8 boat IyQlh0wdd9I_7 boat IyU3NizvZuM_0 horse IyU3NizvZuM_5 horse Iyj9D6cwI5o_0 bicycle Iyk9-k1RP-M_0 car Iyk9-k1RP-M_1 car Iyk9-k1RP-M_2 car Iys_rL0bPcc_4 boat Iy4SrujSLuQ_1 elephant Iy4SrujSLuQ_5 elephant Iy4SrujSLuQ_6 elephant Iy4SrujSLuQ_3 elephant IzC8vjFriRE_0 horse IzPS29ghTxo_0 knife IzQjjBqimYw_5 elephant IzQjjBqimYw_10 elephant IzQjjBqimYw_11 elephant IzQjjBqimYw_0 elephant IzQjjBqimYw_1 elephant IzQjjBqimYw_2 elephant IzQjjBqimYw_3 elephant IzQjjBqimYw_4 elephant IzQjjBqimYw_6 elephant IzQjjBqimYw_7 elephant IzQjjBqimYw_9 elephant Iz4_9EtiVXc_0 motorcycle Iz8cco4VLow_0 cow Iz8gKIZcfqo_0 bear Iz8uzZuBiXs_0 bird I0eY-kKi2FM_0 umbrella I0iEaW1Qg_o_1 bear I0oVkr613Rw_0 skateboard I0voLEPKkG8_0 horse I0voLEPKkG8_1 horse I0voLEPKkG8_2 horse I0voLEPKkG8_3 horse I0voLEPKkG8_4 horse hAplCSSZqAs_0 airplane hAplCSSZqAs_1 airplane hAteY2rkmVg_8 bus hAteY2rkmVg_1 bus hAuFEp75jVo_0 train hAzefhyFMN4_0 truck hAzsdnh5Iq8_0 elephant hA_YzyjSVZM_0 bicycle hBDc0K6CvHg_0 bus hBDvdp2RCCw_1 airplane hBDvdp2RCCw_2 airplane hBDvdp2RCCw_3 airplane hBDvdp2RCCw_5 airplane hBDvdp2RCCw_7 airplane hBKuHV_S8lM_0 skateboard hBOhA_sljfE_0 umbrella hBcYx5Uc-vw_0 car hBcZZeXsCaw_0 bicycle hBgxILtRUIc_0 cat hB23PCerELA_0 skateboard hB-M9w3C_Tw_0 boat hCB731pKdcg_1 train hChqLLLAmF4_1 bird hChqLLLAmF4_0 bird hCkn4pJxSkk_0 bear hCrrYhe3x9Q_0 train hCsCAXkiQ4Y_2 train hCynNRrrTKI_0 cow hC5Wac-AzgM_1 elephant hC5Wac-AzgM_2 elephant hC5Wac-AzgM_3 elephant hC5augWtBcQ_1 bicycle hDAv3aPvZjc_1 truck hDLAKS4hCfc_0 car hDM4sCvlRoA_1 airplane hDVx_yYysaA_0 cow hDXpSU7bq44_0 bicycle hDYV-Vz3xwA_1 dog hEAQZIsaIew_1 train hERCXzHI2nA_1 elephant hERyFpl4aDk_0 dog hEWJZ4dCcIY_2 cow hEZt4InN7Eo_0 elephant hEZt4InN7Eo_1 elephant hEZt4InN7Eo_2 elephant hEdpC8HEa-A_0 motorcycle hE04tUrJzXo_0 truck hE7N0N5vik0_0 bird hE7N0N5vik0_1 bird hE-VIrAVcBA_2 bus hFFrC0_rJYA_0 airplane hFTTcrUxPeg_0 cow hFTTcrUxPeg_3 cow hFdi9yxVkys_0 
motorcycle hFdi9yxVkys_1 motorcycle hFixbos35O4_0 truck hFnKIVp-Dcc_0 cow hFnKIVp-Dcc_1 cow hFzR4bgxihU_0 bicycle hGH72iljdzU_0 bear hGRdOlSIQRU_1 train hGRdOlSIQRU_2 train hGRdOlSIQRU_0 train hGiCVP3Z8l0_0 umbrella hG6vW_xUZgA_0 train hG6vW_xUZgA_1 train hG959XPTh_8_0 bear hG-quo0MZM8_0 elephant hG-quo0MZM8_1 elephant hHEIEEdrXYE_0 cow hHIyy4Vda6M_0 cat hHjzciM78AA_0 cow hHtOM5_wiWM_0 truck hHtqPiAg32Q_0 umbrella hH_akvS98jo_0 skateboard hIH6LuoXbpE_0 cat hIXTbG6ho4E_0 person hIXTbG6ho4E_1 person hIXTbG6ho4E_2 person hIz3ONvP-Bo_0 zebra hI3P4BxIr-o_0 bear hI3eGFKYRuc_1 horse hJP8qg-kSZA_0 cow hJTl4NJ0qIs_0 person hJhBQsD0_hw_0 bus hJkgoq_T4Pk_0 train hJmxsYAKHdc_0 umbrella hJtloiw4D-M_0 car hJ_uvoDrzkI_0 giraffe hKJQH8VbGk4_0 airplane hKJQH8VbGk4_1 airplane hKYJZqP-44M_0 airplane hKYJZqP-44M_1 airplane hKgtNPTirdc_2 elephant hKgtNPTirdc_3 elephant hKlKPyuUYps_0 bus hKtHZYDaoXA_1 train hKtHZYDaoXA_2 train hKtHZYDaoXA_3 train hKtHZYDaoXA_0 train hK6w0B1cu-I_0 cow hK7VoN3cI74_0 cat hLGnjjoilbo_0 skateboard hLHaPstpghQ_0 motorcycle hLKzDOp8XLc_1 zebra hLNcuJAwfDo_0 cow hLVZsqfElxI_0 dog hLX1LeVKgi8_0 cat hLjDO37EQ60_2 dog hLjDO37EQ60_1 dog hLscdjfkeho_0 cow hLte0Y4VWR0_0 knife hL_QAgWBkJ4_0 cow hL_noZA6D8E_0 truck hMGVdq71lME_1 horse hMLkMrqUtA0_0 horse hMRIDt-1dY4_0 train hMgp2oyTB80_0 cow hMjke9g_Ysw_0 horse hMuO0MHPIOQ_0 elephant hMuO0MHPIOQ_1 elephant hMusKbJqZDY_0 skateboard hNHGh8N1XGg_0 knife hN_-56Oxma0_0 dog hOJJ65CVNuM_0 bird hOOwQSSrFVc_1 cow hOid-qo2Ozw_0 cow hOky3qIMxRY_0 skateboard hOpJoO7UciM_1 bicycle hOrAXl-jATo_0 airplane hOxMkI1d3oc_1 airplane hOxMkI1d3oc_3 airplane hOxMkI1d3oc_4 airplane hOxMkI1d3oc_6 airplane hOxMkI1d3oc_7 airplane hOxMkI1d3oc_9 airplane hPEsz5u87CI_0 bus hPIDFIwLI8c_0 car hPWhKQfDoXg_0 airplane hPWhKQfDoXg_1 airplane hPW2NpCU668_2 elephant hPW2NpCU668_0 elephant hPW2NpCU668_3 elephant hPW2NpCU668_5 elephant hPW2NpCU668_6 elephant hPW2NpCU668_7 elephant hPW2NpCU668_8 elephant hPa5hUze91s_0 elephant hPa5hUze91s_1 elephant 
hPb_Rq2yKRA_0 cow hPo5Wd-otbY_0 dog I0yz1LGLl08_0 elephant I1Ejpa2UWSk_1 bird I1Pdo-p11tI_0 motorcycle I1Quuhyu2UI_1 motorcycle I1YfOiyQW_8_0 truck I1wfW86V8So_0 dog I1wfW86V8So_2 dog I14JWDgkllE_0 truck I14JWDgkllE_1 truck I19kQsgjFRA_0 bicycle I2IfiPw2aKE_0 elephant I2OQlELjXvU_1 truck I2OQlELjXvU_2 truck I2OQlELjXvU_3 truck I2hmFe1pYes_0 horse I2o_4OyrJlI_0 horse I3DSZk-7nG8_0 train I3JCCqGY3c8_0 truck I3KJj6GQ5QE_0 cat I3OWw4AK0MI_0 dog I330kG5lk5A_0 knife I3-xBh-IrIo_0 airplane I3_lU2I_AaU_0 bicycle I4BMptNse7c_1 train I4BMptNse7c_0 train I4CMNv-VRDo_0 bird I4Gi7kq5XAs_0 horse I4HuQ8DDxoM_0 skateboard I4WNAfBvm5E_1 skateboard I4z-3IGHMW4_0 dog I5KNdt1NT8g_0 skateboard I5QNP3-QHLw_0 cow I5SA8N1JKwM_0 cat I5WNgPfoaZQ_2 motorcycle I5pU9zWz4Fg_0 motorcycle I6fJWB7DpAM_0 bus I6oT6dLeq7A_0 motorcycle I6wEvIOC-Pk_0 train I7GbkWE2A0M_0 bus I7aUrrDieE4_0 cow I7bKlZxD6Fs_0 bicycle I7xOURJQUps_0 train I7xOURJQUps_1 train I7x_od8h4iw_0 cow I7-iLB-NVGg_0 dog I8FoWQrnHGY_0 bird I8Ms0rXjfXU_0 skateboard I8Ms0rXjfXU_1 skateboard I8Ms0rXjfXU_2 skateboard I8Qx-qd0eLg_0 boat I8Qx-qd0eLg_1 boat I8UlumMtAG8_0 horse I8Vr0DzHV9U_0 cow I8rww3UUjYI_0 person I9AGRokco_M_0 train I9FPkgdc-5E_1 cow I9XcFcBW-HM_0 motorcycle I9oAq_x5pqg_0 bus I9yrFs_JpWc_1 skateboard I94qZUJmKP8_1 bicycle I94qZUJmKP8_2 bicycle I-SRTsDkhLM_0 cow I-TshjRdh74_1 knife I-blRAakQjM_0 boat I-h3cTJlsRc_0 dog I-nb60BTO_g_0 train I-raj-aLy8s_8 horse I-ywD5MDZZ4_3 cow I-ywD5MDZZ4_4 cow I_LhSNsRHMs_0 elephant I_kI39ZHymk_0 horse JAEzOCIew2Q_0 airplane JAEzOCIew2Q_1 airplane JAb3p7VYLzI_0 bear JAb3p7VYLzI_1 bear JAcHxxzG1vA_0 motorcycle JAf3nC1hYS4_0 dog JAp2_UJfFao_0 person JAqAH7n-3lA_0 bus JAzD-VzDxfc_2 bicycle JAzD-VzDxfc_4 bicycle JAzD-VzDxfc_5 bicycle JAzD-VzDxfc_8 bicycle JAzD-VzDxfc_11 bicycle JAzD-VzDxfc_13 bicycle JAzD-VzDxfc_17 bicycle JAzD-VzDxfc_18 bicycle JAzD-VzDxfc_19 bicycle JA2PLZmRABc_1 umbrella JBGewEMeWIs_1 dog JBGewEMeWIs_5 dog JBKG_tl08RU_0 cow JBMhOrDLcho_0 cat JBYr3VbJLoM_0 person JBkymGnh5mA_1 
bicycle JBkymGnh5mA_2 bicycle JBkymGnh5mA_3 bicycle JBkymGnh5mA_4 bicycle JBlCFCV4sdw_0 horse JBlCFCV4sdw_1 horse JBxFgwl0To8_0 cow JB0SELYSRXA_1 bear JB-hzl-gILo_2 truck JCIJbwBevro_2 bird JCSRBZQpYCw_1 bear JCSRBZQpYCw_5 bear JCTYAwT6ppk_0 motorcycle JCTYAwT6ppk_1 motorcycle JCTYAwT6ppk_2 motorcycle JCciDn0O6X0_0 airplane JChsfz-p2KI_0 cat JCuE5X37xIE_3 boat JCuE5X37xIE_4 boat JDJWapHD_kM_0 boat hP8Jfo1RaSk_0 elephant hP8Jfo1RaSk_1 elephant hP8Jfo1RaSk_2 elephant hQWcyTkfPeU_1 dog hQZDg__nxQA_4 bear hQZ5lNlAXBI_0 truck hQe3_1EvqIY_0 cow hQfYabI9_ec_0 bird hQkbXGwGwyg_0 skateboard hQve0ugvy6s_0 motorcycle hRAbtgVJiWI_0 bear hRJ0Qk_qdAY_0 airplane hRS45wmOq9c_0 elephant hSR-ZVA-vMU_0 dog hSWyYOzvh0g_0 dog hSf3uEm8r9M_0 bus hShwtMLieCc_0 boat hSiozs1nz7o_1 motorcycle hSzgOCvRfq4_0 bear hS-h8AUEibc_0 cow hS-h8AUEibc_1 cow hS-h8AUEibc_2 cow hTHBMsKC5ZI_0 cat hTZr7OF0VuY_5 dog hUJMSp4rMrc_0 train hU0EbblT2vQ_2 airplane hU388mZGPGg_0 cat hU9B31AVZNg_0 bus hVJjOdU5-yQ_0 car hVNKN_qFEUA_0 bicycle hVOImOLBY1g_0 skateboard hVdb-Q3aJ9E_0 dog hVhNOzZA40E_0 cat hVnD8rlLRgM_0 bird hVq6NOrBwlM_1 motorcycle hVsAAQqAHyI_1 skateboard hWHUct-PLfY_1 motorcycle hWHUct-PLfY_0 motorcycle hWNyVxx4a94_0 cat hWn0ddeHF0I_2 zebra hXAQH1xVKB8_0 cow hXWQ710-JZQ_0 motorcycle hXagj4A6N-s_1 elephant hXbMo03RQWk_0 train hXflTk4WVAA_1 bear hXf7dimd2bo_2 cat hXhtGcCMf5Q_0 airplane hXsCNMb3eTc_0 bicycle hYD7HKMKa3k_0 elephant hYFW5XhMxyg_1 knife hYIPy3eyC9k_0 cat hYQBaiC8d6Y_0 horse hYTIV5X87S4_0 horse hYgzs0gDiiU_0 elephant hYkPL7spYMo_1 elephant hYlmhAuVVh8_0 bird hYtFyx0799o_0 boat hY0vkwEtjLM_1 bear hZAOhuPJTho_0 horse hZAXlQqCmCI_2 train hZCGOP3PHOM_2 knife hZHjTTvcQ88_2 bicycle hZOhuOcxTP8_0 skateboard hZPYHGzIYh0_0 cow hZeekc0i_b8_0 motorcycle hZiXqP-WaQk_3 bird hZiXqP-WaQk_0 bird hZiXqP-WaQk_1 bird hZiXqP-WaQk_2 bird hZygBhv-nDg_0 motorcycle haC0TZbvBEU_0 cat haMtzn-TnOQ_0 boat haTl-PeSssc_0 dog hakWXvIYvzo_1 dog hanKUxPHFbA_1 car hanKUxPHFbA_0 car haxabA27SnU_0 horse ha3C2hPzaiw_0 dog 
ha8hX-68TqI_0 bear ha8hX-68TqI_2 bear hbKjt5OBryI_0 truck hbKjt5OBryI_2 truck hbKjt5OBryI_1 truck hbfiyMHycSs_3 knife hbvJ3t9lpUo_0 truck hcXtsyICD30_1 skateboard hcpMT5qGQ0U_0 bus hdZkNo0t6wg_0 boat hdbKePdCemQ_0 cow hdqiOcfXejc_1 zebra hdwZF4C-vYs_0 cow hd_yXL53Z9E_0 elephant heQRV9di86s_0 train heTgOW6o1ho_0 zebra hedgcDGNngs_0 bicycle heucaATRtbI_0 cat he_j-GZdCNs_0 person hfCbKe627p0_0 airplane hfEl_mnX9X4_0 skateboard hfGEkaEADUw_0 motorcycle hfGEkaEADUw_1 motorcycle hfGEkaEADUw_2 motorcycle hfcKFLBuJ_g_1 dog JDcAM9ieTp8_0 bicycle JDe9ulv2Nmo_0 elephant JD_njBej6V0_0 truck JD_njBej6V0_2 truck JEU2rZzAxRU_0 skateboard JEbIHUJTFsM_0 airplane JEdl8GROiQM_0 truck JExlAUEYZwc_0 cat JE8SV6FOlC0_0 truck JFH3n9kI6aA_0 boat JFO_Qz1y8-s_4 elephant JFQ_GztsLs0_0 cow JFQ_GztsLs0_3 cow JFZG_ebR2mk_0 elephant JFZpmduYfv4_0 motorcycle JFfYNQ2FmHU_0 cow JFk4Qyn58CY_0 train JFvQ7wc6c0o_0 airplane JGDf9kSc-v4_13 dog JGDf9kSc-v4_15 dog JGDf9kSc-v4_17 dog JGDf9kSc-v4_19 dog JGDf9kSc-v4_1 dog JGDf9kSc-v4_2 dog JGDf9kSc-v4_6 dog JGGj1z6Kujc_0 dog JGGj1z6Kujc_1 dog JGMfEFj5PVM_1 truck JGWBjvjqVhw_4 skateboard JGanm9yGTJk_0 truck JGanm9yGTJk_1 truck JGanm9yGTJk_2 truck JGmHpQtJzic_0 horse JGn6Ifa5bWI_0 bird JG0B4rV4KEI_0 dog JG6H3R9rErg_8 airplane JG6H3R9rErg_0 airplane JG6H3R9rErg_1 airplane JG6H3R9rErg_2 airplane JG6H3R9rErg_3 airplane JG6H3R9rErg_4 airplane JG6H3R9rErg_5 airplane JG6H3R9rErg_7 airplane JG6sceNvlnI_3 boat JG6sceNvlnI_2 boat JG872iaucFc_0 umbrella JHBhDpq4HNs_0 cat JHBtawKoltc_0 car JHTt9PSzrhU_0 elephant JHb8IVsjgMs_1 bus JHdc9jvf4qA_0 motorcycle JHmG34eTWow_0 train JHr57YE7IRs_1 airplane JHy85i0So5U_1 dog JH0Jzb0wOXw_3 elephant JH0Jzb0wOXw_4 elephant JH0Jzb0wOXw_2 elephant JISA50Bfj4U_0 boat JIamGji7w9U_3 bird JIiA0pG-MKk_0 skateboard JI6MyG7aTvM_0 bird JJSp2fu3lk8_4 dog JJSp2fu3lk8_3 dog JJq7YAYUado_0 umbrella JJx7GdAuDQY_0 skateboard JJyJR7TlQ7o_0 motorcycle JJ0Ja1ju2ec_1 horse JJ0NBly53IU_0 cat JJ8Vv2hiCCA_0 cow JJ8Vv2hiCCA_1 cow JKBJuICyV50_1 train 
JKBJuICyV50_0 train JKCFS8k_Qis_3 bus JKGV5hbm5g8_0 skateboard JKJQPHspLBs_0 bird JKJQPHspLBs_1 bird JKNRKGSvtsQ_0 elephant JKYPluJPL7c_0 dog JKa7rPKrAwY_0 train JKgPYc0K_hI_4 car JKgPYc0K_hI_1 car JKuhG9WLM2k_0 airplane JK42K36SYLs_0 bird JLE_jNuNoA0_0 cow JLHP-3UxtMU_0 boat JLb2dnuNhqs_0 bus JLoS7DZH_ik_2 airplane JLoS7DZH_ik_1 airplane JLsEcZUU7FM_2 truck JL64rU6Jvmw_1 giraffe JL71b_9Cy9I_1 umbrella JMDFSes_w0E_0 cow JMMmrEdfRbk_0 boat JMPKtdq9b0Y_0 train JMR4IvE2sDo_0 bus JMaahZTxRLk_6 boat JMgbgNPBIJI_0 bird JMnp6FLLbtw_0 horse JMnp6FLLbtw_4 horse JM1jSU4FEPw_2 airplane JM4yr2pj-zg_0 airplane JNDZBgXZBU8_0 knife JNDZBgXZBU8_3 knife JNDdt_ZPl1s_3 elephant JNNbk6jVfB4_0 cat JNZDx8Ro_mM_0 truck JNe7ZednqQc_2 horse JNkz_3Qtdfc_0 horse JNnnm9ixKrM_3 car JNnnm9ixKrM_4 car JNnnm9ixKrM_5 car JNpuJeqVFxk_0 motorcycle JONF8-3gEoY_0 giraffe JONF8-3gEoY_1 giraffe JONF8-3gEoY_2 giraffe JObYghNlZas_6 train JObYghNlZas_7 train JOmeD6G33Dc_1 horse JOoNVY1C6qI_0 train JOoNVY1C6qI_2 train JOqHfu-WVu8_2 horse JOuB1UkVvKI_0 airplane JOue8LphKc4_0 truck JOztmtwKz-k_0 cow JPAjGBsi-rE_0 bird JPMFXg-BXDE_2 car JPTFJk9f2nM_0 dog JPevMGnX92M_0 airplane JPiSmPAIpOI_0 knife JPlZOEew4wg_0 elephant JPuDmwlAXzI_0 skateboard JPwUpTvlZDA_0 person JPwUpTvlZDA_3 horse JPw4R6t-0j4_1 bird JQDX7gVR0qM_2 knife JQDX7gVR0qM_0 knife JQKDDMvCtt8_1 horse JQNsIqNLn40_0 truck JQRxu6RVGMg_0 car hfwhbInEJAk_3 train hfwhbInEJAk_2 train hgFfz_RTcx4_0 truck hgFfz_RTcx4_1 truck hgxvhMjH_68_0 motorcycle hg6Z6JIwRMU_0 elephant hg6Z6JIwRMU_1 elephant hhM2TSF2GhA_1 horse hhNlkY3SS6w_1 bus hhYOJb0v5Yw_0 cat hhlt4dfZmFE_0 horse hhyzKC353Jo_1 car hiJ-OdPj_8c_0 bird hiPDdAi1Qs8_0 motorcycle hiUH1zOfsfo_0 cat hiZLv2E5zI8_0 elephant hjBLAHakI9c_0 boat hjRlztwK-vg_2 bicycle hjhbMbrRUWI_0 truck hj2P25O-nIk_0 skateboard hkR10EU8YPI_0 train hk0cDE4A_b0_0 boat hk7M3PGcOhw_0 train hk-IVoljyKE_0 elephant hk-IVoljyKE_1 elephant hlFPCpe8Akk_0 airplane hlLrYrrOcY4_0 dog hlNOQO4BIHg_0 train hlnNVsSGjxA_3 car hlnNVsSGjxA_1 car 
hl4yLAJiWjQ_0 elephant hl7z1gnPPW0_0 knife hl_YHwW5mrM_1 bird hl_YHwW5mrM_0 bird hmThCl2HK8E_0 skateboard hmThCl2HK8E_1 skateboard hmdH0Olcbx4_0 bicycle hm98pilx9dE_5 horse hm98pilx9dE_1 horse hm98pilx9dE_2 horse hm98pilx9dE_3 horse hm98pilx9dE_4 horse hnJ2wDmXD6w_1 bicycle hnbZY12P-7g_1 elephant hne72NMSPuc_0 bird hnffUBbBFoQ_1 horse hnrSBT9miTE_1 bird hnvbE27mWwI_2 train hnvbE27mWwI_0 train hn19XaR_wIs_0 knife hn7ollCkAy4_5 bicycle hn-1W1O8kZs_0 boat hoLnPrkJ6sE_0 horse hoLnPrkJ6sE_3 horse hoNPAcq_5Ac_1 bird hoNPAcq_5Ac_0 bird hoYDTU50MTk_0 cow hoe88GhFhq0_0 truck homQXuwbe04_0 cow homx5sSuNr4_2 bear hoozxxjd57c_1 bus hotrXXenVAk_0 cat ho5YZstr1XE_1 cow ho7yo7nJk3o_1 elephant hpG2eG_hduA_0 motorcycle hpRxBuFhZ4M_0 train hpRxBuFhZ4M_1 train hpRxBuFhZ4M_2 train hpRxBuFhZ4M_4 train hpkXlhfYZfw_2 motorcycle hpkXlhfYZfw_1 motorcycle hpmC3OjLnZM_2 boat hpmC3OjLnZM_0 boat hpo-lwBTbFw_1 dog hp3aTxzS9ms_0 skateboard hqGhmP1u07Y_0 elephant hqoQm68UbGo_3 airplane hqoQm68UbGo_2 airplane hqsoIR9v8IY_0 motorcycle hq7f1_o4eFg_0 airplane hrLkVz3_xGw_2 bus hrW-pkK9osE_2 bicycle hrW-pkK9osE_3 bicycle hrgh69NXZqw_0 cow hrj6I8n8nAc_0 bicycle hrj6I8n8nAc_1 bicycle hrrpTPwLZHA_0 bird hrtiCeqnqLg_0 cow hrziTee4b2c_0 airplane hr5Q08OMeAU_0 train hr7wUBMikww_0 zebra hr7wUBMikww_1 zebra hsMptx7tOLo_0 elephant hsMptx7tOLo_1 elephant hsMptx7tOLo_2 elephant hsM1eKbrqLs_0 cat hsPK4wlNtI8_0 cow hsYL355Fzio_0 truck hsfS5oT1y5M_2 boat hskEM8GUmDE_2 train hsmxUKxzapo_2 skateboard hsmxUKxzapo_0 skateboard hsyCfsJx7DI_2 skateboard hsyCfsJx7DI_1 skateboard hs2foQ_Xo8A_0 skateboard hs-OEgnsLZs_0 train htDilkoPA-M_0 airplane htSBZwTBX98_0 horse hteze9Fz1dc_0 knife htkybhLm0uk_0 umbrella htwBHgatd9c_2 horse htwBHgatd9c_3 horse htwBHgatd9c_0 horse huCxpuVT4GI_0 dog huDCqh-KRy4_8 bicycle huDCqh-KRy4_2 bicycle huDCqh-KRy4_3 bicycle huDCqh-KRy4_4 bicycle JQnf7j7HpKY_0 cow JQpJv-SOMS0_0 dog JQ9LtiJVsd8_0 cat JQ_dyIlBnGM_0 cow JQ_6xcOuEfU_4 cow JQ_6xcOuEfU_1 cow JRA3LCwRGu0_0 knife JRBLFsevgg0_0 train 
JRJjI6mFa6s_1 skateboard JRJnSf2qOXA_0 airplane JRT0FH2KEsc_0 cow JRcTFvzRC10_0 bird JRcTFvzRC10_1 bird JRsNcoTJJjE_0 cat JRsn1likB7c_0 boat JRyc_lxMJzs_0 skateboard JR6JAx7xdGg_0 cat JSA0JWvQbJg_2 train JSdEdTcUHHI_0 knife JSfXE4ExZ1U_0 bird JSfXE4ExZ1U_2 bird JSs6Sa8zR6c_0 horse JS2cbpFwahY_0 skateboard JTE0ABGzb30_1 skateboard JTE0ABGzb30_2 skateboard JTJgZcBM93k_1 knife JTa9HkbXfSw_0 cow JThBohLxRSc_0 cow JTi4Oy6v9mM_1 horse JTi4Oy6v9mM_2 horse JTtjfwrK4Ls_0 dog JT5zUQio3B0_0 bus JUHMTmjUswE_0 knife JUVHXeFTe3Q_0 horse JUVHXeFTe3Q_3 horse JUbPqBVbGQQ_1 truck JUpxTW6_BAI_0 cow JUtd4FLjXio_0 horse JU1N1nqXjII_0 train JVKkxo7adX8_1 knife JVQ6Gx2hGxs_0 airplane JVTIzApj2UA_0 giraffe JVVtcOIACz0_0 giraffe JVg62b0T408_2 train JVg62b0T408_0 train JV2A3zWMRj8_0 umbrella JV3Tbp30yp4_2 motorcycle JV3Tbp30yp4_1 motorcycle JV3Tbp30yp4_3 motorcycle JV-OfjEsQDs_0 umbrella JWKZlCk_cts_0 train JWXSXvHgoo4_0 car JXEyPb4Nzro_0 skateboard JXP_CNg8grg_0 cat JXi5KrVPz0M_1 bird JXj_lj5QUp8_0 person JXmBBTT0YXQ_0 cat JXobiO1_7Ts_0 train JXwfPpl53Fs_0 dog JYYAwimr2XQ_0 truck JYi7bWDL5os_0 person JYsWtLH_mjM_0 bus JYsWtLH_mjM_1 bus JYsWtLH_mjM_2 bus JYvBo5FwjSg_0 elephant JYvBo5FwjSg_2 elephant JY2d1dohCDs_0 elephant JY3rSX-blgA_0 cow JZBJ35lKlXw_0 truck JZOZuTiifHM_2 boat JZXr-dGLkpU_0 boat JZcy1T--d4M_0 skateboard JZ_ri3awsso_0 cat JaI9UR2n7ZE_0 horse JaLswoS3xO8_0 knife Jaumrq8clZY_0 truck Ja9rAQpB2_M_0 cat Ja_ofQ1ynAc_1 airplane Ja_ofQ1ynAc_2 airplane Ja_ofQ1ynAc_4 airplane JbA11YWHpW0_1 skateboard JbBxvvoOvpg_0 bear JbK17NE3dvk_1 train JbK17NE3dvk_0 train JbK17NE3dvk_2 train JbK17NE3dvk_3 train JbPP4AwiNEc_0 cat JbSkoHG6Vq4_0 airplane Jbfzd9wIyi4_0 cat Jbw0KUJqWpE_0 train Jb03yqEB5WI_1 bus Jb03yqEB5WI_4 bus Jb5lFDvKqXA_0 bus Jb6FIuynIuw_0 bicycle Jb-q7z_Mygg_0 truck JcJKjdDKuc4_0 train JcRvhoBwgNg_0 cow JcU-cdQmKV8_3 bus JcU-cdQmKV8_1 bus JcixSQRUnY4_1 elephant JcmTLrQZ7sE_1 cow JcmTLrQZ7sE_0 cow Jcwl0kCsUTw_0 umbrella Jc5PS0Ejejw_1 elephant Jc8eE1ayaX8_0 cow Jc9PdqC1rpg_0 train 
JdUehtxAfys_1 bicycle JdUehtxAfys_7 bicycle JdwSAFvKg74_0 car JeAykU3MiKg_2 airplane JeET8zb_gPQ_4 knife JeNu9WVQOHY_4 bicycle JeNu9WVQOHY_1 bicycle JeNu9WVQOHY_7 bicycle JeYCd0VP5EY_0 horse Jeb4SSyyZD8_0 dog Je_fuH6-34I_0 skateboard hujF3CEgAXI_0 skateboard hulFEZUNu10_0 train hutTW7ORN8g_0 bicycle hutTW7ORN8g_1 bicycle huy9NXPynro_0 cat hu6nRmzUcAw_0 train hvWHb1kiV5g_0 dog hvWs1FhyQlw_0 umbrella hvhWoRQZMUU_0 cat hvjNVTle8bQ_6 airplane hvjNVTle8bQ_0 airplane hvjNVTle8bQ_1 airplane hvjNVTle8bQ_2 airplane hvjNVTle8bQ_3 airplane hvjNVTle8bQ_4 airplane hvjNVTle8bQ_5 airplane hvkIo-dZUUY_1 bird hvlXyPikLUY_0 bus hv49V2RzgHw_0 horse hv7b1I-cRvI_0 truck hwOL2G-Lo54_0 umbrella hwPkgOB1mEU_0 cow hwTVAkfjjCY_0 cat hwikEC2Jc0c_1 horse hxC7dFDqfXo_0 car hxUn2A7Ko2g_0 cow hyMlfx_ZEeI_0 train hyMlfx_ZEeI_1 train hyX6rKHZcLs_0 person hyb_qBoKG9Y_0 train hyjjdUcyanE_1 dog hyj8BJ_PMgQ_2 elephant hyrBL1wMHts_1 truck hy9Ml-3zAtM_2 knife hy9jrpamopE_0 umbrella hzBqPVIC7IQ_0 train hzUTA7mGyKE_0 bicycle hzeHyMcUmO4_0 motorcycle hzeHyMcUmO4_1 motorcycle hzz9JBRYjFs_0 bicycle hzz9JBRYjFs_1 bicycle hz5anqtArdI_0 train hz5anqtArdI_1 train hz7PXI6R6DI_0 train h0IiMbTwz1Q_0 truck h0IiMbTwz1Q_1 truck h0hIpf9O0Vg_0 bus h1MxYGy1SBc_0 dog h1XtVmXF7CQ_1 elephant h19z0Ap_5Pc_0 bus h2R46pcCEVg_0 cow h2SNrfK0yQQ_2 bus h2X0to3hDA4_0 bicycle h2b9t_pnnNA_0 cow h22FyeO_lyE_0 umbrella h23R8X1WKjU_1 horse h24uuiI34yI_0 skateboard h27DK_oMwYY_0 dog h3FnAKBB9Xc_1 elephant h3Lz61ficjc_2 motorcycle h3aEao1bRIY_0 cat h3aZGHTjBwc_0 elephant h3o5ZykGOxI_4 elephant h3o5ZykGOxI_2 elephant h3o5ZykGOxI_3 elephant h3qOwaRYAi8_1 bear h3uPELFKoCc_3 knife h3uR99WtOh4_4 bear h3_cWsxi4Qw_1 skateboard h4CySJb83XI_2 elephant h4KXG16xA_Y_0 dog h4LE2YVwHL0_0 motorcycle h4jU8ZrDZd8_0 skateboard h4kmvN6NmyA_3 train h4kmvN6NmyA_2 train h4wsDcj7kcE_0 cow h45-zE2gKFA_2 person h45-zE2gKFA_3 elephant h47dExP6oXQ_0 elephant h5C2RKknWfg_3 bicycle h5C2RKknWfg_5 bicycle h5C2RKknWfg_6 bicycle h5KSLdybLIE_5 bicycle h5KSLdybLIE_1 bicycle 
h5KSLdybLIE_3 bicycle h5dsU3N4joc_0 cow h5hkvWWp7Qg_0 knife h55Exp2rpSM_0 knife h6FtP-5VnYM_2 cow h6FtP-5VnYM_1 cow h6McnZDPX3I_12 elephant h6McnZDPX3I_1 elephant h6McnZDPX3I_2 elephant h6McnZDPX3I_6 elephant h6McnZDPX3I_7 elephant h6McnZDPX3I_9 elephant h6McnZDPX3I_10 elephant h6Mvzt5e_eE_0 horse h6jGPQLkE48_0 person h6ztcoDHYaY_0 cat h62bO9Mfl9Y_0 cat h64dmoPNWw0_0 car h7OZUnDKWbA_0 truck h7cXxMNxlcY_0 horse h7uwd7opKjI_0 motorcycle h7uwd7opKjI_1 motorcycle h8BDqFH8e_w_0 train h8BDqFH8e_w_1 train h8BDqFH8e_w_2 train h8EHrA_OM7c_0 person h8LiHNo4904_4 airplane h8LiHNo4904_5 airplane h8LiHNo4904_6 airplane Jfb3XGdt6VE_0 cat JfdoYsRxF5k_2 knife JfnHVMyUT0E_4 bicycle JfqHeWyD5DQ_0 skateboard JgLXpgcnjAA_0 cow JgQbvDmM2Nk_0 bird JggJWWHhlc4_0 umbrella Jg8FXSKMvTQ_1 elephant JhDNC6XRVG8_0 cow JhDNC6XRVG8_1 cow JhFvJHfP_NY_0 car JhPLC0PS9I0_0 knife Jh87zKRgN68_2 boat JiMyZFGmGgM_0 dog Jifa2spqYV8_0 airplane JijtEhm-Dk8_0 bus JikSLpJ2xKw_0 cow JinIHVE4_MI_1 bear JioS9DumyIM_1 car Jixd9HKGzWA_0 train Ji6bpPIPScI_0 umbrella JjIvWQ-198c_0 knife Jja500M50Yw_0 cow Jja500M50Yw_1 cow Jj4KvC3TXro_0 car Jj4KvC3TXro_1 car JkC1Udoysk8_1 cat JkC4nV8LcTE_1 bicycle JkH8ZtuvzDQ_0 dog JkpQkpiRpVI_0 bird JkzNUiOu1GI_0 bus Jk28bpr063o_4 airplane Jk28bpr063o_0 airplane JlJQlaoy3ec_0 cat JlrPaJIAP9k_1 horse JluvPpeI2DY_0 train JluvPpeI2DY_1 train JlzsUphxgIY_0 truck Jl1bEdoRG9I_0 cow Jl6gTtZcQH0_3 horse Jl6gTtZcQH0_0 horse Jl6gTtZcQH0_2 horse Jmblo1iMURo_0 motorcycle JmdMhGsyZvk_0 boat JmvNubLPYGo_0 bird JmxixgKAKzc_0 truck Jm0S-kE2yVc_0 truck Jm3dtu8GTos_0 dog JnAaSoaN3FI_4 boat JnHUNCeHEDc_0 bird JnMkFSGB6Vw_0 truck JnXmNI53DWE_0 person JnrrNu9udj0_0 bear JnvIx5y-ijs_1 umbrella Jnysuevt_4A_0 train Jn1gvGhxU5U_0 bear JocAgPv-ZJo_0 skateboard JohmecnKktI_0 boat JopGEGMo-DQ_0 dog Jo50LBwjHIk_0 bicycle Jo50LBwjHIk_2 bicycle JpDOBaNBwkc_0 truck JpFiApmpoHA_0 cow JpL4Mv-uFi4_1 dog JpRMc6MtCH8_0 truck JpWh1yQThRo_0 train JpZwF6hOCDg_1 truck JpjAxQ_vsZw_7 bicycle JpjAxQ_vsZw_1 bicycle 
JpsOsewgXAg_1 bird JpuCWzsE35k_1 bird Jp0GKZ9vA0c_0 airplane Jp1tvS1y4eI_0 boat JqCaTxH5Ovk_0 motorcycle JqC81ViWFeE_0 bear JqPkaGRIz6c_2 elephant JqT_Bx4fd1Q_0 cow Jqauh1bsJy4_0 bear Jq2ml2xQkHg_0 cat Jq8D628IlV8_1 skateboard Jq8D628IlV8_2 skateboard Jq8OMvgG6wc_0 cow JrAvVMnkKEo_3 bear JrKxxhHGR7E_0 giraffe JrZTstVj2wg_0 horse JrbrXXDuxnc_0 horse JrmyPAW-ItI_0 dog JsNQXxg1PvE_0 person JsPtP21j3f8_3 bear JsPtP21j3f8_1 bear JscnB4QfAhY_0 train JsiSPt3nv1Y_0 cow JsiSPt3nv1Y_2 cow Js2ZDfWZWtc_0 cat Js69iFgcic0_2 bus JtMMD0aJnPI_0 train JtMMD0aJnPI_1 train JtQzeWNt8IA_0 umbrella JtQzeWNt8IA_2 umbrella Jtfp49L4LHg_0 train Jt1zVsUQGhI_2 elephant Jt1zVsUQGhI_3 elephant Jt8ikZGW768_0 bicycle JuGusvu6Z7o_0 skateboard JuKJKHykMKM_0 horse JuKgukJ63eM_4 skateboard JuME8_jaVdE_2 car JuME8_jaVdE_3 car JuMNRsOc0nU_1 cat JuMNRsOc0nU_0 cat JuNubQtCvrU_0 bird JuNubQtCvrU_1 bird JuO7qvp2GBs_0 knife JuXqLoCgK4o_0 bear h8OcTR0Z4yo_1 airplane h8OcTR0Z4yo_2 airplane h8OiIYhIPTs_2 train h8PJps4Sj1E_0 airplane h8PmDAKiKVc_0 dog h8oTFl4XWTc_0 bus h8ysn_L9udY_0 train h8ysn_L9udY_1 train h9FtsOFR3p8_0 cat h9veoEpzRH8_0 cow h9w20ChZ_7Y_0 bicycle h9w20ChZ_7Y_1 bicycle h96rR-VkJZA_1 bear h96rR-VkJZA_2 bear h966cxQyjvc_1 airplane h-PS5v6ZTBY_0 truck h-VSmS49g5M_0 skateboard h-npKkPbHSA_0 boat h-qRpUteJV4_0 bird h-vGllteZnI_0 train h-1NdCqoxdU_1 bird h-2DBPzbKUM_0 cow h-27oWBBirE_0 dog h-9WCj8sB6o_7 airplane h-9WCj8sB6o_8 airplane h-9WCj8sB6o_10 airplane h-9WCj8sB6o_11 airplane h-9WCj8sB6o_12 airplane h-9WCj8sB6o_0 airplane h-9WCj8sB6o_1 airplane h-9WCj8sB6o_3 airplane h-9WCj8sB6o_5 airplane h_DH9wUjJZA_0 cow h_Ey7gQJCSc_0 cow h_KKvY3cK4o_0 cow h_KKvY3cK4o_1 cow h_XHdrNdD98_0 bus h_tQ-ZVYe1M_0 bird h_6GMOpsIOk_0 cat iACKPRGNEOU_0 bus iADpOEGdwQI_3 bird iALubFRPBXQ_1 knife iAL5KD5BwGQ_0 horse iAuV09oxF_c_0 bus iAzvkn-2C9s_4 horse iA_tYzSGuVg_0 dog iBDVD9if3VA_1 bear iBDVD9if3VA_3 bear iBDVD9if3VA_4 bear iBF1Cfv7RpE_2 train iBF1Cfv7RpE_3 train iBO6oNBr4hM_2 train iBmHl4vB2p8_0 boat iBmHl4vB2p8_1 
boat iB2e_0wI6Cs_1 bird iCA5LKIvUak_0 horse iCUmfkHj2MM_0 elephant iCWBysiT4fE_0 airplane iCoklLBZGi0_0 truck iC-r2odD6Ss_0 dog iDBWSSj3Yag_0 bus iDMMfw0zrvQ_0 cow iDy5BzJGt50_0 skateboard iD0ptJ7ucww_0 horse iD0ptJ7ucww_2 horse iECVUNZOPOM_0 cow iEIRSDANY7g_0 bird iEcsL-BdEp8_0 skateboard iEeZD9_-mw4_1 train iEe9Qed4A6w_0 elephant iEfRHR6In04_1 dog iEnwhpHkWPA_0 dog iErN5WNQuZ8_1 bear iFLG6c3XcMw_1 knife iFgR4_OYpgU_0 boat iFk_jNFfItI_0 car iFsAXsW8t-8_1 bus iFsAXsW8t-8_2 bus iGB1OkMGELk_1 elephant iGE04YY7P68_0 motorcycle iGE8oPBzavo_0 airplane iGKh6_bzEe8_9 airplane iGKh6_bzEe8_5 airplane iGWCy-zysHU_7 horse iGWCy-zysHU_0 horse iGWCy-zysHU_2 horse iGWCy-zysHU_5 horse iGf0rCvWhZE_1 bird iGivgJkDWVo_0 elephant iGivgJkDWVo_4 elephant iGivgJkDWVo_5 elephant iGivgJkDWVo_1 elephant iGivgJkDWVo_2 elephant iGmHR-MYdts_2 skateboard iGtwAlGgpuQ_0 motorcycle iG3IZAIpSos_0 cat iG4w2A16Qy0_3 boat iG4w2A16Qy0_0 boat iG7OG-yAmkg_1 boat iHNSjj9GO9k_0 horse iHZNqzCjd7k_0 train iHbirUiASog_0 skateboard iH0SvXt_QEE_0 cow iH9qrmQO5wg_3 horse iH9qrmQO5wg_1 horse iH_5naROy0I_0 motorcycle iIYyUq4tPbc_0 cow iIZw5oU3kz4_0 dog iIa2i3Fyyp8_0 cat iIgi9EuB83A_0 train iIlu4DSMMwM_0 skateboard iIoEhVh0sac_0 bird iIoEhVh0sac_3 bird iIoEhVh0sac_1 bird iIwKnWnoXd0_0 skateboard iI66ySv1M1E_0 bear iJcYkYS6CgE_4 airplane iJcYkYS6CgE_0 airplane iJcYkYS6CgE_3 airplane iJqRpAI5q0M_0 cow iJ0Pe8-N6i4_0 bus iJ5fEZLxnPw_0 knife iJ5fEZLxnPw_2 knife iKLuvvisn6Y_0 airplane JvHU5ncnmtc_1 cow Jvkp32eVZyc_0 cat Jvm2k8MgJ5k_0 cat Jv1ayezpka4_0 bird Jv6b9zItltw_3 bird Jv6b9zItltw_0 bird JwNWcW7nUBE_0 elephant JwNWcW7nUBE_2 elephant JwaPyA7kWhc_0 cow JwnMWPlx6KU_0 cow Jw_nc2U4pKs_0 skateboard JxKJB-QdFUA_1 umbrella JxRKwF7KNOA_0 bird JxSYbvgXcT8_0 car JxVoSlh710g_2 bird Jxc3ArJpyuY_0 motorcycle Jxc3ArJpyuY_3 motorcycle JxdIZhohCtg_0 cow JxlB8wLncYc_0 elephant JxzCLy2VyJA_0 skateboard Jx03EEph0bw_1 truck Jx2PgBxlrLY_3 airplane Jx6xyX5sPMk_0 cat JyKJFochwIQ_0 truck JyLFLF4shyY_0 airplane JyLqTlaGOww_0 knife 
JyM0FDmoMyQ_0 airplane JyePA4nzTx8_0 truck JyhAOfW608o_0 cow JyliijVyyUc_0 elephant JyliijVyyUc_1 elephant Jy1hmMPCNks_0 dog Jy1hmMPCNks_1 dog Jy37u1dt8Qc_0 dog Jy_3PqINBss_1 bird JzGkRevP9mU_1 truck JzNvJYTN1Pw_1 bus JzNvJYTN1Pw_0 bus JzNvJYTN1Pw_2 bus JzNvJYTN1Pw_4 bus JzNvJYTN1Pw_7 bus Jzm0H_o-LyA_1 bicycle JzwF2_O5qho_0 cow JzwF2_O5qho_1 cow JzwF2_O5qho_2 cow J0Gb34OfhGs_0 airplane J0m2haAO_Pg_0 truck J0uOEHqVD0g_1 elephant J01a05fNHz8_0 airplane J05eYTq5pFE_0 cow J1BVFlR3Pzc_2 bicycle J1VVax1uIGc_0 elephant J1YSacTJR64_0 bear J1YqrkAsUIs_1 truck J1YqrkAsUIs_2 truck J1YqrkAsUIs_3 truck J1rYOpOlNqs_0 cat J1reV7ZinzE_2 truck J1sQZHaGRVY_0 cow J1uF4oCMmtU_0 car J10PTSVhLnQ_0 car J10PTSVhLnQ_1 car J10PTSVhLnQ_2 car J142X1ly-gY_0 cow J17uKo2HgxY_0 bird J2R5C_XNnek_0 train J2Sh2XKvWOA_2 horse J3EToJg72Es_0 horse J3d48McH1L0_0 elephant J3gk0p9Hm0o_0 knife J3hgEqlUzpg_0 bus J3hva1l0CWM_1 horse J3jOAuADP44_0 boat J3sMC-99CWs_1 cow J3zIT2YwDdY_0 bicycle J315ju7gD8Q_2 train J4eK5nQv9E0_0 motorcycle J4hu4X1Hr7k_0 bear J4ithFdbyKY_0 train J4mDzsuGR1M_2 bear J43AWiRkRAI_0 skateboard J46c4FEAjQ8_0 horse J46c4FEAjQ8_2 horse J5CA6t8d7uA_0 truck J5JNgpMvPks_0 horse J5Ss-cEKg9o_0 skateboard J5TS-1YKlWE_0 elephant J5TS-1YKlWE_1 elephant J51qDcGqOV8_0 airplane J5-O6tDEZO0_0 horse J5_8xLaPuIU_0 cat J6AHeX1RqWk_0 bus J6nRLSf9kms_1 dog J61MSyhI5Xg_0 bird J68NptJ9oRE_0 skateboard J7h1DaonvHY_1 horse J7jTtirQ85g_0 motorcycle J7vNGyyYQ30_0 dog J73WpGWHEuE_0 giraffe J73WpGWHEuE_15 giraffe J73WpGWHEuE_1 giraffe J73WpGWHEuE_2 giraffe J73WpGWHEuE_14 giraffe J79qVoBV6TM_0 car J8Akt0d4r_k_0 train J8Akt0d4r_k_1 train J8dIP05jqRw_2 truck J8dIP05jqRw_5 truck J9SzI8MQm6Y_0 airplane J9ZGJucbLiw_0 airplane J9mX4rrWQto_0 knife J9n9_-FSk4Y_0 dog J916-YD5Qms_0 elephant J-sHEYA-20k_1 giraffe iKjaiW6gHPQ_1 elephant iKjaiW6gHPQ_0 elephant iKlCbkZsFzE_1 cow iLeUN6d8Aew_0 giraffe iLeUN6d8Aew_1 giraffe iLk3v-m1Z0U_0 horse iLvLOw8Jigg_0 motorcycle iL0GMZ7iO3c_0 dog iL5OOut4Jek_3 bus iL9TAERxS4A_1 bicycle 
iL9hLZ_cXaI_0 person iMfVd5_HBcE_0 bus iMqYyOcO4Fw_0 umbrella iMtt9-ROv_o_0 dog iMukpec9Vmo_0 airplane iMukpec9Vmo_2 airplane iMxzNRMiKMA_0 truck iM3tOs60qxk_1 airplane iM8Lua_zTug_2 train iNQNSmu2BD8_0 skateboard iNWrFmCCfXw_1 bear iNa2jg_1Vyc_0 cat iNghTa86iWY_0 cat iN-bJwlR2i8_1 bicycle iOEuAB0dIs8_0 dog iOH00pYaMhY_0 cow iOJiYp298qc_3 airplane iOJiYp298qc_1 airplane iOd4NCiEBLw_4 airplane iOd4NCiEBLw_2 airplane iOgScMDTX_I_0 skateboard iOvWAp7U61k_0 cow iOzYv5IpFng_0 horse iO7wHeFO6Js_1 cow iO7wHeFO6Js_2 cow iPWL6FSzmS8_0 umbrella iPbg6G7tUVo_1 horse iP98M3c1PJw_0 elephant iQB9bgZJCwA_0 motorcycle iQPn_3iB6aU_0 umbrella iQYiakvHwnk_0 bicycle iQZ1QN-A3JQ_0 elephant iQfs0MyXA-s_0 airplane iQxGihgbiM8_0 cow iQ_2xA5J-Zg_4 bird iQ_2xA5J-Zg_5 bird iQ_2xA5J-Zg_1 bird iQ_2xA5J-Zg_2 bird iRI3AkfYykI_0 knife iRLMFxqd6Vk_0 bear iRTTlG8M9FE_0 car iRTTlG8M9FE_2 car iRTTlG8M9FE_1 car iRWWnw104cE_0 bicycle iRklgBUz8ME_0 bus iRk0aHyYWdM_0 bird iRlBKC_jfE0_1 horse iRlBKC_jfE0_2 horse iRlBKC_jfE0_4 horse iRmfa0b6jJk_0 car iRpibBNFoiY_0 knife iRv5dyfU3ZQ_1 car iRv5dyfU3ZQ_2 car iRw-TCiikqw_0 horse iRw-TCiikqw_1 horse iR3sRTxVGtg_0 airplane iR4rImxKjK0_0 car iR4rImxKjK0_1 car iR5Zew8NcYU_0 truck iR5Zew8NcYU_1 truck iR5Zew8NcYU_2 truck iR5Zew8NcYU_3 truck iR5Zew8NcYU_4 truck iR5Zew8NcYU_5 truck iR5Zew8NcYU_6 truck iR5Zew8NcYU_7 truck iR5Zew8NcYU_8 truck iR5Zew8NcYU_9 truck iSCFoiWm7Xk_0 bear iSLNkNnHOXQ_0 bicycle iSYNvKIuAXc_0 motorcycle iSbXpgu-7qA_0 bicycle iSeR1wQ4sl0_0 train iTF1bWOtrew_1 bus iTF1bWOtrew_2 bus iTWyYCJO0FI_2 truck iTbEmIOM3Bg_2 car iTbEmIOM3Bg_0 car iTbEmIOM3Bg_1 car iT3LIkn9wh4_0 car iT5clmXCTEc_0 elephant iUDGzAPkGLI_1 airplane iUEEnhAvRoY_0 cow iUSZKTFqatw_0 airplane iUX8ST-BSFg_1 bus iUZnCaGp148_0 dog iVH9ehKyau0_0 giraffe iVRs9h04NcM_0 cat iVzRc0RW_Y4_0 bird iV4UGeMqQeY_0 dog iV8NpvUXve4_0 elephant iV8NpvUXve4_1 elephant iV9CFIQTmfs_2 bicycle iWP_wo9OSe4_0 bird iWo66ztRt0o_3 boat iWtj7if5cK8_1 boat iWv1rxdhH1E_0 bear iW1aIV39PQo_0 motorcycle iW2g2j2VhbM_1 
skateboard iW2g2j2VhbM_2 skateboard iXKQX0UfOqA_0 cow iXKQX0UfOqA_1 cow iXKQX0UfOqA_3 cow iXh4-KWp9S4_0 horse iXl114K8Y1E_0 car iXxi1CQpbBk_2 cow iXzEoHyipJM_0 truck iX7b9tWhoKg_0 giraffe iYGSi3t8Do0_2 cow iYO5SD120r4_0 elephant iYYdiX4oGjM_0 skateboard iYjiqdn7fVk_0 bird iYsgKLWI96c_2 knife iYtDe_tT_wo_1 train J-6KxfbaI6M_2 cow J_HdQVHBeco_0 motorcycle J_l7W4IMhJo_0 dog J_n_3-KXet0_0 dog KAGadYR0_LM_4 bird KAGadYR0_LM_6 bird KAGadYR0_LM_8 bird KAKn8JmKESU_0 train KAjM8ENV-F4_4 skateboard KAxsc-ratJ4_0 horse KA1A0hH1nVQ_0 train KBIGw8UrUG8_0 cow KBKaaEaIPRc_0 cow KBNqKcj0xoc_0 train KBP3moB3vz4_0 bird KBRkCaaDjxU_3 bus KBRkCaaDjxU_0 bus KBe3_8RL_MI_0 person KBoY6Pa8f_M_0 cow KCbzyGKBwC8_0 train KCdR8nTa3p4_0 skateboard KCipBL5_e5M_0 horse KCy-RKy_KN0_0 bicycle KC1md4Q_DlQ_0 skateboard KDSxlGW6eRc_0 umbrella KDZsS4MjllY_0 motorcycle KDaVTe3RbUY_0 horse KDyYkCLIImM_0 knife KD0Qm4z53a0_0 truck KD0Qm4z53a0_5 truck KD5LwDdfw0o_0 horse KD9qqVSiPu0_0 train KEGLFAbfrxs_0 motorcycle KERo3bKldwM_0 elephant KEW0fAHE_74_0 bus KEW0fAHE_74_2 bus KEagowlFwzI_0 cow KEll3gbyIsk_0 truck KEll3gbyIsk_1 truck KEll3gbyIsk_2 truck KExfLNe3IbY_0 airplane KE2StZtSBfk_0 airplane KE3O7h2RC-s_1 train KE_UJpQulNU_0 horse KFEorB8NRcE_0 boat KFFTHBaYcbw_0 bear KFJtVwXfusI_0 boat KFRZOFB41Jk_0 train KFk_7p6X-zI_6 car KFk_7p6X-zI_1 car KFk_7p6X-zI_2 car KFk_7p6X-zI_4 car KFk_7p6X-zI_5 car KFnvvsS8eIE_1 knife KGYrelsyNbk_0 airplane KGbYHbiOfd8_0 giraffe KGwEL4VozSA_0 boat KG8zBA9Gudg_0 knife KHBsJZVKzks_0 truck KHG1hZsfjwQ_0 train KHHyhgm1jZ0_3 skateboard KHSjivlhX30_1 bear KHcEC33udEg_0 cow KHgLQP4XH9Q_0 skateboard KHsYYKcSCSI_1 cow KH0F1sJXKss_3 elephant KH0k5jfUZGg_0 bicycle KH8QlsYIT1M_1 bear KIPptA8AzYg_0 horse KIjf6QGqdsw_0 truck KIjf6QGqdsw_1 truck KIqePeskBSk_0 truck KIy2LK1jsQ8_0 person KI8Arf5-ekw_1 truck KI8Arf5-ekw_4 truck KJIBdy7_10k_1 bus KJIBdy7_10k_2 bus KJJBVXnnqIw_0 zebra KJcXjJ5S9yA_1 dog KJrPyuVxWRg_0 airplane KJrPyuVxWRg_1 airplane KJvAK-5ExwY_2 truck KJ30mU3h4f4_0 bear 
KJ7PQiJAKRM_0 elephant KKKiTv_k23A_0 giraffe KKO1QGoVQYU_0 elephant KKpwJEMQYv8_0 dog KKsKKMjHYGM_0 horse KK06xbUhklk_1 bus KLC8OgkQnNQ_0 boat KLEKnTRMmo0_1 cow KLGAT1GQYGA_2 bird KLMz6_P5QmA_0 horse KLNmQqyAs54_0 cow KLUTy4pqLZ0_0 bicycle KLVZqPfRuTg_2 bear KLVZqPfRuTg_7 bear KLlN4H-eGYI_1 skateboard KL6-Iu09-C8_0 cat KMNaWZZK2Os_0 skateboard KMOOcO5yE9E_1 horse KMXuGjMAt7k_5 bicycle KMXuGjMAt7k_6 bicycle KMXuGjMAt7k_3 bicycle KMajGvVnol0_1 airplane KMajGvVnol0_4 airplane KMajGvVnol0_5 airplane KMajGvVnol0_6 airplane KMajGvVnol0_7 airplane KMiZgk_f50g_0 dog KMlZbzTdutw_1 car KMlZbzTdutw_2 car KMsL64iYfOA_0 car KMtu1xThH2k_2 elephant KMyoO6YYfZk_0 elephant KNaoNUMT7m0_1 car KNg4K_bbY5Q_0 train KN5hxi96gW0_0 cat KN-_uhPPfoE_0 cow KOKdrC_foXo_0 airplane KOOd5IO8seo_0 boat KOSUWuFIQjQ_1 airplane KOVZk2ixqc0_0 truck KOgmgqcT21Y_1 bird KOl1EDiK2e8_0 motorcycle KO6T6QdloiM_0 bus KO7Ncyx1-9c_0 train KPJDHcE-qeQ_0 bicycle KPYtlDJa43o_0 skateboard KPfbBNvFcmA_0 skateboard KPj_wrsubOE_2 bear KPkzyHL7IPg_0 cow KPmvpNEHsPk_0 skateboard KPzWIuvRlr0_1 skateboard KP4ApNQiIEI_0 cat KQB-ZyriFmI_0 boat KQg6eO2jr_Y_0 umbrella KQ5mchVgTXo_0 truck KRCLiP-JUsc_0 truck KRCLiP-JUsc_2 truck KRCLiP-JUsc_1 truck KRW0HyqDLg8_0 dog KRjN1nx8mcE_0 airplane KSDxU99SF6g_0 motorcycle KSHVle4SAM4_0 elephant KSZ7nkMWOsU_0 skateboard KSZ7nkMWOsU_1 skateboard KSj7hZ7oO18_0 cow KS1ge4vlv64_0 bicycle KS4vsIYGaCM_4 truck KS4vsIYGaCM_0 truck KS8UAlyHoCg_0 dog KS_fak2guWU_1 dog KTAMaZKxpF8_2 train KTDhNtr8XF4_0 airplane KTDzrCvIVQs_0 dog KTQQtbUbWbA_0 airplane KTZ2Jsj6_ig_0 truck KTdzxOjJNgI_0 car KTsTGNqrFuE_0 umbrella KT7YiBWXqNk_0 airplane KUZxnRyU2e8_0 cat KUbSnz1yWxc_0 knife KUc8Kw30V1Q_2 truck KUc8Kw30V1Q_3 truck KUc8Kw30V1Q_4 truck KUgY_2bsBC0_1 skateboard KUhzqYZoYCI_0 cow KUkcrqulhqg_0 cow KUlpA-cpCpM_0 horse KUumLype4AE_0 elephant KVFlTVdKQVw_0 horse KVJCkQzQbMs_0 person KVmS-yiYu2c_0 bicycle KVzW5MPT25A_0 airplane KV0o55FO4XA_0 skateboard KV3jtdzXA9U_0 dog KV__RQ75-vw_1 cow KWJiCsomGTA_0 cow 
KWLl4vVumIs_0 truck KWSDQebY3dA_0 cat KWwbFKgHqW0_0 car KWxd8IQ9_a0_0 cat KW10UlO19uo_0 bus KW4ovUCg7uU_0 bicycle KW4ovUCg7uU_1 bicycle KW5S4gsTVaQ_0 knife KW7gAr7kgow_0 dog KW_6RyjLGPI_3 horse KXCQuD9phb4_1 bird KXENib5sk78_0 cat KXLWiz5ZUh0_1 train KXLWiz5ZUh0_2 train KXdF5__0yVQ_0 cow KXf6k7PrX7E_1 elephant KXf6k7PrX7E_2 elephant KXrQkw1WPnk_0 bird KXzu3MDaZn8_0 car KYK_Wg8JlTg_0 skateboard KYK_Wg8JlTg_1 skateboard KYTRCD2p-8Y_0 motorcycle KYZzKKYD7Yc_1 horse KYaB_EEk344_0 cat KYc__uUZkwc_3 bicycle KYd6wCR0jVc_1 horse KYd6wCR0jVc_0 horse KYs4hm9X1Rg_1 bicycle KYvXJXEbUMg_0 bird KY0x7p41Q_A_0 cat KY04L4VTsXc_1 airplane KY04L4VTsXc_2 airplane KY7D2Y5MQSo_0 horse KZAf2uPS-us_1 horse KZAf2uPS-us_0 horse KZFniGi-fes_0 dog KZJcgoY3r3U_0 airplane KZSLQpdbGps_0 motorcycle KZYe6pqrLaQ_1 dog KZhX7tDfYIA_0 bus KZl_XArvSXk_0 horse KZ4OuA1t3ZY_0 elephant KaUGkf-3N-4_0 horse KaiX3d83DWA_0 zebra Kaj5B4nrWJU_0 skateboard KapwOqVyzUk_0 cat KaqToIfNxMY_1 bicycle KauPg8P2kC4_1 airplane KazepPKQz1M_1 cow KazepPKQz1M_3 cow KazepPKQz1M_4 cow Ka978At0k0Y_0 airplane Ka-4ZfE0GMQ_0 motorcycle KbA6UDJg1LE_0 train KbA6UDJg1LE_1 train KbGl5jqOQ7o_0 cat KbRIbBeLBsM_3 motorcycle KbosOWR7ZSg_1 boat Kb3lxArGO8Y_0 bicycle Kb3lxArGO8Y_1 bicycle KcDpzG8kKho_0 cat KcL-zz1sb6I_0 dog KceqMsKO-zc_0 cat KcpGWNCD-uk_0 cat Kct9k6Q2YM8_0 car KcuEc9WwYSQ_0 cow KcuEc9WwYSQ_1 cow KcyLR4RxylE_0 cow KcyMYgt62Go_0 horse iY5Sh73Lem0_0 bird iY6eEC8uY4E_2 train iY6eEC8uY4E_1 train iY9QlFmEBFY_0 motorcycle iZsSK_iIOoA_0 horse iaGO2mTgoPo_1 bicycle iaGO2mTgoPo_3 bicycle iaWSU1ISWXQ_2 airplane iaWSU1ISWXQ_0 airplane iaflfMXT7QQ_0 boat iamGAsKNRhY_0 train iana0Lz1gs0_1 motorcycle iasZRb9p3lg_0 motorcycle ia1XmqAwn7M_0 bus ia6R3fqdlnE_0 bear ibcBDIGpMfo_1 bus ibd-Wxcr_x4_0 horse ibpj369yzbw_0 umbrella ibxmk7cGhTQ_3 horse ib5fWzJWV5A_0 cow icDyRH3P-nM_0 airplane icGjENlINL4_3 skateboard ich9rXZWjGY_0 car icic9NkCnf0_0 cow icnuBKQZNBg_2 bus icnuBKQZNBg_0 bus icnuBKQZNBg_1 bus icxOfJQ-l9I_0 car icxOfJQ-l9I_1 car 
icy3pC1Q0eA_0 cat ic7k8fkUDXs_0 cow idnOwkwaCm4_0 horse idnSzg_rV_k_3 bicycle idoGYHCXGJs_0 elephant idq0Jqw8Oa0_2 elephant id1yzZ3HkTs_1 knife ieCL4lz7IJw_1 boat ieOpqoYhMOQ_0 truck ieOpqoYhMOQ_1 truck ieOpqoYhMOQ_2 truck ieULzTIs9ls_0 cow iedgnWefCA0_0 airplane iedgnWefCA0_2 airplane iedgnWefCA0_3 airplane iewlg5CteEs_1 airplane ie8gkh6nQcA_0 train ifKKR-gCLSk_0 cat ifRQKBKIRSI_0 dog iff3KW8leKw_0 airplane iff3KW8leKw_1 airplane ifghH4Jo8D8_0 truck if31ci9xz_8_4 bicycle if31ci9xz_8_1 bicycle if31ci9xz_8_2 bicycle igGtS-jZCQM_2 car igGtS-jZCQM_0 car igLVqNKw-yE_0 bird igMWvnK1jEE_0 giraffe igMWvnK1jEE_3 giraffe igMWvnK1jEE_1 giraffe igQUACDrluw_0 horse igU61tmxeE4_2 skateboard igWsPt0nelg_1 bus igcpSvypduQ_0 truck igcpSvypduQ_1 truck igdqmLfZ_cw_0 airplane igjBIRwjlko_1 dog igm6X4CZLmk_1 bus ignREcFRyaQ_7 airplane ignREcFRyaQ_8 airplane igwghbZYjgg_0 airplane ihMDaxeTpZs_1 horse ihTjIMWOjuQ_1 motorcycle ihUpF22zo4M_0 train ihUpF22zo4M_1 train ihWWle00xEE_0 motorcycle ihh0J0AaWBs_0 train ihh0J0AaWBs_2 train iiA0hIRwwJA_0 train iiSWvRk3YfU_0 bird iiextKoe48U_0 cat iigPPpoo0W8_0 knife iiiOUcmwJPw_0 cow ii0PDMs-a0o_2 car ii2ghwDAI3w_1 airplane ii_sG2SkeXM_0 cat ijB2Yh71VIg_2 bear ijJAWtORd2w_0 truck ijJAWtORd2w_1 truck ijVpcnt8HN8_0 bus ijXmwWOLvpM_2 horse ijXmwWOLvpM_1 horse ijdipMmraWc_0 truck ijwhkKzyWE8_0 airplane ij0zLKtr0sA_0 bird ikGzd6ivk64_0 motorcycle ikKFRS8Hivk_0 bear ikVu6XfZ3_A_1 bicycle ikafEc8p6rI_0 bicycle ikafEc8p6rI_5 bicycle ikafEc8p6rI_1 bicycle ikafEc8p6rI_3 bicycle ikafEc8p6rI_4 bicycle ikfmjumoUlM_2 train ik868nOtrZo_4 bus ik-jgdZW4Ek_0 horse ik__zZ1HZNg_1 giraffe ilKErQ8ojz0_0 umbrella ilKErQ8ojz0_2 umbrella ilKErQ8ojz0_3 umbrella ilKW98Qvobg_0 skateboard ilvsheh1Cqs_0 dog ilxXSgvtFgw_0 cow imEWC_Q-BSg_1 car imcRxs0K7H8_0 bus immhpBi8eWw_6 skateboard im_FneG303c_0 dog inEZ7ZLAS7s_5 skateboard inJLKInP5kw_0 dog inZmM8c-9NI_3 horse inedUh-74-A_4 truck inodVLfFogA_0 train inynAJrGhVU_0 motorcycle in061qZJjWI_0 dog Kc8WMzLKvvk_0 cow Kc-f3X7O-pw_0 
cat Kc-x73DCumI_0 truck KdGgVhM0Ihg_0 bird KdKlI0ZN6qo_0 airplane KdQQqsAuU7o_1 bicycle KdUSJz6UWLQ_0 giraffe KdXRnPKKeTU_0 bird KddQJwFfv9s_2 skateboard KdjMgSuON5w_5 bear KdpUjVhfjG0_0 person KdyadP7Y1nU_0 car Kd9Em2ABfN8_0 cat Kd-jTE5-2uE_1 motorcycle KeMITKdjHtk_0 cat KenV2bIQf1o_0 bicycle KevYmLAAigc_1 train Ke3R9FrGLcY_0 dog KfJU66erPWo_2 knife KfMO45jz-68_0 boat KfS_UKkbQAA_0 bird KfTV1TFY2b8_0 bird KfaTw0euPQA_0 motorcycle KfjmKiZzSlY_0 cow KfjmKiZzSlY_5 cow KfkKe7q45KA_1 motorcycle KfkKe7q45KA_2 motorcycle KfkKe7q45KA_3 motorcycle KfpCncLoqOw_0 cow KfwbVpPI0nU_1 motorcycle KgAFD_JvgrQ_0 cow KgD3H0APDy0_0 bear KgNS5HwFF_c_1 elephant KgVEQYicksA_0 cow KgY5OrVnzv4_0 cow Kgo7SWtDdz4_1 dog Kg3xuyjNU7w_0 umbrella Kg7Qk4Gx9n0_0 motorcycle KhKZwdKiqms_0 cow KhKcHaH_ALo_0 horse KhPKq8O30VM_0 bicycle KhPKq8O30VM_2 bicycle KhPKq8O30VM_4 bicycle KhuC9snWfpI_0 cow Kh7rAO7jCGc_0 airplane Kh_KwBHfGQ8_0 cow KiHy8IMQ6zA_0 airplane KiaUDlPLxzk_1 bear Kixl-Wmj3kg_0 motorcycle Kjaag6B-MIQ_1 skateboard Kjca1u6P3NE_0 cow KjiI2E3l3Mk_1 truck KjiI2E3l3Mk_2 truck KjqaJ25GUBI_0 bus Kj3dRtd4xQI_1 cow Kj3dRtd4xQI_0 cow KkD23XYUG9c_0 umbrella KkMNGzvNkg4_9 bird KkNYBz9ZaVA_0 bird KkNYBz9ZaVA_1 bird KkPf9AB1HZo_1 elephant KkRq1ogJq-4_0 skateboard KkXTT9C4xfc_0 cow KkdSKHS7P50_1 skateboard Kks6eJqnZLQ_0 dog Kks6eJqnZLQ_2 dog Kks6eJqnZLQ_3 dog Kks6eJqnZLQ_4 dog Kks6eJqnZLQ_5 dog Kk6BgYl9OjA_7 bicycle KlEK-vv3DVo_0 bear KlENnLskuCU_0 cat KlG0czACle4_1 cow KlG0czACle4_0 cow KlG0czACle4_2 cow KlG0czACle4_3 cow KlqbHICh4G4_0 train KmJhshcviXA_0 knife KmbMzgXFdKs_1 airplane KmbMzgXFdKs_2 airplane KmbMzgXFdKs_0 airplane KmfmqwmQneM_0 bird Kmr5uVYVSDo_0 car KmuV8XfAjvw_0 horse Km3GmgNJlL8_0 train Km3GmgNJlL8_1 train Km3GmgNJlL8_4 train Km7w520V5vs_0 airplane KnIxVxIho9w_1 bird KnN2yDre-aM_0 boat KnTu6keaGs0_2 elephant KnTu6keaGs0_0 elephant KnXPxa1RzmU_0 cow KncYvkV6rwc_0 boat Knql8E5Khc8_0 elephant KnuD87lrS8w_0 skateboard KnvGRqLQ5iM_1 train KoA6bPmALeA_0 cat KoXgGmdVCBM_1 bicycle 
KoXgGmdVCBM_10 bicycle KoXgGmdVCBM_2 bicycle KoXgGmdVCBM_3 bicycle KoXgGmdVCBM_4 bicycle KoXgGmdVCBM_5 bicycle KoXgGmdVCBM_6 bicycle KoXgGmdVCBM_7 bicycle KoXgGmdVCBM_8 bicycle Kosi26dm76A_0 horse Ko5wlBGl200_0 horse Ko_Nx24OGxM_2 airplane KpDzoM2xtwc_2 truck KpDzoM2xtwc_3 truck KpDzoM2xtwc_5 truck KpHFaYsgWrg_2 elephant KpHFaYsgWrg_1 elephant KpVflkpC7d4_3 bus KpVflkpC7d4_5 bus KpVflkpC7d4_0 bus KpVflkpC7d4_2 bus KpXxo2n6AYw_1 motorcycle Kphl0WRacss_0 knife KqAvXx4bN5k_0 cat KqQgFUEAS-M_0 train KqavxpR698k_6 dog KqavxpR698k_0 dog KqavxpR698k_1 dog Kqfo6_qcthc_0 car KqjhaIJMY5U_0 cat KqnqyAczaqs_4 bus KqqyldSpJh4_0 horse KqqyldSpJh4_1 horse KqzkADa-Lqw_1 train Kq1x16QvM1g_0 dog KrGJjt0yq-s_1 bus KriNb3dhqVQ_1 skateboard in9LFcixPXo_0 skateboard ioEMtB2bP6o_0 bird ioESr4H79KY_0 boat ioGc_R8NJow_0 cow ioKahF3aFWw_0 horse ioKahF3aFWw_1 horse ioOHxrHumIk_1 airplane iobYquCNk5k_0 cow iojaZ646ie8_0 skateboard ipLnwxta1Jc_0 boat ipOJVFLMLIk_2 bird ipOJVFLMLIk_0 bird ipgB9KXnzK8_0 horse ipg_y1T2OsM_0 cow ipg_y1T2OsM_1 cow ipqQlNsINy8_2 airplane ipt6gWgCgis_0 truck ip5xVRJOpP8_0 umbrella ip8BFE94TKo_0 airplane ip8BFE94TKo_2 airplane iqDJJqLVBBk_1 elephant iqExYW2fPfc_0 bear iqicuLBaF_g_0 truck iqlKzflOl00_1 bus iq1FaWFylpI_0 motorcycle iq6izTYp-DU_0 motorcycle irBsER6ITHw_2 skateboard irDs_vWExnM_1 bicycle irDs_vWExnM_2 bicycle irU_BJXoU9I_1 cow irWY8s-JuBs_3 airplane irWY8s-JuBs_0 airplane irWY8s-JuBs_1 airplane irWY8s-JuBs_2 airplane iramP9ihj_w_1 bird irgacv6LobE_0 motorcycle iri1MtEgOjQ_0 bear irs2O6YOB5I_3 elephant irs2O6YOB5I_5 elephant irs2O6YOB5I_1 elephant iruY-BU0rpg_4 elephant irzcPf--6uQ_0 train irzcPf--6uQ_4 train irzcPf--6uQ_5 train ir4EYn7Fz5A_0 dog ir5E9O2Tonk_0 boat ir7Dq5dPxOQ_0 horse isPplb7aotI_0 boat isPplb7aotI_3 boat isU4229ndXM_0 cat isfwmnXNmeM_2 cow islz_HxqOnI_0 bird isvvRHvNuIw_4 umbrella isynk11V9s8_3 airplane isynk11V9s8_1 airplane isypXPZMgns_2 boat isypXPZMgns_3 boat itKyPMv5z0Y_0 umbrella itKyPMv5z0Y_2 umbrella itc-A2zwSGM_0 dog 
itrvgHryhIY_0 train its4C4ty2oA_0 skateboard ittQcsrECUE_1 bear ittQcsrECUE_2 bear it1EatlrBkg_0 cat it3KS-r39EQ_1 knife it3hCzfmyfs_0 cow it6DtEGdhas_0 cat it8Fid-mqRQ_0 truck iuEbY8B4Qo4_0 cow iuEbY8B4Qo4_1 cow iuFmdispR2U_0 bicycle iuRmu4BN6bw_0 train iumTd9IGDho_0 train iusgUMlrYFA_0 airplane iutdZMWA8f0_0 person iuumrgHW8zM_0 umbrella iu3sd1qnr8g_0 car iu9Av4HCmiw_0 knife ivDeIaJYIlE_0 truck ivT103z2bwc_0 giraffe ivdfO5VqKo4_0 cat ivgTXhIqccY_0 cat ivi1frbFnGw_1 giraffe iwFO7lcVjKc_2 cow iwFO7lcVjKc_0 cow iwFO7lcVjKc_1 cow iwX4cgfQn5s_0 bird iwczN64AC9Y_0 bus iwp5aVOXWaM_0 airplane ix8S6CRuUFg_3 bear iyAvqfMVOeA_0 cat iyLZZlL-B80_0 cow iyMbIICjtcg_0 cow iybJfH6iVdU_0 bus iygW3-Ovcic_0 cow iyn1OZFmvXE_2 bird iyz9Lq13Mcg_0 cow izbTUTqkG7c_0 cow izx70OqPYBc_0 dog iz9-Vl4e9po_0 train iz9-Vl4e9po_2 train iz9-Vl4e9po_3 train iz-BT0NAs6k_1 knife i0Eg02B3JoM_0 elephant i0Ez1KT7sTo_0 horse i0ZE0kXl5oU_1 skateboard i0eMgZ0riHI_2 bird i0gg-mJNKlU_0 cow i05OPAsrmJI_3 elephant i05OPAsrmJI_1 elephant i05OPAsrmJI_2 elephant i09cuoC14q4_0 bear i1DfyWe0Jh4_0 cow i1DfyWe0Jh4_1 cow i1NfFxZmBSA_0 bus KrvsSuIgrJQ_4 horse KrvsSuIgrJQ_1 horse KsT2_VxPkb4_0 knife KsXzFCpHMPU_0 giraffe Ksyud0_i1zI_0 bus KtINrfbQSXk_0 knife KtV59qZg7BU_0 truck KtX4x9k3J2A_0 train Kthi3i2WM3s_1 skateboard KtkN77asAj4_0 horse KtplZx6_ecU_1 knife KtqvSap6uig_0 skateboard Ktxb4OmaAjA_0 car Kt3uQcxNltk_0 zebra Kt9neWWjkHM_2 bear KuBa9tep8xk_0 bear KuQgP71vfZ0_0 train KuYBJ90zNYw_0 umbrella KuYjBUvU-ws_0 umbrella KuYrzelSfIw_0 car Kulks153IS8_0 truck Kulks153IS8_1 truck Ku0XhH2YeG4_0 bear KvH6JyHG3H8_0 motorcycle KvH6JyHG3H8_1 motorcycle KvLXxaGooPk_0 cow KvPLPO4A5R8_0 knife KvRsu4xefwo_0 person KvcxzJxNkkU_1 bird KveRZ7dBNGU_0 boat KvgupPBw5rc_0 cat KvjDDIthDDM_0 cow KvkOTtqxJlo_1 cat KvsaKWirK7Y_0 skateboard Kv0ui3mEWGE_0 horse Kv0ui3mEWGE_4 horse Kv0ui3mEWGE_1 horse Kv0ui3mEWGE_2 horse KwkcPYl8Lv4_0 cow Kw7t6l8h2Ns_0 bear Kw7t6l8h2Ns_1 bear Kw8037OwDjc_0 truck KxWI3M2FGOw_0 horse KxZXot9AIY4_0 truck 
KxflrYttp20_0 bird KxlTxdqDDzo_0 cat Kxuqb_htGwY_0 giraffe Kxuqb_htGwY_2 giraffe Kx40to29YnE_0 skateboard KyDXCruNNj4_0 horse KyUM64yfNCA_0 horse KyWUn_bj5rM_0 motorcycle KyZWWIsQUbg_0 skateboard KyZWWIsQUbg_1 skateboard KyaKfhOfKhE_1 bird Kyt325n06oI_0 cat KywHhzvsm3Y_0 bird KyyS9PYJ9Zo_0 truck KzK3iwncxbY_0 bicycle KzK3iwncxbY_1 bicycle Kzc17TzutkM_0 skateboard Kzc17TzutkM_1 skateboard KzyD-e7N2D4_0 train KzyD-e7N2D4_1 train Kz3zulHzEE4_1 train K0CwoXVMp0M_0 bicycle K0L3_2UquEY_0 boat K0Zt-EcXkj8_1 airplane K0cgwgX_8fo_2 boat K0xs4bH65_Q_1 motorcycle K02fUURwCiY_2 car K02fUURwCiY_0 car K02fUURwCiY_1 car K1Qbgm__2iE_0 cat K1ccfBgR_kg_0 truck K1-s4sk63R4_0 horse K1_J3d_yH64_0 motorcycle K2F6TCgVfR0_0 boat K2hV4KVruLc_0 airplane K2my8qWjyn4_0 cat K2yjgwFV15k_1 motorcycle K2yjgwFV15k_0 motorcycle K26jSjClwaQ_0 skateboard K3Cgw_EFdbw_1 motorcycle K3DniaFnn9E_0 cat K3KhxEuf8mY_0 horse K3KhxEuf8mY_5 horse K3Ov5rPJ2LE_1 horse K3XsEMr7Qt4_0 person K3qgW4Y3yrk_0 motorcycle K30LSGFu6hs_0 motorcycle K4RE7AZWGv0_0 car K4U_AmqQFDY_0 bear K4VnWy2-8xQ_3 car K4ec2MqDkPw_0 train K4fCUNjbdf8_0 motorcycle K4wp52Zn5d4_0 horse K5NooGgwD1E_0 horse K5NooGgwD1E_1 horse K5pBkPv_1sg_5 car i12y-zJl-nA_0 cat i17EaDmRPCg_0 umbrella i2Yjl6kF8iY_2 airplane i2Yjl6kF8iY_0 airplane i2cujNbMSKc_1 skateboard i2diIHrCsbk_1 bird i3AK_cujBxY_1 motorcycle i3BpSeFJdgo_0 cat i3HeGqUyibM_4 bicycle i3HeGqUyibM_9 bicycle i3HeGqUyibM_12 bicycle i3LFAemLFW0_0 horse i3Z5pFF2dH0_0 bird i3a4U770GtE_0 person i31nG3E36WE_0 knife i32p4KoRD2o_0 train i33S_D8TBc4_0 dog i35wpbpl8qY_2 boat i38dpYWvJN8_0 umbrella i38dpYWvJN8_1 umbrella i4CFI7MtlRs_0 cat i4ExemfAEO8_0 bicycle i4IpgDIqTrs_0 boat i4RZtd1cCw8_0 umbrella i4bRNqQ32MI_0 cat i4clJpNvw4M_2 bus i4hqN47R0oU_1 train i45JoRzDdI0_0 cow i46jok5cjyY_0 horse i5GJ6mIp8zc_0 boat i5G6RkcL4m0_0 cat i5OdBE4QG6c_0 train i5g87UeVkBU_0 horse i5g87UeVkBU_1 horse i5sT2ifoPyM_0 knife i6MF-PGtJiE_1 train i6WTNPwIjW8_0 cat i6aJqhBh5wg_0 skateboard i6j6P7ITxYg_0 cow i6vwTWezXmU_1 boat 
i66Gsq6zzqI_0 motorcycle i6-YQ6rSnDI_0 cat i6_oBTD2-YA_5 bird i7P2tq4TS_4_2 bus i7UQGL5uxvw_1 skateboard i7WeV3CfJV8_0 knife i7a8sQcVRgE_0 truck i7umCLnxVXw_0 cat i791If0qoBU_5 knife i8KQCu2cMAc_2 bicycle i8KQCu2cMAc_4 bicycle i8bVI1667K4_1 truck i8hjK42sseE_0 motorcycle i8lG7Ux3wlc_0 dog i8nbuADJjmE_0 car i8nbuADJjmE_1 car i8nbuADJjmE_2 car i9PUn4sF30g_0 motorcycle i9T-NwSBqPE_1 knife i9VWkuQHBls_0 horse i9nmvkDiFGc_0 cow i9sP7mWuQ_8_2 motorcycle i9sP7mWuQ_8_1 motorcycle i9u4vsQUBTQ_0 horse i90TDb7evCY_0 truck i9_FG4-2VIM_0 skateboard i-CQVFq1JI8_1 bicycle i-CQVFq1JI8_3 bicycle i-T9Q2g8xbk_0 airplane i-kodOT_ufM_0 cow i-nP7aFTZb8_0 bird i-xdWDN7Eys_2 knife i-3aAuwOmxc_0 truck i-8W-K4y3nY_0 train i_HHc85mP4Q_0 train i_h0vOCrd_U_0 airplane i_h0vOCrd_U_1 airplane i_iXTMX4Vls_0 cat i_nZ8ImBf18_1 bicycle i_nwFUP7QJM_0 knife i_4c71HPXOI_0 giraffe i_-PIEIGkQE_0 horse i_-PIEIGkQE_1 horse jAH-80rHWKY_3 bear jAW8iLGAgdQ_1 bear jAW8iLGAgdQ_0 bear jAh4oBD0Bsw_0 train jAnV_6fFGnI_0 cow jAy3VhkJauE_2 knife jAy3VhkJauE_5 knife jA6aZl1f4Wg_0 bicycle jBMmFLPc7nA_6 bus jBMmFLPc7nA_0 bus jBMmFLPc7nA_3 bus jBMmFLPc7nA_5 bus jBTJgbVspOA_0 airplane jBl50J7bOEw_1 airplane jB1IT1aBj-Y_0 dog jCDFU72N7Mc_1 skateboard jCJGjjNBSk8_1 airplane jCJGjjNBSk8_0 airplane jCMWNtCzuqU_0 knife jCUnLxCoYMA_0 motorcycle jCY67ybfyqU_1 cow jCZx5dn_4KA_0 bear jCcW1MW6PTE_0 truck jCcW1MW6PTE_1 truck jCiwgfC1uN0_0 dog jCtFgJ1qhJE_0 bird jC5Px208OVY_4 horse jC5Px208OVY_5 horse jC5YGckTiIU_2 train jDFqxB4rC7M_0 cat jDJNC5fzvfA_1 motorcycle jDYks7hSKbg_0 truck jDbHjQZ5R70_0 airplane jDbHjQZ5R70_1 airplane jDdFavN2eWY_0 dog jDgpggXdBIc_1 motorcycle jDgpggXdBIc_2 motorcycle jD2RjyxG6ow_0 motorcycle jD4621IQz3w_0 dog jD4621IQz3w_1 dog jEASZOuNSS0_3 skateboard jEASZOuNSS0_0 skateboard jEASZOuNSS0_2 skateboard jEEOkCjU9y0_0 bear jEJZ76_xhog_2 bear jEQDhb_Zewo_0 cat jEYG-qIv34o_1 cat jEYG-qIv34o_0 cat jEfwj-JzFXo_0 person jE1Rq_Ot02M_0 dog jFAm4tikj6E_0 horse jFSIX_KuRK8_0 horse K5p31PQkx3I_1 horse K5q4FoXnLwI_0 
train K5sQWplX-D8_1 skateboard K5sQWplX-D8_2 skateboard K6JHTga6VU8_0 airplane K6SFafS3Zv8_0 car K6SFafS3Zv8_2 car K6jf51to7dU_0 horse K6jf51to7dU_1 horse K6sKjN_MOsE_1 bear K6srgkSvZdw_1 skateboard K6srgkSvZdw_2 skateboard K6vEY0vOlSg_1 train K66dqG9OJuo_1 dog K66dqG9OJuo_0 dog K6_WEh-eizw_1 airplane K6_WEh-eizw_2 airplane K6_WEh-eizw_4 airplane K7uSHqISah0_0 train K702Tx5vkp4_0 horse K78iEUHTTZc_1 cat K8aa-7brUTs_0 bear K8vGdEhh_jU_0 bicycle K81vEhukX4U_0 motorcycle K9LhqtvfZ10_0 dog K9LhqtvfZ10_3 dog K9LhqtvfZ10_4 dog K9LhqtvfZ10_5 dog K9TPOifKCmU_0 motorcycle K9hTkmr_71A_2 car K9jCx7G3_Mw_0 knife K9kNamc2c5Y_1 dog K9kNamc2c5Y_0 dog K9wE7VzJD00_0 train K-Dz6gr96Lo_0 dog K-s8RPMLRw4_0 bird K-s8RPMLRw4_2 bird K-x3x3kGGqg_0 dog K_PGa9Eo6mo_1 dog K_VS3tyB-Cc_0 person K_Z28TO4stg_0 bird K_h1L3P_j1M_0 bird K_pO-MBS7lI_0 dog K_qFWKniImU_0 skateboard LAKF499FHX0_0 train LAKF499FHX0_4 train LAKF499FHX0_1 train LAKF499FHX0_2 train LAKF499FHX0_3 train LARRHwtW8fE_1 dog LAZoyKF7lbQ_0 truck LAZoyKF7lbQ_2 truck LAZoyKF7lbQ_3 truck LBJEbJfzvW4_1 skateboard LBOXDMZvtBY_1 train LBnsLkuQ8kE_0 person LBwm49n5rKo_0 motorcycle LB6fi4oTKvQ_2 dog LB8Wc8hU4Hc_0 airplane LCGZmNGyPhM_0 boat LCghaNtVeM0_1 knife LCjQb5zLTCs_0 train LCoIwiCBlW4_0 dog LCxiwbrpEFI_2 bus LC5Qly11BZs_0 train LC5q2G2pxT0_0 bus LDEju5sQWOU_1 bear LDH_eiO0aFE_0 boat LDJ9xB-n5Sg_0 dog LDJ9xB-n5Sg_1 dog LDQiOOCMhs4_0 truck LDQqhsLKyjs_0 train LDYFndJjRGA_0 skateboard LDgpZlJ_QYM_0 boat LDh-8GoBSLw_0 bear LDlR_gDbVFk_0 airplane LDvN2rB8p44_0 train LD-8yzPoOIQ_0 car LD-8yzPoOIQ_1 car LD-8yzPoOIQ_2 car LEH61oMv2So_1 train LEIkLV_S5yA_0 cat LEP6ZOl5iw0_0 horse LEUCQjNIm9E_0 knife LEYBNQUwruU_0 dog LEiolk6i9RI_0 horse LEmU61Tdqxs_1 motorcycle LEverFsHygc_1 airplane LE2ks85I17U_0 bird LFDqskJozig_1 skateboard LFMUePhHPAk_1 car LFZYYpjP3FA_0 knife LF4xVBfV5SI_1 bird LGRkVRP-RTs_0 car LGgzD_ng3aA_1 bear LGrMlBi0l6Y_1 boat LGuSLUeKcTo_0 bird LG0w1oTdXgY_0 bird LHEuYW96FG0_0 bear LHEuYW96FG0_4 bear LHbVe_bjGp0_2 dog 
LHbVe_bjGp0_0 dog LHbVe_bjGp0_1 dog LHmvAqv6kYE_0 zebra jFneoJr36o8_0 car jGCw13fkf0Q_2 motorcycle jGPtq4pO8Ug_0 car jGTNsTUkNUw_0 cat jGTr1LSaGGw_1 bicycle jGTr1LSaGGw_2 bicycle jGTr1LSaGGw_0 bicycle jGlNsqDOz8Y_0 horse jGqRX9IwGI0_8 bear jHK3JYa_Ypg_0 umbrella jHM867g1K8k_1 horse jHM867g1K8k_0 person jHy5deaCjQE_0 dog jH_YxkU_JwE_0 motorcycle jINuUqU6sJI_0 dog jIP9FdmB0_E_0 train jIbmC5sed8I_1 airplane jIjEX8I5SHo_1 bird jIjEX8I5SHo_2 bird jInMbuzvtiQ_0 umbrella jInMbuzvtiQ_1 umbrella jI0xgoZ8QDA_0 boat jI1Swlwj_wc_0 horse jJMefDe4r9w_1 skateboard jJR-emvmi9s_0 bear jJR-emvmi9s_1 bear jJf_N_p-Gjo_1 skateboard jJnz3tS1uME_0 motorcycle jKBU4c1AdSQ_0 cat jKv6Q1RRxVM_1 boat jLBSOa5iDgE_0 horse jLR7LmbNekc_0 motorcycle jLXuZdAveV0_2 boat jLXuZdAveV0_0 boat jMNaKigE1eI_0 truck jMNaKigE1eI_1 truck jMVeJ3RbcH4_0 car jMaYIgpjxlk_0 dog jMmjaxXWaUk_1 bus jMo01X2mBq0_0 bus jM79QETqts8_1 horse jNCq29f3J8Y_0 airplane jNE_FcqbQN8_0 motorcycle jNJJgAg79KA_1 airplane jNJJgAg79KA_0 airplane jNKO9msLe34_1 airplane jNKO9msLe34_0 airplane jNSTcIQwl_g_3 train jNSTcIQwl_g_1 train jNSTcIQwl_g_2 train jNllRQ66Re4_3 dog jNn7v2MFg_U_0 truck jNsEePln1_U_0 bird jNsEePln1_U_1 bird jNt8Vn-WKRI_1 horse jN-BXoM15Qs_0 cat jOQ0W0Z_-Uo_0 dog jOl4m5QdOZQ_0 bus jPaVdR2IRu8_0 airplane jPiVFMGvHbM_0 train jPiVFMGvHbM_1 train jPrY_Xz0CDM_0 knife jP5RhcwO4E4_1 dog jP7mwBStU3w_0 dog jQBc1CqjGOk_0 skateboard jQCrA8Bjbp8_0 bird jQXYSlXk7_c_3 bear jQXYSlXk7_c_1 bear jRIy_wUojcs_0 car jRR6sU59uTo_0 airplane jRTkny0bdY0_2 motorcycle jRTkny0bdY0_1 motorcycle jRh5WphQGDI_0 horse jRqdnQ8HlwQ_0 airplane jR7eq8CAmbs_0 airplane jR-Cbp3qBJI_2 horse jR-Cbp3qBJI_0 horse jSS6b2iz2hk_0 knife jSk-3X-hjyg_1 knife jStwl7WfsVE_0 skateboard jTAz5HO8mQw_0 cat jTHDoLyfTLc_0 dog jTQ5A95TKw8_0 cat jTYsK4JKns8_0 giraffe jT1mDaHStHU_0 train jUDnkkvVKNo_0 airplane LIw68irBLtE_3 airplane LIzgqx7Ykxw_0 airplane LI286rLHd0I_0 bird LJGQA810BtE_0 bus LJJuw5mLJ4Q_0 skateboard LJhCGLht3Rw_0 train LJhCGLht3Rw_1 train LKe9a7L3vkk_0 bird 
LKhjmARDv7k_4 bear LKhjmARDv7k_6 bear LKoaXogFTbc_0 dog LKyQ2fBNVmw_3 skateboard LK2-EMocZQs_6 dog LK2-EMocZQs_1 dog LK2-EMocZQs_3 dog LK9zoUrrEHc_0 skateboard LLJiqe0d06I_0 train LLOwSRx9hxo_0 bird LLVr7tG42kw_0 motorcycle LLW1jx3S-Hw_0 train LLjDNseEw0c_0 skateboard LL_DiAJ71rc_0 bird LMGo4BXG4Yw_8 knife LMRH29tlDrM_0 cat LMrDuKEYJ3k_0 truck LM1djNtENzA_0 cat LNQHybwdHRk_0 airplane LNX244qUx7M_0 dog LNntRLW2bHA_3 skateboard LNntRLW2bHA_0 skateboard LNntRLW2bHA_2 skateboard LN6DT1DOaTg_5 skateboard LOBD9yc5YPM_1 skateboard LOMTlGqGyHc_0 motorcycle LOjc-npcSjs_0 airplane LOjc-npcSjs_2 airplane LOjc-npcSjs_4 airplane LOjc-npcSjs_9 airplane LOlUKQgr7Qg_0 boat LOosqz3z8Xw_0 train LOzh9vxSHPg_0 dog LPQv6LdOZHo_2 motorcycle LPQv6LdOZHo_1 motorcycle LPZjxIqs8Uw_2 airplane LPd_Y8gk5uI_1 train LPgmaebC-L0_2 boat LPtcpZXDhHw_0 knife LPvsAAlZI_8_1 bus LP3a2L1ZCyg_2 dog LP8dyCxmCrI_2 train LQAF34GzpMY_0 airplane LQO68Aj4ons_0 car LQRuelaTZd4_0 bear LQRuelaTZd4_1 bear LQT4GnnPhA8_1 dog LQbQVeZrwEk_0 motorcycle LQdP4gNX9Aw_0 bird LQjzonTrY2o_0 bear LQr5vK-X1fQ_0 cat LQ2EDJSNIN0_1 dog LQ2EDJSNIN0_3 dog LQ4z96EA6co_2 bird LRSii99-QIo_1 zebra LRgsl5_TJVg_2 skateboard LRgsl5_TJVg_0 skateboard LRgsl5_TJVg_1 skateboard LRtLr32oPAw_0 skateboard LR7IHIbXtrE_0 bird LSE0KHhFxps_0 train LSMKaXjXnhE_1 boat LSi1i5lSUjA_0 dog LSqIpguEI04_0 motorcycle LSqIpguEI04_1 motorcycle LSvVMD-SF48_1 bus LS8qQoB3Uw8_0 dog LS8qQoB3Uw8_1 dog LTEyQSswTVI_0 bus LTQPc_WVFOw_0 airplane LTQPc_WVFOw_1 airplane LTQPc_WVFOw_2 airplane LTQPc_WVFOw_3 airplane LTaExiLK2S0_2 bear LTaExiLK2S0_3 bear LTaExiLK2S0_4 bear LTaExiLK2S0_6 bear LTaExiLK2S0_7 bear LTjSA_-Q5DU_1 knife LTkuM5IoNV4_0 motorcycle LUCDeZOOhlg_0 cat LUUYKUhaYZs_0 bus LUjqWGI9KSo_2 truck LUphe242a5g_0 train LU4-QjhixQU_0 motorcycle LU4-QjhixQU_1 motorcycle LU__7PPUMTo_0 skateboard LVCMA3LXlkc_0 airplane LVfXvn7elFI_0 person LVfrWLnu7T8_0 train LWHshdXjBCY_0 truck LWQhidgjZno_0 motorcycle LWRXboX1o5Y_0 motorcycle LWTYrbFCPl0_0 dog LWY9Y2YVtHA_1 truck 
jUQUg-qsfgI_0 motorcycle jUWm1Mc1Tno_0 airplane jVEM2JpS4sE_0 truck jVZhyibQ31g_0 cat jV9-Lr_rsf0_0 bicycle jWCpff7m0LE_1 airplane jWCpff7m0LE_8 airplane jWCpff7m0LE_0 airplane jWCpff7m0LE_2 airplane jWCpff7m0LE_10 airplane jWGulD3X0qw_0 car jWIFscsXRmo_0 skateboard jWLv1BQ4PsA_0 bear jWawsbm6dCc_0 bear jWfItNlOURk_0 motorcycle jWfItNlOURk_1 motorcycle jWruD-mHxrQ_0 cat jW4VRs_uVZw_2 airplane jW4VRs_uVZw_5 airplane jW4VRs_uVZw_0 airplane jW4VRs_uVZw_4 airplane jXBBnV6cop0_0 car jXDxesHRKAc_0 umbrella jXLUgu4rET0_1 cat jXkzrsfYgbs_0 dog jX84bwkb-r0_3 bus jYBgSw-woGw_2 bear jYIWAGlIq9c_0 skateboard jYZmjlzKhL8_1 skateboard jYhAd9FFxqI_0 umbrella jY37CiJCKJk_0 cat jY9ihstGQwU_0 cat jZWITYFghgA_0 cat jZZBR49_vR0_0 motorcycle jZiuOZwq7gQ_0 motorcycle jaS19NIXdrk_0 motorcycle jaVgyhuxK_4_3 skateboard jaVgyhuxK_4_0 skateboard jalIqFA40pI_1 motorcycle jalIqFA40pI_2 motorcycle jaoXgM9c7u4_1 car jaovVHNORuA_0 cat jauLT1ElBPc_1 train jauLT1ElBPc_2 train jbN4y-wz5-s_13 giraffe jbN4y-wz5-s_1 giraffe jbN4y-wz5-s_4 giraffe jbN4y-wz5-s_5 giraffe jbN4y-wz5-s_11 giraffe jbhxM5eNgO0_0 train jboQE0Z0280_0 truck jbrhKjPDzhE_1 train jbwSKNFH66s_0 dog jb23jXcxaHE_1 train jb23jXcxaHE_2 train jb23jXcxaHE_8 train jb23jXcxaHE_9 train jb3uct7NumU_0 train jb4crk58m88_0 skateboard jb4672rSRIs_0 dog jcLbvoEUbj0_0 airplane jc2fijpD8vI_0 bicycle jc-IKl7He7U_0 knife jduOxfYHRGQ_0 person jeBcjSSkUhw_0 cat jeFFdyPLUts_1 boat jeWf_4ARan0_1 bicycle je8cw_bajbc_1 cat jfENtrpYNKE_2 bear jfENtrpYNKE_1 bear jfixAXjax5I_1 motorcycle jfixAXjax5I_2 motorcycle jfixAXjax5I_0 person jgAt3qPg7A8_2 truck jgD77Vh-X28_0 motorcycle jgGLyRuFOdk_0 bus jglg4qcOpWw_0 skateboard jg7I2TXyQ2Y_2 bus jhQ4iIJ42Yw_0 cat jhSH0EjNy0k_0 car jhjKdc7FtE0_5 airplane jiAVTB1keAQ_0 bicycle jiCp6fAMISg_0 cat jiJWjndM8hI_0 knife jjDZnXMMhEA_0 train jjKsYbTw1qk_0 truck jjNxX05CDNc_0 bird LWv0LbGIDi8_0 car LWxkJ4fux_I_0 knife LWy-Lhb3YEk_0 bear LWy-Lhb3YEk_1 bear LW3bZPt1qrw_5 boat LW7XQWZjBIw_0 dog LXLI-Bzcsf4_2 knife LXLmpEVYE5E_0 train 
LXgItdZ5DXo_0 airplane LYLuXQRCIJ4_0 car LYXMPTRr40M_0 dog LYXMPTRr40M_2 dog LYmsSNBP634_0 knife LY-hwswMG4g_0 cat LZJjKCpcAWA_1 knife LZ_qufxYP3I_0 cat LaA51BrvHGw_1 truck LaA51BrvHGw_2 truck Lam8oTdJids_0 car LanX2twvMmw_1 airplane LanX2twvMmw_0 airplane Lan3os3aUl8_0 boat LbC7nqh0Uyg_2 train LbEPmGgzUIE_0 truck LbvEMq_DQTU_1 train Lbv8FZelQCM_0 truck LcD_I0Lkw3k_0 train LcD_I0Lkw3k_2 train LceJwFxs3q8_0 dog LdEeXsYfzE0_0 car LdLtHx09mII_0 skateboard LdL-cFGaJqU_0 bird LdRX8-r4Cpc_0 car LdggIc_gAew_0 motorcycle LeAl87F6eS0_2 umbrella LeOCD9rZsSI_0 bird LeX-zqgzN3k_1 bird LeljDmw2CGU_0 skateboard LfAbAKrmMq0_6 giraffe LfAbAKrmMq0_7 giraffe LfAbAKrmMq0_1 giraffe LfatUu2cH3Y_0 car LfbQRAjsucU_0 cat Lf5ebV_NH78_0 train LgVi03EiPlQ_2 train LgVi03EiPlQ_0 train LgZrI3dxws4_0 motorcycle LgrPr2OxWcw_0 giraffe Lgyj-vOk72M_0 umbrella LhdXtQ8SbGE_1 bird LhgyObbNmLI_0 bus LhhzzaKmVO4_2 motorcycle Lhm6JF_1lQg_1 train LhnNboAgtNg_0 cat LhtrfEijGHU_0 airplane LiMriWExmQM_0 boat LiZxvVZfUdU_2 umbrella LiwliE18fA4_0 motorcycle LiwliE18fA4_1 motorcycle LiwliE18fA4_2 motorcycle Lizh5Kae5Nk_2 knife Lizh5Kae5Nk_4 knife LiznFL6_r2A_0 motorcycle LjLWamF9HyA_0 giraffe LjjGe9bnQ3Q_0 train Lj0zBxRWoIU_0 skateboard LkFbAjpWRAw_1 giraffe LkFlT3d8MuQ_0 airplane LkmioXgRyo4_0 cat Lk7Z-AUDCuQ_0 cat LlA5ioDqRns_2 bus LlA5ioDqRns_1 bus LlNCPsiSjOU_0 airplane LlS3_VvB4Nw_0 truck LlfRY71K2AU_0 truck LliRBHO1A_E_0 train LlplZ9JJtQw_0 dog LlplZ9JJtQw_2 dog LmFx-lJ6-_M_1 truck LmR0Ur4owgw_0 bicycle LmT8BFH5c7k_0 umbrella LmYKmKucl28_0 truck Lm4mghtFu-I_0 train Lm5GStt7KBw_0 truck Lm5GStt7KBw_1 truck LnGeYd1AsoA_1 bicycle LnKLql5jAXo_0 train LnLlD-mNTtE_0 bear LnPyjqgA37I_0 giraffe LndUw9o_3ME_0 skateboard LnhmeU6oRBE_0 bus Lntuuj_mi9c_3 knife LnyfbZ7-fP4_1 umbrella LnyfbZ7-fP4_0 umbrella LnyfbZ7-fP4_2 umbrella LnyfbZ7-fP4_3 umbrella Ln_tNsQVuwc_0 dog LomkA_DJyEM_1 bird Lo2GqBe8-Qc_0 bus Lo8Q0MdVi9A_1 bear Lo8ZEKusM1o_0 dog LpXfY3oQDIc_0 skateboard LpXfY3oQDIc_1 skateboard LpnkxmohHZ8_1 airplane 
Lpt6bE36Uuw_0 train Lpt8i9V2MK0_1 train Lp88aaB29zE_0 zebra LqOv_DqIWEk_0 boat Lqf8Q1pPNFg_1 knife LrIVNsObdso_0 bird LrKKU5rjq38_2 zebra Lr-9DI7T7JE_0 bird Lr-9DI7T7JE_6 bird LsdHOclMPh4_0 dog LshP_zqoBc0_0 knife LsuQhEjteSE_0 dog LtGXT385l_I_1 dog LtabCE1oaCw_0 bird Ltt24ke9SIA_0 bicycle LtyHCo5uPrQ_0 umbrella LuA9aRIic7s_1 bird LuM1ie5yy70_1 umbrella LuM1ie5yy70_3 umbrella LuQiLJ7-B-8_0 cat LuQxQm7FqD0_0 cat Lua1id9drCA_1 giraffe Luv05fYUS1Y_0 skateboard Lu6WLASNWIM_0 truck Lu6rn2EQSEM_0 motorcycle Lu6rn2EQSEM_2 motorcycle LvPDEznT9Yo_1 bird LvgprOdn070_2 truck LvhxnDPWfXw_0 knife Lvv3Ei45X_4_1 knife Lvz3fP96sew_0 dog Lv7JaIYWXV4_1 dog Lv8u2aPVHmc_2 bird LwChAirlUno_0 skateboard LwMepJ25LgQ_0 bear LwPB4qPCelk_2 car LwPB4qPCelk_0 car LwgyjrFlc5M_0 bicycle LwiTfwL3bCs_0 car LxAhZAbzn7k_2 bird LxjlAGLccRw_0 motorcycle Lxlu3NusDCM_0 bicycle Lx0IybSITTc_0 boat Lx25sZ_GeqA_0 motorcycle LyOo_B0KLAs_0 car LyReFCR-oq8_1 bicycle LyReFCR-oq8_0 bicycle LyiT3ute8W0_0 bird LyiT3ute8W0_1 bird LyiT3ute8W0_3 bird LyiT3ute8W0_4 bird LyiT3ute8W0_5 bird Ly-uIzZCdn0_1 bus LzMxggGTH1I_0 motorcycle LzP0t153jKw_0 skateboard LzY_TxIbKpw_0 train Lzk6uj8FMsE_0 cat Lzp-Yej0-7E_1 bird LztNNlg_fXs_0 knife Lz0Gxxs0FUE_2 bus L0IXFlnu6Qg_0 motorcycle L0US3Aiu1q0_0 truck L0kRKO8zzsI_0 bird L0kRKO8zzsI_3 bird L0kRKO8zzsI_1 bird L1EZ_RVwD8E_0 cat L1LQOPj7NBs_0 truck L1U2YrjRao0_0 bear L1VgJBGpBz8_0 bird L1iiOGDSByA_0 motorcycle L19ZzBwAHrU_0 knife L1_86Xd176w_3 knife L2Efv5kJpc0_0 skateboard L2FE5Lr0wnY_3 bicycle L2FE5Lr0wnY_4 bicycle jjZl3tMuO6w_0 dog jjcoVigCzgg_0 skateboard jjk9P9gQq3E_0 bus jj-p0K2XoQY_0 boat jj_pv9SFrnU_1 umbrella jj_pv9SFrnU_0 umbrella jkGvuOC8azU_0 motorcycle jkGvuOC8azU_1 motorcycle jkKU7T0wpj4_0 bus jkdEq1MRNws_0 cat jkkk9vsCYVA_0 car jkqKyvow-ww_1 skateboard jkqKyvow-ww_0 skateboard jk2gGx6dIWA_0 train jlA3_oF9j-Q_0 motorcycle jluiJgeyCa4_0 truck jluiJgeyCa4_1 truck jlu4Ry8dDus_0 cat jmXmA9egY4s_0 bird jmXmA9egY4s_1 bird jmeVwD4p83w_0 umbrella jm8AZ0aSF0U_0 motorcycle 
jnD_9KMnzpk_2 skateboard jnD_9KMnzpk_1 skateboard jnQYikiCbAM_0 bicycle jnQgVTaiaXk_0 train jnSm3vCtu1k_0 dog jnu28BEM2j0_0 bird jnwQHd-sNW0_0 cat jous_VGiSK0_0 bicycle joxEhiwL-qg_1 skateboard jpBcdceCHgY_0 skateboard jpCdMdRzmuY_0 cat jpuFdyVJJwQ_0 motorcycle jpuFdyVJJwQ_1 motorcycle jpyidnScqNQ_0 umbrella jpzKefnhMA4_0 train jqHtlrHk5Cw_0 dog jqO4FvS_v54_0 boat jqRXcc7rPaY_0 cat jqWXHWqSVX8_0 train jqu6Gjc1hCE_0 person jq9ZPuTO7Rc_0 umbrella jrAyEPgy1LM_1 truck jrLRiCFtlvY_0 skateboard jrNGiQLJ0ug_1 train jrg8oKSN6bk_1 bird jrg8oKSN6bk_0 bird jsJprPZCPvA_0 boat jskm6kDOao0_0 cat jslKL8yQ7v4_0 bird jslKL8yQ7v4_1 bird jsp_sWu7g7Q_1 bear jsx0cE948y8_2 train jtQGgQPHofk_0 boat jtWerSK0atA_0 umbrella jtqUFmuGnVs_0 person jtx5yVxuLzA_0 bicycle jtx5yVxuLzA_2 bicycle juC5lVOX-R8_0 bear juC5lVOX-R8_1 bear juMoEfLbbI4_11 bicycle juUIMSiDGm0_0 umbrella juownJlkGfA_0 train ju08Y0j4rAI_1 car jvKKm9UbcbE_0 cat jvKqk7Yfq5Q_0 truck jvdYM-W5Kmo_2 bear jvxjOOQa_JQ_3 truck jwxSjxJVyOc_0 dog jxIyftPYPsc_0 cat jxIyftPYPsc_1 cat jxlDJ0D2Tec_0 bicycle jxn5iX8buaE_0 truck L2XOsdnKegA_0 dog L2bV5Mh6tLM_0 dog L2e6nVyZ33k_0 car L2gSKheIL48_0 dog L2zsyBTtcqE_0 bird L21bM4j4bEc_0 motorcycle L21bM4j4bEc_4 motorcycle L21sWlIIkHA_1 skateboard L28I6_ASmq0_0 motorcycle L3F2ir5MPj4_3 skateboard L3Q42kZ8Ap8_0 bus L3oyk4iYySM_0 boat L3urWJiuom8_0 bear L32hlxmCYZU_3 bicycle L32hlxmCYZU_6 bicycle L32hlxmCYZU_7 bicycle L32hlxmCYZU_14 bicycle L4NZ3vAx87A_0 boat L4kK9gTKA3Q_2 bear L4w-P2UsvBE_0 bird L5VC4bXm6Kc_0 dog L508o9A8028_0 bicycle L52ZiKJ5NLM_0 truck L5499EWzDaQ_0 motorcycle L6QaXTuDftA_0 bird L6vLixMpRZg_1 dog L6vLixMpRZg_0 person L63p00d7BPY_0 car L7TR8yCVhN0_0 cat L7ZTQMPeHYo_1 knife L7iHAg6bHw4_0 bicycle L7rQQ4IVPrU_1 skateboard L70Zv9DFAhc_0 skateboard L71JgB-L1mA_0 motorcycle L779-Nw9GV4_0 cat L780lAoEC2M_0 giraffe L780lAoEC2M_1 giraffe L8H_7qqaEOM_1 motorcycle L8SF7xF6Ucs_8 bird L8h9dw2kYRA_2 knife L9EAUBlNvLU_1 truck L9LWOPIuvcE_0 train L9L-OlYNdL0_6 knife L9Tx4-RNDqo_2 motorcycle 
L9Tx4-RNDqo_3 motorcycle L9Tx4-RNDqo_1 motorcycle L9Vt1klujtA_0 dog L90g72YGdVA_0 cat L97eqv7bBCE_0 dog L985IUAQ8u8_1 skateboard L-S4CNhlvlM_0 cat L-w35NTF7vA_0 car L-0JgkugTvw_0 giraffe L_AcMGC96O8_0 motorcycle L_ZdaWupJcU_1 boat L_xPWB4viT8_1 dog L_xPWB4viT8_0 dog MAJonEdmXNA_0 truck MAVqUxAjlbg_0 skateboard MBAPF4RVq7E_0 car MBLHIupmPNk_2 truck MBLHIupmPNk_5 truck MBl4bkFRZUY_2 truck MBl4bkFRZUY_0 truck MBuwlS32gjE_0 dog MC8Lal5Lp5Y_0 cat MC-KkFD07Ts_0 dog MDxAuy6D1ks_0 skateboard MD5P0EFFnUQ_1 skateboard MD8RTKTEaM0_1 motorcycle MEi_ikuUJoQ_0 skateboard ME0CETCuaK0_0 boat jyY5W5HiWUQ_1 cat jyeqCulSuVM_0 truck jy_Dr_R-svo_1 umbrella jy_Dr_R-svo_3 umbrella jzRWRRcWffo_0 skateboard j0BXwDs11NY_0 train j0OALCZbAJQ_0 bus j0ii12pbeag_0 knife j0yk2O6HAHA_0 bird j0_9iwi_dm8_0 dog j1CQLHBLwew_0 car j1NePJe1agU_0 bird j1XwtnPy1Ik_1 bear j1rU13Z_fxc_0 bicycle j1utZs4pDTc_0 bicycle j10ev-4-0Fg_0 motorcycle j11_jPnp4Pc_0 cat j2-VEpDwbyo_0 dog j3X6elDpZ-Q_0 bicycle j4K9kM9p16o_1 bear j4Qv6RH4lPk_1 bird j4U8EcQ8K34_0 umbrella j4daTphUuBw_0 cat j4mpJ3QE8VU_1 cat j4ofs57G2Uk_0 skateboard j4rMKhohDps_0 bicycle j4zZbJTAcC4_0 train j4zZbJTAcC4_1 train j5EP2UNErRE_0 dog j5Evt1HJ2ck_0 skateboard j5ayq3AbImg_2 bird j5uxE5IUOhk_0 dog j6GdrMPrcNU_0 train j6P1j6Ed1Hg_0 boat j6Ybo1yk-lE_0 motorcycle j7v1htyJtdo_1 boat j7v1htyJtdo_2 boat j7xvqf1mrUo_2 bird j707fRdtbEE_0 train j8jip_gthjs_0 train j8s5sMFYoiM_3 train j8s5sMFYoiM_1 train j82ZCaABxl8_0 truck j8-maioFCxo_2 boat j924hdZilyY_0 cat j-MwElKg8Tw_0 cat j-VN0PFvkDg_0 train j-a26pZGsKA_5 bicycle j-r3lQdwYeI_0 boat j-r3lQdwYeI_3 boat j-x8lbwsObQ_0 motorcycle j-0kVn7sEvQ_0 motorcycle j-0-IDS-OD4_1 truck j_DE_vsqSZg_0 motorcycle j_D7oxUpZqs_0 bicycle j_D7oxUpZqs_1 bicycle j_FCzH1rLDw_0 train kABwo7h7ILg_18 bicycle kABwo7h7ILg_13 bicycle kANh1n3sh5M_0 giraffe kANh1n3sh5M_3 giraffe kAekmn2pgpc_0 skateboard kAekmn2pgpc_1 skateboard kAhVhIYl-GE_0 motorcycle kAhVhIYl-GE_1 motorcycle MFw-_3fTBzA_0 bicycle MF06s9T8iJA_0 skateboard MF06s9T8iJA_1 
skateboard MGFx6Irt70E_0 knife MGMJ6ocyKXQ_2 boat MGQw41RhBfc_0 motorcycle MG9MouhNLjY_1 knife MG96iokcNoY_0 car MG96iokcNoY_1 car MHIEOK-O3Q4_1 bird MHT9BbNzNJo_0 knife MHqZCkvaub8_1 car MHsxwUMk-_s_8 umbrella MIHg2KAYh5c_0 train MIHg2KAYh5c_3 train MIHg2KAYh5c_1 train MIKCpSFDh4M_0 bear MIKCpSFDh4M_1 bear MIKCpSFDh4M_2 bear MIKCpSFDh4M_3 bear MInom2mFpwg_0 skateboard MI2d7Rd8_Zs_9 bicycle MI2d7Rd8_Zs_10 bicycle MI2d7Rd8_Zs_2 bicycle MI2d7Rd8_Zs_4 bicycle MI2d7Rd8_Zs_5 bicycle MJOztUhgARo_1 bear MJvPtT5tzRI_0 motorcycle MJ3I-JfOG48_0 train MJ6b6iOY7CI_0 car MK2aqzY-UTQ_0 cat MLXY5iff2rU_0 truck MLZ5bpXr5fk_0 bicycle MLrWgAcIumk_3 knife MLrWgAcIumk_1 knife MLtRUMzqhDk_1 dog MLwCW5HBfWQ_0 bicycle MLwCW5HBfWQ_1 bicycle MLyrsP65yc8_0 cat MMGw177uo60_8 bicycle MMGw177uo60_11 bicycle MMGw177uo60_0 bicycle MMGw177uo60_1 bicycle MMGw177uo60_2 bicycle MMGw177uo60_4 bicycle MMGw177uo60_6 bicycle MMX4my6X-xg_0 car MMfLN7_khoc_0 skateboard MMwk9bxedYo_1 bird MMxfwNbWaxc_0 bus MMxfwNbWaxc_1 bus MMzNcR3qtX0_0 knife MM9D2A52FM4_0 cat MNBfv2S-yco_0 dog MNDWyaUDfAM_0 truck MNKwR4IK04k_0 bus MNnYExmY67E_0 bus MNnYExmY67E_3 bus MNuhuq3FP5Q_0 motorcycle MNuhuq3FP5Q_1 motorcycle MNuhuq3FP5Q_2 motorcycle MORtJq8MelU_2 dog MORtJq8MelU_3 dog MORtJq8MelU_0 dog MORtJq8MelU_1 dog MOR6ErlJIp8_0 giraffe MOcTGHSkER0_0 car MOgN13g3SzU_1 motorcycle MOxIwc0MqZ0_1 car MO5aNU1mc1s_2 boat MPQqmw9gvF0_0 dog MP8ETGMyhnU_0 dog MQAJWDp31ag_0 cat MQimJolkMRI_0 cat MQ5mTW70Ebs_1 train MRzphcX41T8_0 umbrella MSWR-YqRwqk_0 cat MSjYJFNM2HU_0 boat MSjYJFNM2HU_3 boat MSonF1662RI_3 skateboard MSp3-aHmNP4_1 truck MSp3-aHmNP4_2 truck MSvmSEk-UJ0_0 bicycle MSxdHgV7e6o_0 car MS7Emoy0Foc_1 boat MTDl42dubw8_0 bear MTr54KYSQBw_0 person MTvLNcYmHhQ_0 car MT-VkX2ZUYs_1 bear MT-VkX2ZUYs_2 bear MT_GWiXfC2k_0 knife MUAuC-rgc9Q_0 dog MUPAcFVQjlE_0 zebra kAkZoxVhM3I_4 train kAkZoxVhM3I_1 train kAkZoxVhM3I_2 train kAkZoxVhM3I_3 train kAmtMpdj5F8_0 dog kAsA28fm6YM_0 dog kBZZqBNk68M_0 cat kBg_1xTx4Dw_0 car kBsc-5sxeTw_1 knife 
kBsc-5sxeTw_3 knife kCWupS0PNHk_0 car kC0y-y4Y9zQ_0 knife kC4_7iM24Uw_0 truck kC7fdR62Lto_0 person kDU_m-Zhi-I_2 bicycle kDsGVRUxg9s_3 bicycle kDsGVRUxg9s_4 bicycle kDvYbh9_fvY_0 dog kDwVR3eWyA4_0 train kD0shq5M7Xw_1 skateboard kD_zeOiIsTM_0 train kEw-F2KrxLQ_0 train kE3cb1gtxpM_0 person kFihVzuPlGI_0 truck kF9uWuyPP8g_0 skateboard kGB7yQn8jpQ_0 bicycle kGkvBOa6Ao0_0 motorcycle kHCbADkGOsE_0 skateboard kHEfe-TDtS0_0 motorcycle kHkZCi873e4_1 motorcycle kH2Vmad_zzc_0 train kH9YVTvwmpM_0 bicycle kIGuIdHDwIw_0 truck kIasEX-cJb8_0 cat kIqavvGxvh0_0 bird kIyZZm3zk5M_0 train kIyZZm3zk5M_1 train kIyZZm3zk5M_2 train kI14RuB6ab4_1 boat kI9E5m5l4Uo_2 bird kJFQOFR0l0w_0 motorcycle kJJuX1cGFYg_0 truck kJJuX1cGFYg_3 truck kJR59i4f5HA_0 train kJR59i4f5HA_2 train kJR59i4f5HA_4 train kJR59i4f5HA_1 train kJUDpKKsNQ8_3 boat kJYZ-XE8ZEQ_0 cat kJuBcbws_zM_2 car kJuuymSuBLA_3 boat kJ2eEJ07dR8_0 cat kJ4rlYx4HDQ_0 motorcycle kKJAqMzsMHo_0 train kKOKJLrWCro_0 motorcycle kKSyjiL5foc_0 skateboard kKTvKA8cd-c_0 bird kKTvKA8cd-c_2 bird kKeaUBfwuG4_0 dog kKfiOXnjX0E_1 bird kKtawdL8xDU_0 umbrella kLL_YMFYoQw_1 car kLL_YMFYoQw_3 car kLgtAl-xGI0_0 bus kL3r_JUstGU_0 bus kL7sfsNuNVw_0 giraffe kL7sfsNuNVw_1 giraffe kL777xHctO4_0 truck kMMe5H6THlA_1 boat kMuQLvHlZM8_1 skateboard kMuQLvHlZM8_2 skateboard kM3Ml3gsG1g_0 boat kM3yM5qONQc_0 person kNNLDq_wPc4_0 dog kNQYLVUS5ag_1 train kNQYLVUS5ag_0 train kNTqRDpy6Jg_0 bicycle kNVh6uD0bMs_0 car kNlVF3ROFLs_0 dog kOOlwQ0DrQU_1 cat kOjjXFA4JLo_0 bicycle kOksVTxs6S0_0 truck kPEf41FB6w4_2 bear kPH88UubFMg_0 bird kPLn0enV644_0 motorcycle kPPya6oadAk_0 truck kPSuwjI94G8_1 bus kP4KkSrY81s_0 motorcycle kP4KkSrY81s_1 motorcycle kP7xV2Efw9c_0 car kQBqt_vvAUc_0 truck kQHn-cRLiDk_1 cat MVG65Om9g1k_0 cat MVG65Om9g1k_1 cat MVPQRjLFz6E_0 boat MVRf770zXL0_0 bus MVZinfPagDI_0 bicycle MVhsNNsDFWo_0 knife MVxJBHYueGI_0 boat MVxJBHYueGI_1 boat MV5174rsbEY_0 bus MV-CnX4Gf7A_0 truck MWGRoXhqRgQ_0 boat MW78cTfzq0c_0 cat MXGO41E37k0_1 train MXVOVBJlezc_1 train MXW5J8Fq8aw_0 
bicycle MYW0loI0g8M_0 dog MZJtj9J3P2w_0 knife MZU8lpmJhxg_0 bus MZaYMDyaATI_5 skateboard MZaYMDyaATI_0 skateboard MZfxKiKSuFU_0 train MZfxKiKSuFU_1 train MZfxKiKSuFU_2 train MZr4cAj7j28_0 motorcycle MZtheeh470g_0 car MZxz9C8nBdA_0 bus MZ4A6ItKCn0_2 knife MaApAnpbJwE_0 motorcycle MaNGPVuxXqo_0 bicycle MaUrOzoC1qE_0 motorcycle MaV9LY8Yf7c_1 skateboard MaeWb_sv_KU_9 bus MaeWb_sv_KU_10 bus MaeWb_sv_KU_1 bus MaeWb_sv_KU_7 bus MaeWb_sv_KU_8 bus MalEpweFuSM_0 motorcycle MarA93dcZrA_0 train MbCJqlLjY_o_2 knife MbK94OERQUw_1 bicycle MbK-28LCQ1g_0 boat McV3_FGrKNw_1 boat MccB4r2uPG8_2 bus MctKaOAWQ2g_0 skateboard Mc_qufFsRZQ_0 train MdP8tqMgy-c_0 boat MdcfoMlgxyI_0 boat MdcfoMlgxyI_7 boat MdcfoMlgxyI_6 boat MeGIovLiBUs_0 cat MeNT1BqRoSk_0 skateboard MeR6T05EfeY_4 train MeR6T05EfeY_5 train MedPaDPXclw_0 train Me6y3gzfhGA_1 cat Me7wQZBbtkw_1 truck Me9X6zA_WSI_2 car Me9X6zA_WSI_3 car Me9X6zA_WSI_0 car Me9X6zA_WSI_1 car MfEA9RwWf8s_1 car MfKpwmhyptQ_6 knife MfQe_WreL6U_0 cat MfVLnZLXmvw_0 boat MfYYHsKxgn0_0 cat MfYYHsKxgn0_1 cat MfaYiIkR0D8_10 dog Mfe3mmOd7co_0 skateboard MflUSzEyPQA_0 dog Mf1njOx66R4_0 knife Mf1njOx66R4_1 knife MgR0ON5CM-E_1 dog MgR0ON5CM-E_0 dog Mg7Ve43Durw_0 zebra Mg9oRrgGKv0_0 skateboard MhFgGvNvIPU_1 motorcycle MhOdsv74XK4_0 bicycle MhPIl5JGvTQ_2 dog MhdkxaMWwb4_0 dog MhfYe7VajGQ_1 train MijD0ZqMorA_3 bear MijD0ZqMorA_4 bear MixmJ2mkl18_5 motorcycle kQhvp8FqRRI_0 motorcycle kQ0WAbN3uvE_2 bicycle kQ0qYUhkgXE_0 zebra kQ0qYUhkgXE_2 zebra kQ27FYyayCg_0 umbrella kQ9C8T343Bg_0 umbrella kQ97WPM3Qw4_0 skateboard kROqNf1kadg_0 bicycle kRWaghM9Bng_4 knife kRYejzNzz-k_0 bird kRYejzNzz-k_2 bird kRYejzNzz-k_5 bird kRtAJBnrb0o_0 cat kSnUCbQ4k4c_1 giraffe kSxPGqWydhQ_0 car kSxPGqWydhQ_1 car kTBAPJCn4AI_1 car kTNOY900Hbk_0 cat kTVuc-2UjPI_0 umbrella kTbS3XR-Xhc_7 bear kTdT3aGZVmo_0 train kTm1R3GaJzg_1 umbrella kTyJyGREDR8_0 boat kUX28ytNCwc_0 car kUcErGH2rjs_0 dog kU8IsLpAlXg_0 motorcycle kU8IsLpAlXg_1 motorcycle kVCic6S6ITo_0 knife kVmUxntjOEk_1 skateboard kVxw5-K9zZk_0 
motorcycle kVyJVrTWLwo_0 cat kVzNGKIHA44_5 giraffe kVzNGKIHA44_2 giraffe kVzNGKIHA44_3 giraffe kVzNGKIHA44_4 giraffe kWHw0OdDAes_0 boat kWHw0OdDAes_1 boat kWo2PlJB2Nc_0 motorcycle kWxJX4oVzMo_3 train kXKTNNclCns_0 dog kXOYPLKJDdI_0 knife kXOYPLKJDdI_2 knife kXVHu_jzgek_0 knife kXj4YpwnHVs_0 car kXliGVQWoAE_0 motorcycle kXwzICrP2CA_1 dog kX-rqtb_n5w_0 boat kYAGyQOUOAw_5 train kYAGyQOUOAw_6 train kYAGyQOUOAw_9 train kYRvBDpWk_0_0 skateboard kYd1dxkZ7Q8_0 dog kYh89aM71_c_0 bicycle kYie2clM8Jg_0 motorcycle kYjiRbFWFuE_0 umbrella kYwzLhWdjYc_0 bird kY1mYWiL24M_2 train kY1mYWiL24M_11 train kY1mYWiL24M_0 train kY1mYWiL24M_1 train kY1mYWiL24M_3 train kY1mYWiL24M_4 train kY1mYWiL24M_5 train kY9lrTOcuxY_1 knife kZNZbhh6P3g_0 cat kZrG7mMww7I_0 truck kZrgKUm3pUs_0 boat kZ1L8FBg_P4_0 cat kZ3A6bY6RHo_0 motorcycle kaKhLfdT3z4_0 truck kaNpALWiNSQ_0 car kadq7fGv_zg_1 motorcycle kao854-T3zw_0 bear kaxFMN_9CfM_0 bear kaxFMN_9CfM_1 bear kazbC0JbsUY_1 boat kazbC0JbsUY_0 boat ka1HMN9Mxho_1 car ka8YGdEujsQ_0 motorcycle kbEenS2dRTc_0 cat kbF3h-YQ7m8_0 skateboard kbuWFd9Vthc_1 umbrella kb2LQHXd2zk_0 car kb-A8wbnvQg_0 bicycle kcBIvi6fhUo_1 bus kcTwHA-N1cg_0 bird kcip1032v3E_1 skateboard kco1LYK4z_w_0 person kdIBzH30zKA_0 dog kdIBzH30zKA_1 dog kdP5V_afg7E_0 skateboard kdRLqCUbWts_0 bird kdUrK5I-cNo_0 car kdU-XJEwZsQ_1 bird kd3DLyL1JMw_0 bicycle keGrBBWcGE4_1 bus keGrBBWcGE4_0 bus kePvCa53REA_0 giraffe kePvCa53REA_1 giraffe kea2UOTXlhs_0 cat kea4eM8Blz8_0 dog ketFGT3U5D0_0 bicycle kexKkPOprms_0 cat ke3yWKL94kE_0 skateboard Mi4HJYsPBPk_0 skateboard MjGAi_5coGY_0 bicycle MjGAi_5coGY_7 bicycle MjGAi_5coGY_5 bicycle MjGAi_5coGY_6 bicycle MjxkMQcgRss_1 car MkF-jfvzRJU_0 bus MkGLvilh-P4_2 dog MkIK8kdqU2I_0 motorcycle MkQzgwai9zk_0 zebra MkYtT0L4_3A_0 truck MktDGOflp1w_0 truck MktDGOflp1w_1 truck Mk82qF_xfzI_1 motorcycle Mk9tGnGNkkE_0 bird MlLHwysBUiY_0 knife MlVr20XSJMY_1 dog MmQIeOEPu9g_2 skateboard MmQIeOEPu9g_0 skateboard MmQIeOEPu9g_1 skateboard MnE1EjTWbTA_2 skateboard MnGGl7pusvI_0 motorcycle 
MnGGl7pusvI_1 motorcycle Mnd7aZxjoEg_0 bird Mnvqegl_fME_1 car Mnvqegl_fME_3 car Mnvqegl_fME_8 car MnyV8-43fRY_0 bicycle Mn2Nul_w66I_1 motorcycle Mn2Nul_w66I_3 motorcycle Mn2_fRbVluE_0 knife MoHDZuwBO4E_0 cat Mog-qUf6B1c_1 cat Mo6Q7lGmAw0_0 skateboard Mp42DoVxbWY_0 motorcycle Mp91b_edytM_1 dog Mp91b_edytM_0 dog MqAlMygAZto_0 cat MqPKFAIxZpE_0 dog MqlxERdGjdg_0 motorcycle MqvfJOEW4oE_0 cat MrsXy6DL4DA_0 truck MrssB6CtGrM_1 giraffe MrvbaDZm6gY_7 knife MrvbaDZm6gY_8 knife Mrwi7WoPJSs_0 cat MrxYHk0ghfM_0 boat Mr1A4et0ESg_0 bird MsFvL8N-3ds_0 umbrella MsQJkEOyREY_0 bicycle MsY_zz2OeKU_0 motorcycle Ms8x8pjN7Fw_1 bicycle Ms8x8pjN7Fw_0 bicycle MtIjkcXspsU_2 motorcycle MtfpgvzOlW8_0 person MtiQjguNpH0_2 boat MtiQjguNpH0_0 boat Mt_4bFjyYuU_0 cat MuLk_dOouJY_0 knife MuOG8PoK21o_0 bus MuVtFYK_nH0_0 bird MuYixry0epc_2 motorcycle MuYixry0epc_0 motorcycle MuYixry0epc_1 motorcycle Mu51W-lkSEc_0 car MvIYOnRinSo_0 bicycle MvxRpbl0BBk_0 bus Mv6v4w7VDFk_1 car Mv_9l8fWiP4_0 truck MwAM4o2GCuM_0 car MwHQb6ZryRA_0 skateboard MwIKOqSMRwk_0 cat MwLnGflxcqc_2 zebra MwNsM6f6fNY_3 bicycle MwNsM6f6fNY_5 bicycle MwN7iYEim6k_0 bird MwW14_GuwLg_1 bus MwdX3PbgC34_0 giraffe Mwjq136uMe0_0 car kfInF5cUU98_0 motorcycle kfInF5cUU98_1 motorcycle kfLnoXlGBvU_0 dog kfhspLhCU5Y_0 cat kgDOVDDZ9eQ_0 cat kgONObiF8Hg_0 cat kgT-NsRkv1c_0 car kgco3sZv7BY_0 cat kgi1KajW_ZU_0 truck kglv-2P5ow4_4 bus kgrFzgXO9Q8_0 skateboard kgsyAMgjuL4_0 bus kgxQ03-tSek_0 bear kg6RFppR4MM_0 knife khUURgtFYBY_1 bicycle khUURgtFYBY_0 bicycle khVST8w3Zzw_0 skateboard khlqzkfBCfc_0 cat khpJlBWPPr4_1 cat kimZApwsJEY_5 bicycle kimZApwsJEY_6 bicycle kimZApwsJEY_0 bicycle kimZApwsJEY_2 bicycle kimZApwsJEY_3 bicycle kimZApwsJEY_4 bicycle kizrM5CZzPk_0 truck kjBdTAkRijw_2 bus kjM0hJl-L44_0 skateboard kjtOW8OAIeY_0 motorcycle kkC5lqQb0t0_0 umbrella kkR7pnou7hc_0 knife kkeBMT1ixs4_0 boat kkkc9xwKGp8_1 skateboard kkvU3dvMkSI_0 truck kk4KuU5X6Lk_0 car klGHWdeD-qw_2 bear kldR5yJFeOo_1 bicycle kldR5yJFeOo_3 bicycle klgANznh5x0_1 bicycle 
kl2buVrYbX8_0 skateboard kl3_w8_h6ts_0 skateboard kl4RYG6OCIY_2 knife kmZFQEGncaI_2 bicycle kmZFQEGncaI_0 bicycle kmllekf2nKc_0 cat kmoaGUqL6bI_0 skateboard kmvCtYXRUhM_0 truck km7aR2fTJlA_2 knife km-3wnNLVYY_0 boat knDRZU9u-Lw_1 boat knVcB-GeINU_0 car knqi3OAHNO8_0 boat koOxoaMnXZc_0 skateboard koOxoaMnXZc_1 skateboard kphV7yVMBOQ_0 bicycle kphV7yVMBOQ_2 bicycle kqDbbFz-XQQ_0 bird kqDxyoQKFfE_0 cat kqVaHPJzEro_0 dog kq4tOnX3m2Y_3 bus kq4tOnX3m2Y_0 truck krSKV36ocSs_0 bear krvyahlS1z4_0 bus kryv5em-VHk_2 bear ksB15ebtJeM_0 umbrella ksCempldLAA_0 skateboard ksCempldLAA_1 skateboard ksCjOk8r4rU_0 person ksSVtTRXRyI_1 bicycle ksk5uCVKU7Y_0 skateboard ksxTUcFqlZw_0 knife ksx219-g47A_0 cat ktHzii2XMh4_0 boat ktPLKpH7-mk_5 dog ktcodoKjIvE_3 bicycle ktcodoKjIvE_4 bicycle ktcodoKjIvE_5 bicycle MwtWyQiagOk_0 bicycle MwvYg837DFU_0 motorcycle MxEjkI5fRh0_0 dog MxHBWltYQX0_0 boat MxKuZbSiZ4s_0 skateboard MxK1dXmYQU8_0 knife Mxr-1toRi3s_0 skateboard MyS7UVUc55M_0 car Mybir4gfQaU_3 bird MzB160hQlFE_9 giraffe MzB160hQlFE_2 giraffe MzB160hQlFE_4 giraffe MzB160hQlFE_5 giraffe MzB160hQlFE_6 giraffe MzB160hQlFE_7 giraffe Mz9ZTHPYJxk_0 dog M0Ga521uzoA_0 dog M0qQQArQdTU_0 bird M088XJeXBS0_0 cat M1UsEMPrCc4_0 knife M1cuEQppjNk_0 bus M1p1DBTuqmk_3 bird M1p1DBTuqmk_1 bird M1xxFVktlzw_1 bird M1zDeqozcU4_1 bus M2R_9l38IUQ_0 bus M2uSqd8ohUk_0 bus M3CUpLmpRBo_0 cat M3OhLKUgQho_0 cat M3P38sLk0pc_0 dog M3tK5YBjyKI_0 truck M3tK5YBjyKI_1 truck M3tK5YBjyKI_2 truck M4CENhQ5vWo_0 cat M4Hqq89bZiE_1 dog M40QOQPocV4_1 car M45MyaeogPU_0 car M5BEqJFfJYw_0 skateboard M5NRM7UQv5c_0 cat M5bLnqKDa1U_0 bear M5kj9SEKNAo_0 bus M6POMFHs-ec_0 bus M6bin6X9FSI_0 knife M6eRY9q89aQ_2 truck M6tXmkLy-2Y_1 bird M7465rUWBzY_1 bicycle M8Lhm-CgqH4_0 cat M8cFdveIy4g_0 cat M8drJLCDOL8_0 cat M8ea7gWeDQ0_0 bird M8f0VhN1ZnY_0 umbrella M8i-DGTEw9M_3 skateboard M8i-DGTEw9M_1 skateboard M8sMZ15CLIU_0 skateboard M9McwXGtZnI_0 cat M9QtHKxypyI_1 knife M9UrZSSK1MA_2 motorcycle M9eiVambl5s_1 dog kuRfhOqyXeY_0 umbrella kuzyHmE3SI0_1 
knife ku68PhgE8bk_0 bird ku7gA5ZLk1Q_0 cat kvFSzJHIsVg_1 knife kwDNLBoEQq8_0 skateboard kwDX0_2B3A0_0 umbrella kwGGXvXtsjI_0 truck kwY370WQYUg_0 car kwbt-wHLPkY_1 car kwlcEg9G1bE_0 knife kwsp30ykR4U_0 boat kxeSYfuQl-I_0 bird kx1bCqhLcbY_0 bus kx5tIvM-9dE_0 knife kyAEyX8zMWQ_0 truck kyPXCwNh7Rg_0 cat kyW_f8sv5iw_1 giraffe kye1Q_k-_Gc_0 bicycle ky1FAcaT3UE_0 dog ky6uivneqIg_0 bird kzblQQcpTdk_0 skateboard kzblQQcpTdk_1 skateboard kzfxn1c7_xc_10 bicycle kzg7y0rERTY_0 bicycle kzi3zDJR9Bc_0 dog kzpJkBQxgE0_1 bicycle kzp3UEwOkJA_0 knife kzw5a8z9cXs_0 bird kz6HYpF3pLo_0 dog k0cUZwgJzB4_0 umbrella k0uDHQea9sg_1 dog k00mpKYHsuU_0 skateboard k1F_TFA3Bbk_0 bicycle k1LrJEfFKag_0 motorcycle k1NVg8uaPE4_1 skateboard k1Q5wms4euk_0 bird k1TOwPACsvY_2 giraffe k1TOwPACsvY_3 giraffe k1vz1ZSBSoo_0 bicycle k2O0XiVn5kw_0 skateboard k2QiX8c3t50_0 bird k2SEBRgras8_3 car k2Z0W54JwB4_0 skateboard k2bQG12smw0_0 cat k2imYphEfo0_0 car k2ocqQxARpQ_0 skateboard k2yx7C__3wY_1 cat k3HKP8CV3CY_0 bus k3LnBcn5zlU_0 boat k3QuANDFgVQ_2 boat k3QuANDFgVQ_3 boat k3QuANDFgVQ_5 boat k3fZgTTMj1g_0 giraffe k3fZgTTMj1g_1 giraffe k3im7HEvSCI_1 bear k4D-Ql4Fg7c_1 bird k4PWQfz5NGo_0 motorcycle k4U1AP6KV4E_1 skateboard k4c6D3ZsdL4_0 truck k5Pp6BYXono_3 bear k5R3cUyyyWo_0 car k5nvWBLlS2c_1 boat k5nvWBLlS2c_2 boat k5vlZTySXDk_0 knife k5yJqWnvZzg_1 bus k5yyV32-nOM_0 motorcycle k5yyV32-nOM_2 motorcycle k55nlQZwGz0_1 boat k57rVPEq54k_1 bear k57rVPEq54k_2 bear k6Bwd6af64Y_2 bear k6gc4du1FqU_0 truck k6l0hwjaeMA_0 motorcycle k6l0hwjaeMA_1 motorcycle k6l0hwjaeMA_2 motorcycle k60P5osD0rU_0 bus k64DU45ej5M_6 car k64DU45ej5M_0 car k64DU45ej5M_1 car k64DU45ej5M_2 car k64DU45ej5M_3 car k64DU45ej5M_5 car k640Wtpq-mU_3 umbrella k640Wtpq-mU_0 umbrella k640Wtpq-mU_1 umbrella k7TCyTff1aM_0 truck k7uTiiG-Ez0_0 bus M-8Zbj9mU9U_0 boat M_miIFgy1Ro_0 bear NAGKrEjU7Sk_3 bird NAGKrEjU7Sk_2 bird NAkFaQBgOvo_0 truck NA9hxGtSLCM_0 bird NA_DgxP18c4_2 motorcycle NBE97NAHACk_0 giraffe NBdhmPgSS2o_1 motorcycle NCNgKQCU8BM_1 bird NCP6Cna8jtY_0 
skateboard NCQ5340WhY8_0 car NCSygygs2Dw_0 skateboard NCWp95If4uM_0 motorcycle NCazYWutlOc_0 boat NCoJmkRt2nE_0 bicycle NDUhlmH9Rz4_0 cat NDYT9jTE54Q_0 bus NDYT9jTE54Q_1 bus ND_GyhH6zgI_0 motorcycle NEQIR06VuP4_1 giraffe NEQOLn6QBuE_8 bird NESQ70PhJU0_1 boat NElB9jKqhLc_0 dog NFjb4XxSoHI_0 skateboard NFye-cUktCg_0 bicycle NFz_zzAU_Hc_2 skateboard NFz_zzAU_Hc_0 skateboard NFz_zzAU_Hc_1 skateboard NF_o01qBrtI_0 skateboard NF_o01qBrtI_1 skateboard NGCjiEfG4C8_0 skateboard NGM0enFRa7E_0 car NGO_7sJEeyk_0 bus NGRBYn2OatE_0 motorcycle NGU-5KGKEJ0_0 bear NGmJtkXyJpc_0 cat NGmKyRRNL_E_0 bird NGw5-auup1k_0 car NG7FgzWn8Gw_1 giraffe NG9SIDqXvic_0 knife NHlayOfSZJc_0 dog NHlsNDcNZqU_0 cat NHmxckr22ws_0 skateboard NIPnaoHgzdU_0 bird NIPnaoHgzdU_1 bird NIPnaoHgzdU_2 bird NIvYcbJIYdA_0 cat NI_YQKOQEvM_1 bird NJeNAw2RnNc_0 bus NJeNAw2RnNc_1 bus NJeNAw2RnNc_3 bus NJeNAw2RnNc_4 bus NJ0O48Pkn2k_0 bird NJ9DpLHaGl8_0 skateboard NKLemqoJ_hA_0 cat NK4942wyYgk_0 bus NLKK4VUbuuI_5 bear NLp8voZylqM_1 knife NLsGPrwnRug_1 bus NLsGPrwnRug_2 bus NL3CG8KGwis_3 giraffe NL5j52SH-yQ_0 bus NL9o4JgV25A_0 dog NMJB2K_UOLc_0 dog NMJLv-oYyNc_1 truck NMJLv-oYyNc_0 truck NMecCV-gtK8_1 dog NM7OVTITkaA_0 cat NNCjf9Qu2RI_0 bear NNHOtBx0FOY_0 motorcycle NNkLZRrMEv4_6 boat NNl4nD5_b_o_0 skateboard k8NHRbiB2Dc_0 dog k8OEoDpqSLk_0 truck k857sWPtmcs_0 cat k9BuU6A21DQ_0 skateboard k9HxprAZods_0 umbrella k9KmR4MNI7o_0 cat k9KtLV0IMgI_0 dog k9PCp-8PFZ0_0 dog k9PX9l8Fnlw_8 bus k9PX9l8Fnlw_0 bus k9PX9l8Fnlw_2 bus k9PX9l8Fnlw_4 bus k9PX9l8Fnlw_5 bus k9VDPqCbqj0_0 bear k9VVUD9wVxk_1 boat k9zLR7VKKpE_0 skateboard k9-PLHxxGHc_0 car k-DOe-pD_MY_0 dog k-Nl-39bZnw_1 skateboard k-SqR4BEw3s_4 motorcycle k-SqR4BEw3s_1 motorcycle k-izgq4Wj4E_0 dog k-izgq4Wj4E_1 dog k_X3oj841SQ_1 motorcycle k_e_YVhclfg_4 truck k_e_YVhclfg_3 truck k_iI2BJQpqo_0 cat k_jXopyxdo0_1 boat k_sLp7QKSu8_0 boat k_tkXRmI_O0_1 skateboard k_tkXRmI_O0_0 skateboard k_vnzrtDfAw_1 cat k_5e1d-vpBU_3 umbrella k_5e1d-vpBU_4 umbrella lAA5eXeYwpo_0 cat lAFonTk_uSA_1 
bear lAI9mfwKMM8_1 dog lAQxdRz4PlQ_0 bear lA3btp7QIxg_0 bus lBH0KOGRswc_0 car lBXWSN3ciPY_0 motorcycle lBsOiAR5dAk_2 bird lBsOiAR5dAk_3 bird lBsOiAR5dAk_4 bird lBsOiAR5dAk_7 bird lBsOiAR5dAk_8 bird lByHH7yvxpA_0 boat lB7j8Z4gGtQ_0 car lB_bnqdnexA_5 bird lB_bnqdnexA_1 bird lB_bnqdnexA_4 bird lCYwepuY9qY_0 truck lCZry6FRpsk_0 bicycle lCf6uL_GkYw_2 bear lC0yidNH6B8_2 bear lC4BoFWvHs4_3 bear lDLYtKqlr5M_0 bus lDf9b9Kr-24_1 truck lDgzFjqokik_0 boat lDqk6pRbY3M_0 bus lDybC3N70so_0 car lD63JOjqTDg_5 bear lD63JOjqTDg_9 bear lD63JOjqTDg_10 bear lD63JOjqTDg_0 bear lEG4DGADyEU_0 bird lEIbERGmlJw_0 umbrella lEWOScSt-Ks_0 dog lEaMfPfi9wI_0 truck lEwJRP_FRW0_0 dog lFYONMOuW_o_0 truck lFqrTC4j9AU_0 cat lF3vWAJRnek_0 motorcycle lGPyv8wlqaw_1 knife lGaQV9YhOac_0 motorcycle lGrVM91Cav8_0 person lG5xlt4odEs_0 truck lHKKhuJtJ9A_0 knife lHXHAD73KC4_0 motorcycle lHX5VdjDPMg_0 knife lHuiaqmISAM_0 motorcycle lHyHQQF-8K0_0 car lIE0SbW_gCY_0 dog lIH_in2H5ds_0 knife lIrvgqkirS4_0 car lIrvgqkirS4_1 car lI6hnnAL_54_1 skateboard lI7VzYQQ8DY_1 bus lJBeZTzXuSk_0 umbrella lJJU-pzIbgs_0 boat lJKxeHgRugQ_0 bicycle lJa2bLMFljk_0 knife lJa2bLMFljk_1 knife lJa2bLMFljk_2 knife lKC5LtWPL6s_0 boat lKEgqjR4HeU_0 bicycle lKEgqjR4HeU_1 bicycle lKJZ4AYoO9g_0 car lKJZ4AYoO9g_1 car lKJZ4AYoO9g_3 car lKJZ4AYoO9g_4 car lKJZ4AYoO9g_5 car lKJZ4AYoO9g_6 car lKJZ4AYoO9g_7 car NOEix5l-1TE_1 bear NOVqPOoUWiM_2 bear NOmc38WuhVA_1 zebra NOmc38WuhVA_2 zebra NPX9qxaZXGQ_1 boat NPc_EhpqV9I_0 cat NPlhHkKnD-o_3 bird NPlhHkKnD-o_1 bird NPnIcXU4TO4_0 truck NPnJoNuZw64_0 bicycle NP2YBNp1eMo_0 bus NP8MrtR7UMQ_0 skateboard NQRWmK2DAwo_1 skateboard NQ7XVf2jPCk_1 bear NRBtrgg-ACI_0 umbrella NRGqiXyM4H0_0 bus NRRxMVw0Fv0_0 umbrella NRV62o4HAaI_0 dog NRkeO8cWvlY_0 skateboard NSEdAs2W7io_1 bus NSrCO0JVjrQ_0 bus NS6Z7neTE58_2 bear NS7vapDr5vE_0 dog NTJsuoSzIX0_8 boat NTi-7LowE5E_4 bicycle NTi-7LowE5E_0 bicycle NTurL251ndw_0 bird NTyAmrmpD-w_0 cat NUOXJlGoyJk_0 motorcycle NURGtF3McGo_0 knife NUU3df9bDmc_0 motorcycle NUhIeMVykto_0 truck 
NUkuVMR_rDA_0 truck NUkuVMR_rDA_1 truck NUo3_VxkQWs_0 truck NUo3_VxkQWs_1 truck NU5WfPjxGO4_1 cat NU60EZnPyy8_0 bird NVAF-TWNge8_0 boat NVeRtjaMVVM_0 car NVz1RXwlQQM_0 skateboard NWOVEKbfu_M_2 cat NWwoSS6oanE_0 bus NW6ZEfS5YY0_0 dog NXHWi70uXME_0 motorcycle NXU1Yxq08KQ_0 skateboard NXe33k8YYzQ_4 truck NXe6DkOAbbo_0 cat NX2FQE2RlgI_0 dog NX2FQE2RlgI_1 dog NYBxFsoPtLU_7 knife NYBxFsoPtLU_2 knife NYVtLPBMGDA_1 dog NYVtLPBMGDA_2 dog NYpkdx_Wzos_0 bicycle NYrd2o8DQhw_0 bird NYsYKDH1T0Y_0 bear NYs9voRwmTk_2 motorcycle NZGyAc3mNmM_1 skateboard NZOBtVvtpfo_0 bird NZoU9njpjBc_2 bird NZoU9njpjBc_1 bird NaOwM5jaBb0_0 bear NaTP9E6Ee6k_0 motorcycle NahvbbnqXN0_0 knife NaszpQMnSmM_0 skateboard NbXn5vr55Ik_0 motorcycle NbnAyKWQOgU_2 truck NbnAyKWQOgU_3 truck Nbz45at2suY_0 bird Nb1nL_IG2Tc_0 umbrella Nb4FhqzK_80_0 bird Nb9Ee0cdc90_4 knife Nb9Ee0cdc90_0 knife NcD7EzR9VKc_0 cat NcODwqAl8wA_0 bird NcODwqAl8wA_1 bird NcnPt-ksZkA_0 motorcycle Ncnr9xhL4RE_1 bird Ncnr9xhL4RE_5 bird Nco2IqVnrXc_0 cat lKiN4UeEuCQ_0 car lKrgSHU_lF4_0 motorcycle lKrgSHU_lF4_1 motorcycle lL9OwfLG-LQ_0 skateboard lMPus-gGijc_0 train lMw3GHYr5nI_3 bear lM2lr9vONXE_1 bird lNDNEdNtW4w_0 umbrella lNLvw0Ga8IY_1 skateboard lNLvw0Ga8IY_2 skateboard lNLvw0Ga8IY_0 skateboard lNShteFjBFI_0 bird lNh4Dhf0JC8_0 truck lNj5zp4Gbsw_1 bird lOGti3Hfk6A_2 bird lOglyCevyZo_0 motorcycle lOzlZJwo_U8_0 motorcycle lO0DJaFrguw_0 motorcycle lO0Nas9ogL0_0 bird lPG5xsRX0U0_0 bird lP3Jv00bEG8_0 bear lQf2-zTERI8_0 motorcycle lQ8AFjrjX64_0 umbrella lRSTcmXYwzM_2 knife lRyY7rtPGJ0_1 dog lRyY7rtPGJ0_0 dog lR-HPtCgbFY_0 car lSefRz_ad2I_0 person lS7IFw-rHNE_0 car lTNivynkdBQ_0 bear lTNivynkdBQ_2 bear lTW53YPXtYw_0 umbrella lTgxSRoCADM_1 boat lTgxSRoCADM_2 boat lTgxSRoCADM_3 boat lTgxSRoCADM_0 boat lTyeSMENfFI_0 dog lT1oYaEt3l0_0 skateboard lT1oYaEt3l0_2 skateboard lT1oYaEt3l0_1 skateboard lUEz6tmtuxs_0 dog lUQr1JtEFAM_0 cat lUSPy6WOhvw_1 boat lUk_G-9RjSE_0 bird lUq042i-r3E_1 dog lUq042i-r3E_2 dog lVCS7_AhLDg_0 cat lVKT0DahELk_0 bus 
lVKT0DahELk_2 bus lVOqUh5DjZE_0 bicycle lVWFKjMWyF8_0 truck lVWFKjMWyF8_1 truck lVWFKjMWyF8_2 truck lVoO_SiGxpw_0 cat lVohP88BOwU_1 giraffe lWDh4SPr76A_1 train lWGBmSVTvwo_2 skateboard lWLYqz3RhXs_0 truck lWkC8ABD6YI_0 knife lWnVG1WyzTQ_0 dog lW8axrSg7EY_0 dog lXJGVOcVinA_1 truck lXkkzYM416M_12 knife lXkkzYM416M_8 knife lXkkzYM416M_11 knife lXshoTSoReY_0 motorcycle lXshoTSoReY_1 motorcycle lXshoTSoReY_2 motorcycle lYC47pEoyKc_2 skateboard lYEiGk0pa9w_1 dog lYP4KB7dANc_0 truck lYcCLy33mJA_0 truck lYcCLy33mJA_1 truck lYrLCKi7wHw_0 knife lYrvoVOM7i8_1 truck lYrvoVOM7i8_2 truck lYzirpo9X4Q_2 knife lY38gkpHWQA_0 dog lZWg3rt2bp4_0 truck lZWg3rt2bp4_1 truck lZWg3rt2bp4_2 truck lZgIg28WsqA_1 dog Ncs0SIaAZjk_0 skateboard NdDPhB7JjOc_1 car NdFMcVN8fkc_0 skateboard NdFMcVN8fkc_1 skateboard Nd2smOOuPs4_0 truck Nd5Cyi1P2AQ_0 person Nd5Cyi1P2AQ_1 motorcycle NesRw9JE-bc_4 dog NesRw9JE-bc_0 dog NesRw9JE-bc_1 dog Ne_T9PyoaOA_0 truck Ne_T9PyoaOA_2 truck NfQ_F7iyFT4_0 bus Nfoq-vLwXMs_0 cat NfuM3ceM9Lg_0 bird Nf4iPszryRI_1 truck NgA6Mi5Qj6Y_1 car NgHJhpedfLw_0 cat NgfJ42fUH10_0 skateboard NglZtOBkn1M_0 boat Ngp2Yvug4N4_0 skateboard Ng7YPssESZs_1 umbrella NhDdHfwovA0_1 truck NhHYQ1QBPq4_0 cat NhJWY87UJGA_0 motorcycle NhKgTGZXrk4_0 motorcycle NiN42Yupn8k_0 motorcycle NiQLFJ_8gI0_1 bird NifFA8VfbMY_0 truck NjPnw9Ofph8_1 bicycle Njr2CQDoQ0w_2 boat Nj1tu2uzjf8_0 umbrella Nj4IqLuQBd0_0 car NkHiSqSViG4_3 truck NkSVC1QmlzA_2 boat NkXF30FQWUs_0 bicycle NkajkrLx-Pg_1 giraffe NkdGD4jRmVk_2 skateboard NkdGD4jRmVk_3 skateboard NkdGD4jRmVk_4 skateboard NkvfxcYCIfg_0 person Nkxm_Grldgg_0 boat NlKX0Q_a4qM_0 bicycle Nl2e8ERoEYk_1 skateboard Nl27zjpvGZk_0 cat NmCxdejUxjE_2 umbrella NmGnWjSHIGc_1 dog NmGnWjSHIGc_3 dog NmGnWjSHIGc_0 dog NmHo6hH22gY_0 cat NmRjRjuwWGU_0 umbrella Nmm4H7xWWeE_0 giraffe NmnOIU5yzmo_0 truck Nm3Wkz8ClY8_4 bicycle Nm3Wkz8ClY8_0 bicycle Nm3Wkz8ClY8_3 bicycle NnQubFQHcUU_0 cat NnSwVsUnfj8_0 dog NnV7SskfNiQ_1 bicycle NnYCP4YouSI_0 skateboard NnYCP4YouSI_1 skateboard NncDYgsTFic_0 
motorcycle Nn1fsXlRDQg_7 bird NoKz0p_h8xA_0 car NoRnxJ4D8OY_0 motorcycle NoglbvaRxAM_1 car NoglbvaRxAM_2 car NopFymjXZBE_0 car NosN0T3He9Y_2 knife NowQILLv6pM_1 motorcycle NoxncYznLDw_0 motorcycle NoxncYznLDw_2 motorcycle NoxncYznLDw_3 motorcycle NoxncYznLDw_5 motorcycle NpbXizTCNgs_0 motorcycle NpciaYlS9Bs_2 skateboard NpptiWtuy7U_1 bird Np0p_ITfRiE_0 boat Np0p_ITfRiE_2 boat NqA0sKGQZbc_0 bird NqD8w0_R9y8_1 motorcycle NqLEhuNiS-A_0 knife NqzZbJJl3E4_0 truck NqzZbJJl3E4_2 truck Nq-mC-BLk1c_0 bird NrGByfXIMJc_0 dog NrGHtOFFLxU_0 motorcycle NrJIz8M3oNM_0 boat laSVNAwUDQc_0 giraffe laiFgjfWMS8_1 bird lajujsJ1J4k_0 bird lajujsJ1J4k_1 bird lauIpA9lVMo_0 skateboard la0ygpbR6t4_0 dog la0ygpbR6t4_1 dog lbCW72FyaQ8_0 umbrella lbC8rsjkZ8Y_1 truck lbDdPmkMwnw_0 motorcycle lbSldeZXn6I_0 skateboard lbZo-rTovyc_0 skateboard lbod3X-5Z40_4 bus lbod3X-5Z40_5 bus lbzHPZpNNjg_0 dog lcSqXrVIbwo_0 motorcycle lcWTw6rAYfI_0 cat lcv8jXnPWQU_0 cat lc6jM9I3ffc_0 motorcycle lc8hZxMLAr4_0 truck ldjVc4u8LUc_1 motorcycle ldqpSPYa-3U_1 bicycle ld5g39_bixY_1 skateboard ld5g39_bixY_2 skateboard lew1kgMUujc_0 car lfPmXUBRa-k_1 bird lfVb7VtGUAI_0 person lfYoLXfvmyo_0 bus lf29DRtjGcY_1 truck lf29DRtjGcY_2 truck lf4Xwro4NOQ_5 bus lgLHq8p_CnA_0 truck lgVXhalKM3w_0 boat lgne-5wGRTg_4 bird lgwnVArDAa0_2 bear lg3udJdBBoI_0 dog lg_4H9FLVog_0 dog lhBsZjQzf8Q_0 motorcycle lhEN_T9FduQ_0 knife lhoMpa49rvU_0 umbrella lh1Brsyb0aE_0 bicycle lh21_LSx_G8_1 dog liDzsyAmMJQ_0 motorcycle liThgzeBkVY_0 cat lite73A-c3o_0 bicycle li8IvNy_DW4_1 bird ljrwXgV0j9o_0 motorcycle lj3DWkRI_HM_2 bear lj3mqLiqSRw_0 knife lj5bI1M_0ZA_0 skateboard lj-BTMsCDdY_0 dog lj-BTMsCDdY_1 dog lkOFpGLmX9s_0 cat lkYuyUsRfWE_1 dog lkg_nXf_W88_0 bicycle llBtQEKaglQ_2 bird llFPEcbP7m8_0 car llWG8M6Fsrg_1 skateboard llu7uI6yzns_0 motorcycle llu7uI6yzns_1 motorcycle llu7uI6yzns_2 motorcycle lmCsOrgM7zE_0 cat lmVNyKFiuQw_3 knife lmVNyKFiuQw_2 knife lm-deiNDAW4_0 motorcycle lnFmVwj7oMg_1 cat lnk0OtCMbBc_0 cat ln5IAoaoPHc_0 dog NrX1AnOpS98_0 bus 
NroEppStyZI_0 bicycle NrvQhlD_Fuw_0 dog NrvQhlD_Fuw_1 dog NsCdsMqUNFc_0 bicycle NsaAbiSbaCc_0 cat NsdCvelNA0g_0 motorcycle NsgZVfgUWco_0 skateboard NsgZVfgUWco_1 skateboard Ns78CA77Hmk_0 bird NtHFEE2Ii0o_0 knife NtQSi_L3_e4_0 bear NttRY9GKNOE_1 car Nt38ikEgqJg_1 dog Nt-UKy4Uq0o_0 car NuOq_HSf26I_0 boat Nucr0ksCppE_0 dog NumUCmB1MLA_0 bus Nu6g6OfLbKU_0 zebra Nu6g6OfLbKU_1 zebra Nu-gGh3BQo0_0 skateboard NvDafPMMZtg_1 cat NvDafPMMZtg_0 cat NvFUKJ9Y500_0 bicycle NvTRLNn1Tk4_0 cat NwC3jHQ65I0_0 bear NwG3zY4-qHs_0 skateboard NwHv08KS8WU_0 truck NwHv08KS8WU_2 truck NwgEA2yRlYk_0 bird NwgEA2yRlYk_5 bird NwlCLmmFUzM_0 truck NwoCpDkRUOc_0 skateboard NwzkWW45Qx0_6 bird Nw1pLrkHm1E_1 cat Nw8ZySxnzIA_0 cat NxPgLux4spk_0 motorcycle Nxgst3FR84g_0 car NyOC1kV5fqc_2 knife NyOVnxlZw44_0 truck NyQlYlDdA1Y_2 skateboard Nyg0BliJTCI_2 umbrella Ny14oMm9C9k_6 skateboard NzIOn70DDCU_0 bicycle NzfwqHNApI8_0 bear Nzqr9pq3W0g_0 bus Nzwcia0dVls_0 bear Nz5AnTEPNKY_3 bird Nz_Dn60wY8c_0 dog N0p_wrAammI_1 bird N0wFxDTDhrA_0 truck N0yYt90fBGo_0 boat N049Vl1eC9E_0 truck N1C5Wk1HQEk_0 cat N1jUvtD_RyY_0 umbrella N1xm5YdzSfQ_0 bird N13r5ZKqAZI_1 boat N2GiHfyj2sY_0 knife N2Y3LmbOWhM_1 cat N2Y3LmbOWhM_2 cat N2e24fXBD58_0 boat N2u1zVHzrfc_0 cat N3D5PnaCpHs_1 knife N3D5PnaCpHs_2 knife N3Iy7f2RrrQ_0 motorcycle N3OIM_qi7dY_0 cat N3VKNNdiRhs_0 umbrella N3ZGT5VDX7A_0 dog N3vCQPsPb7k_0 cat N3x4Fw8PZ04_1 bird N4BazwxnEJU_1 umbrella N4T6B8WAeyw_1 bear N4bUNLwIt-I_0 bicycle N4gBOlxfYUI_0 giraffe N5T8bgYdTg8_0 bird N5cC5-506Yg_0 motorcycle N5uwMT9YWA8_2 umbrella N6FCEWFj0vc_0 truck N6XH-20xsPk_0 bus N6Xl8e3GRcY_0 bird N6gcbwR93B4_1 motorcycle N6rvYTX52x4_0 car loFhsa4OXsA_0 zebra loFhsa4OXsA_1 zebra loFhsa4OXsA_2 zebra loS5Iy7HDhY_2 car loyp0oi9idU_0 cat lpPnun9oDq4_1 boat lqEgRMyazN4_0 dog lqi9uYhr1lU_3 boat lqybkPUTuGk_4 bird lqybkPUTuGk_0 bird lqybkPUTuGk_1 bird lqybkPUTuGk_3 bird lrbJ-8myxJA_1 skateboard lrd8TXYq2Co_0 zebra lrgLAWtIFbQ_0 bird lrk-LSpxnaQ_0 bus lrsspehYW2Q_0 cat lrusc_A2xpY_1 skateboard 
lsQ4p_XwS3U_1 skateboard lsW8rve_6F0_0 bird lsslg2HK3as_1 bird ls7K9Ga_TDo_0 cat ls8cJ6QPPdI_0 truck ltfbVFmlGNs_0 motorcycle ltfbVFmlGNs_1 motorcycle lti3EMrk6hA_0 bird ltyDB0DzJ4o_0 bear luZpSqhxjzc_0 skateboard lujnNrfylcM_0 truck lu4gOMv2LmA_0 dog lvW9JvQnv_U_0 cat lvXow0J0_Z8_5 boat lvpmaJx7Ydo_0 motorcycle lvxwGSPs5eo_0 truck lvxwGSPs5eo_1 truck lv79L0E9KbU_0 cat lv8ApAxhQxg_9 dog lwIzp1ny_cc_0 bicycle lwqQ1SyQ6oc_0 bird lwu1229kxGE_0 umbrella lw-_X5H5dsA_1 skateboard lxfLak4qc0w_3 truck lxxazO-lUhg_0 skateboard lxz5eN6gYvE_0 skateboard lx4WDd9A1jM_0 cat lyBbm0su2N8_0 dog lyDsv_jEl3M_0 motorcycle lylbDiRYA18_0 skateboard lym5pBjKK44_1 boat lyx_DnTpBx4_0 bird lzAGCQoeAug_1 boat lzISnRATBZY_0 motorcycle lzrv6Lmaqhc_0 bicycle lz9wsaAdD3g_0 dog l0HBjPE-vp4_0 bicycle l0LztA4KLq8_1 umbrella l0TccajPnLs_0 cat l0YyZLT2r0Q_0 dog l0dbu61iEXU_0 cat l0kogcjKlvI_0 bird l01YbT30Uzw_0 car l1PoAFZPnAI_0 cat l1cfghmMFfA_0 motorcycle l1dkS9dCOZs_0 truck l1eSoNjG7g4_3 car l1smSqKCK4k_0 person l1wXtZDVtTw_0 bear l120CJB_tWI_0 car l2Cytaq3_MU_0 bird l2d3stMmMjs_0 cat l2pGQEcySt4_0 giraffe l23teWgsK_Q_1 skateboard l23teWgsK_Q_0 skateboard N7HX62OM1Jo_1 car N7WtVRWgYEs_0 bird N8RE_7TdVGo_0 skateboard N8wDSOXX8q4_0 cat N9TwNh9IZug_0 truck N9TwNh9IZug_2 truck N-bSoL4tlX0_0 cat N-ehGzRtoj8_0 bird N-4XvHMsGCk_0 person N-9RtI_ifsk_0 motorcycle N_MWs_Dxjio_0 knife OAJTjsjrFlQ_0 cat OATLx4-34zQ_0 dog OAtOdcwMjgs_0 skateboard OBDA-yKAC_k_0 umbrella OBDA-yKAC_k_2 umbrella OBLc4YWkCqU_0 motorcycle OBYJdeMHD3g_0 motorcycle OBlj7XKW4lc_1 boat OBlj7XKW4lc_0 boat OBti9g_xdjg_0 bus OBuDg5pF8EM_0 motorcycle OBvMQQZSs6Q_0 truck OCEGSfdedcM_1 dog OCYvV1-sQQQ_1 truck OCYvV1-sQQQ_0 truck OCijTz38zrU_0 truck OCpuPcuJN68_1 car OCp5hNHBPpU_6 knife OC3VHGBHbMY_1 dog OC3VHGBHbMY_2 dog ODXPmCSXZDc_1 truck ODXPmCSXZDc_2 truck ODbUQUd4jSU_0 skateboard ODdK6tzKWWs_2 bicycle ODdK6tzKWWs_3 bicycle ODlDtYOtoQs_0 truck ODo-zlQ_GB0_0 truck ODp6c6uSvaU_0 giraffe ODuka2U9fkA_0 bird OD4XXIos2Zo_0 dog OEJox-XKatw_0 
knife OEJox-XKatw_1 knife OEMh8A9j_pg_3 bear OEQV-Uetx8M_0 truck OE0tYMQn8GU_1 bird OFA22Poj7lQ_0 bicycle OFA22Poj7lQ_1 bicycle OFbK3M6Z_QU_2 dog OFbK3M6Z_QU_1 dog OFdr0zUfrlE_0 bus OF2H-LBDSPk_2 bird OF2H-LBDSPk_1 bird OF6Up9vV9Qc_3 truck OGMTfwEYzHA_0 knife OGNQnbR2jAw_1 bear OGVemy4LnsA_0 truck OGbVuwjdEDU_0 motorcycle OGnQhL7HZyI_0 bus OGsEC0i33BY_0 knife OG7Gqq0yNXc_0 skateboard OHWx9W6ECl8_0 giraffe OJ0c10BvtRY_0 dog l3U_T7n5YD8_0 bicycle l3YBS5nRxUY_0 truck l3lkSnsgzx4_0 umbrella l3qhbFnoRvI_0 car l31h7cMiU1I_0 bear l4LQx_ua4m0_0 bus l4MLa-2lkQI_0 bus l4dzsbhTXr4_2 bird l4lv0qkvs10_6 bear l43lNQ5Vq_s_0 bird l4-nRuAZNyY_2 car l5FUU1e4Y60_2 bicycle l5ecq1OhBsk_1 skateboard l5ecq1OhBsk_0 skateboard l508a0nbyQI_6 bicycle l508a0nbyQI_13 bicycle l508a0nbyQI_14 bicycle l508a0nbyQI_18 bicycle l6NgJ2NHnt4_1 bear l6S8h_QnD7U_0 cat l63MzTHehFQ_0 cat l7p6AfqPX2Y_1 motorcycle l7p6AfqPX2Y_0 motorcycle l8-hpsjvPaw_1 truck l8-hpsjvPaw_2 truck l9PH4iTXdYs_0 skateboard l9ZtaPU3mB8_0 knife l9j2X0rGhIY_0 bird l9qm2_xBYHQ_0 cat l9urEyEnxnU_1 knife l96fQdjYlLs_0 knife l-MCmCPjH7k_0 bicycle l-QCC522u8A_0 car l-eNrq-WUQo_0 boat l-98mL8hxMY_0 bicycle l-98mL8hxMY_8 bicycle l_DmnPQxj7k_0 zebra l_scPJDEOuI_0 bird mAE8hqG3eSk_0 bus mAPlm5rMa-w_0 motorcycle mA5ZTSfwetI_0 truck mBEMpccxmBw_1 motorcycle mBEMpccxmBw_0 motorcycle mBTsr9NKqos_0 dog mBTsr9NKqos_1 dog mBTsr9NKqos_2 dog mBivNgtX2dc_1 skateboard mB2K7Cqy5sA_0 knife mB2K7Cqy5sA_1 knife mB2K7Cqy5sA_2 knife mCA3YMqp59Y_0 truck mCVUS1SHxdc_1 bicycle mCaHiS25d_c_0 bird mCipOiHzL24_0 car mCnfYEJ7_nM_1 boat mCplUoipq_M_0 umbrella mCshfLJNDZc_0 truck mC9gh-poTgc_1 bus mC_yfZI-Kfw_0 car mC_8_BVmM48_0 bus mDOnks0KH3c_0 bus mDO2Jg5oyPM_1 umbrella mDTcvH2cBAk_0 truck mDTxktaf2Z0_0 cat mDio2Blh76Y_3 knife mDio2Blh76Y_0 knife mDio2Blh76Y_2 knife mDoksuME2bk_0 knife mECu0xa8vxM_0 bird mEFIkGBIFT4_0 umbrella OKHhm13mZYw_0 bicycle OKJlHLunIJ4_5 truck OKL9IGXZDqg_0 cat OKOBYUJfsW0_3 bus OKVeF8WX7nM_1 dog OKXlOHWMVYI_0 bicycle OKXlOHWMVYI_2 bicycle 
OKniUxVle4E_0 dog OK1lt5Hbk8U_0 bird OK1lt5Hbk8U_1 bird OK72g05p_nY_0 bird OLWhwdr2s3U_0 motorcycle OLqz23zKUZ0_0 skateboard OMJA4N9BRjk_0 bus OMJA4N9BRjk_1 bus OMROj6nJzNU_0 umbrella OMscf19CmfE_0 cat OMszUYfxt-k_0 dog OM7YDn8Aj8U_0 cat ONQt1uMKjzM_0 cat ONQ7_XR_YoE_0 car ONvq-WMS04Q_0 bus ON25DCtbtZI_0 bird ON25DCtbtZI_1 bird OOOsedHMhFE_0 dog OPFx79LTPYQ_0 knife OPFx79LTPYQ_1 knife OPNrGuEJKfQ_0 cat OPRxB1VUSzc_0 bus OPZI6LUwe80_0 truck OPny4vHo5EQ_1 motorcycle OQWmlKTZbJA_1 boat OQh45xm5OzM_0 car OQlHcCttP0Y_0 motorcycle OQ5Q0IvSVJw_0 skateboard OROW-2FDArE_0 knife ORjDIPVlrpY_3 boat ORyOEpNkmQU_0 bicycle ORyOEpNkmQU_1 bicycle OR1UJ2WJswk_0 umbrella OR8th1OG-XE_0 umbrella OSia7sePfOs_0 dog OS2Ga4W91oU_0 boat OTGZvd8HEBs_6 umbrella OTGZvd8HEBs_1 umbrella OTGZvd8HEBs_5 umbrella OTK2nAcxHMw_0 truck OTSLZbr15Rk_0 truck OTXkN6YTPBY_2 bear OTvtQllL8ho_0 giraffe OT1tUDnxHUY_1 bird OT1tUDnxHUY_0 bird OUDo6Wi3Mx0_0 bicycle OUaP4Qe7K_k_0 skateboard OU9OQRs4Ff4_3 truck OU9OQRs4Ff4_0 truck OVBUoFuLqko_3 boat OVBUoFuLqko_4 boat OVBUoFuLqko_5 boat OVBUoFuLqko_0 boat OVBUoFuLqko_1 boat OV8AfAYiWos_3 truck OWe4Ah3rUkU_3 truck OWe4Ah3rUkU_4 truck OWwYp5TMtyo_0 dog OW09PhbCZ2c_0 cat OW9poTV3Pw0_0 motorcycle OXDBegRD_hY_4 bear OXleFWP00RU_0 skateboard OXn_z6r4tTM_0 bicycle OX46gFmob50_0 boat OYAOM3GxoFs_0 umbrella mEhLlaG7ivE_0 car mEyJVUti9TA_0 bird mFCrAjplP-s_1 truck mFQSD32phtQ_0 motorcycle mFoVk3mdfVs_0 boat mFpufihJP34_0 truck mF3uYMbMsrA_1 truck mGAgv6gfUIA_1 giraffe mGP0JfjwxXU_0 car mGP0JfjwxXU_1 car mGwC1aGK8EQ_1 motorcycle mGwC1aGK8EQ_0 motorcycle mG6Uz7wciew_0 truck mHNOyEXbwsg_4 bear mHORHQS-7WE_0 motorcycle mHPMxlukQ30_0 motorcycle mHfy3z8lzZY_0 bus mHicqYMm5B8_0 bird mHicqYMm5B8_1 bird mHtWCmdt2ck_0 cat mHwCC0jnHbI_0 bear mHwgF2IQCd8_0 motorcycle mIn-Tkvx0xg_0 truck mIx7ZeZ2Vv8_1 truck mJ0xD-4leB8_0 cat mKRUuWYJC2k_0 motorcycle mKRUuWYJC2k_1 motorcycle mKRUuWYJC2k_2 motorcycle mKRUuWYJC2k_3 motorcycle mKWmMLNNRAQ_0 zebra mKgld1efJss_0 bus mKu97ivRVSM_0 knife 
mKu97ivRVSM_1 knife mLDjtK6d-W0_0 knife mLGU-BL1agI_1 cat mLG8EyllDhA_0 car mLIp-YLvQaA_0 zebra mLgNPTUe_XI_0 cat mLmtVR-AGCk_0 bear mLpoizHo-v4_0 dog mMG1DT2mUAo_0 skateboard mMUflfP_ZMY_0 cat mMXGos8VYQI_1 dog mMt-gdadsY4_1 dog mNFkEphgV18_1 bicycle mNdM6zfb6FA_0 cat mNeHO27e_i4_0 bus mNeHO27e_i4_1 bus mOMvL5XuAZs_0 truck mOVza6TV55E_0 bicycle mOcxsTLCyfM_0 umbrella mOjLK3sW2lA_0 skateboard mO3CzDojFYs_0 dog mO8cYs6iJlE_0 cat mPCBb4ndGx0_3 car mPCBb4ndGx0_2 car mPPaPa0iD_c_0 dog mPV3eyH3uiY_0 bicycle mPW-nXWaC4U_0 cat mP223OT32Rc_0 knife mP223OT32Rc_1 knife mP553XrHpVs_0 motorcycle mQD1eeRC1Q4_0 knife mQf2FppJTEM_0 bird mRFdLfB4a1s_3 bear mRI6bXmeH0U_0 knife mRMc_QxifPU_0 truck mRMc_QxifPU_1 truck mROsO1LIGpo_0 truck mRYB4i5ld-k_0 dog mRkf0ciWPgI_9 bird mRl54j1LWx8_0 person mRyO8jtjseY_1 car mRyO8jtjseY_2 car mR0m08J8B08_4 boat mR0m08J8B08_0 boat mR0m08J8B08_1 boat mR0m08J8B08_2 boat mSTIz-CdXqU_0 truck mSf7pQlzXuw_0 giraffe mSgbTXZAzDk_1 umbrella mSvLPzkZzps_0 knife mSxrYqw4oqg_1 umbrella mSztwZ01Pck_0 bus OYIPropF-hA_2 knife OYTwB7sOFYE_0 bird OYa8DOvcJkU_0 cat OYf6rSUrwxc_0 dog OYnjEcx19SM_0 cat OZBLMb8bGX8_0 zebra OZcS8vrufig_0 dog OZeialzVvBQ_0 bird OZqsh8FFeFo_0 truck OZqsh8FFeFo_3 truck OZqsh8FFeFo_4 truck OZstdGSfBBw_0 bird OZ2Xf6zzI5Q_1 skateboard OZ2Xf6zzI5Q_0 skateboard OaR_KKoBRYA_0 boat Oai5vIFRADY_0 truck Oaxb1TjNF5A_0 boat ObG3TG10dF0_0 dog ObLBCGg01UY_5 skateboard ObLBCGg01UY_1 skateboard ObLBCGg01UY_2 skateboard ObLBCGg01UY_3 skateboard ObLBCGg01UY_4 skateboard ObMci_3wRII_0 boat Obmxs3FqVc0_0 truck Obol9FzC6qw_0 boat OburzWcRnbc_0 skateboard Ob5o_Ufzxvo_0 umbrella Ob6-UrKFrTY_5 boat OcFGISpeAn0_0 skateboard OcQBa7E9-AI_1 car OcZG24cCgsU_2 boat OchOHb4q-iE_0 bicycle OcmRyP_n53E_0 truck OcuYOC6GylA_0 car Oc1tfJzLD3o_1 bus Oc1tfJzLD3o_2 bus Oc1tfJzLD3o_0 bus OdGHHAUYow4_0 boat Odl4k8y8GfI_1 skateboard Odo1ZvyEbqs_3 bear Odo1ZvyEbqs_6 bear Odo1ZvyEbqs_7 bear OeJet0TZ0Ns_0 cat OecO1BnSygU_0 umbrella OecO1BnSygU_1 umbrella OepCeq6zNOc_0 umbrella 
OevlneuqSNg_0 skateboard Oe3qCUtDCoI_0 bear OfD7c6vcSKc_0 motorcycle OfFZrl_Ltoo_0 dog OfQ3Y3DEgNI_0 skateboard OfZ9wyeuMaU_0 skateboard Ofcr6xsiMGY_1 knife OfmW_n1WB-0_0 bird OfpLj-uw2VM_0 skateboard Ofv2SMoyg_8_0 boat OgG3xES-A9s_0 bicycle OgG3xES-A9s_1 bicycle OgtTZgAAtrk_0 bear Og77fxfsfzI_0 skateboard Og83XjWPr30_0 bird Og_sRGRP2fw_0 motorcycle OhQqfPIVR_o_0 truck Ohh5X9j8-P4_0 skateboard OhvnlA9rzUA_0 umbrella Oh4vuNdjqGg_1 boat Oh4vuNdjqGg_3 boat Oh79QNRx0m0_0 bus OiHa7vhbW0g_0 umbrella OiT0hP6IU_0_1 car OidiasYmhhk_0 dog Oiuo__vi77s_0 motorcycle Oi3BJVuj3f8_0 bus Ojst9j_7TPs_0 motorcycle OjxLYDs9O2w_1 skateboard Okd1qAIUuZo_0 skateboard Okd1qAIUuZo_2 skateboard OlO9xdVfniA_0 bus OlPObAsvFRE_0 bear OlQykWy5_d0_0 skateboard OlVZS0O7Xcc_0 bus OlVZS0O7Xcc_1 bus OlVofey46c8_2 giraffe OlVofey46c8_0 giraffe Oldv3-_fn3E_0 motorcycle OlufwgkC9nA_0 cat Ol3C5MWakic_0 bus Ol63TPS0wjE_0 skateboard OmTHe4jPR30_0 umbrella mTnSFF649v4_0 motorcycle mTtOhVJYmco_0 bicycle mTuXb1mo6ms_1 motorcycle mTwbZIC2mjs_0 umbrella mUllN4tCjhg_0 car mVWf8BrbbQc_0 skateboard mVZVZPz-0uk_0 knife mVztYl0hyR0_1 bird mWOuUa5VTIU_4 bird mWSWZi7ef2Q_0 bus mWULzZ-r0BE_10 bear mWULzZ-r0BE_0 bear mWULzZ-r0BE_1 bear mWULzZ-r0BE_3 bear mWULzZ-r0BE_6 bear mWULzZ-r0BE_7 bear mWULzZ-r0BE_9 bear mW85x5O3sQM_1 bus mXYQlH9le8Y_0 dog mXt-xLcVJTM_0 knife mXuPzw4I-wQ_0 dog mXu238CeGfQ_0 motorcycle mXu238CeGfQ_1 motorcycle mX3SlrHHN8A_2 knife mX3SlrHHN8A_3 knife mYFsdZ6ZiHg_0 skateboard mYYLIkI65fA_0 cat mYgcUWeYKeE_0 cat mYhujznmuic_0 motorcycle mYtEL2P4G64_2 truck mYtEL2P4G64_0 truck mY6M_QMVm6A_0 dog mY6M_QMVm6A_1 dog mY6M_QMVm6A_2 dog mY6M_QMVm6A_3 dog mZEPBKLKQLU_0 skateboard mZStBRJGz0o_0 bird mZWugKrC8fs_0 truck mZ0LxtaLk9s_0 bicycle mZ0LxtaLk9s_1 bicycle mZ1ae3QtMqY_1 skateboard mZ6SXifL_5I_0 cat maANeKOpibc_0 bus maATqEbCdmA_0 boat maOsv3Gen0Q_0 motorcycle magDXuphf6E_0 truck mavzqjj21eQ_0 motorcycle mbFrW58khSM_0 motorcycle mbtyAyprPhQ_0 motorcycle mbuozxoOynA_0 bus mb9G4GF56RA_1 umbrella 
mb9G4GF56RA_2 umbrella mb-nes45JeE_1 bird mb-nes45JeE_0 bird mcCOvhuC86Q_0 cat mcxoHsKM444_0 cat mc0A1NsuIBI_0 bird mdDoBuc7jag_0 boat mdZbK8mOA5Y_0 motorcycle mdxbRZzm2Fo_4 truck mdzJDnEx5AI_2 boat md8Xi01GJ0Q_3 bird meRJPfPZTpw_3 bird mebu5O8auic_1 bird mehKWfZTJQE_0 bird mfPFvq57cxM_0 skateboard mf4LyMZ6wyY_0 skateboard mgEZVZrBkrg_0 bicycle mgEkK74q1Lo_0 motorcycle mgTCPe8eM00_1 umbrella mgVB0o0U17w_1 skateboard mgVB0o0U17w_0 skateboard mhSSgOcQwd8_0 cat mhqhGszzAR8_0 cat miC3NPxHofU_1 bird miQyhDocW3I_1 dog Om1q-9YbJu0_0 bus Om-NvWZY9XM_0 bear OnKqSIvDmuM_0 skateboard OnemsYazBrQ_0 truck OnemsYazBrQ_5 truck OnemsYazBrQ_1 truck OnemsYazBrQ_2 truck OnemsYazBrQ_3 truck On3Yd3AHFp0_0 dog On3b0cn9QYE_0 bear On-GcAXLGZ0_0 motorcycle On_5UKUJi7U_0 car OohVLB8HrmU_0 bear Oo2Ux9rWYGo_0 skateboard Oo8VLA_C0ho_5 bicycle OpAPsb8a7ck_1 bicycle OpD07kt9gdg_0 motorcycle OplcFe9OOMA_0 boat OplcFe9OOMA_1 boat OpqmXBQU87o_0 truck Op3764NveuQ_1 bicycle OqKwAAWtANM_0 cat OqPOCcEAHqk_0 skateboard OqbhEJlCp48_0 skateboard Oqc407hvhn8_0 skateboard Oqjbl3c9LYU_0 bear Oqo2P7az_Jw_2 motorcycle OrhDfcZqq1E_0 cat OrhipZ8lZHo_2 bird OrhipZ8lZHo_3 bird OrhipZ8lZHo_1 bird OrmkaB0vrG8_0 dog Or-E2m2p4X8_0 motorcycle Or--toMjK3I_3 boat OsvYa6TnsFI_2 car OtIV8clF1-o_0 bird OtIV8clF1-o_2 bird OtKIh5W3Uro_0 bird Otw43WNrlsM_1 bicycle Otw43WNrlsM_2 bicycle OtyYn5vEHbM_2 skateboard OuBzZzA9Q7o_0 bicycle OuJMq2UqA-s_0 dog OuVznEsiyyA_1 motorcycle Oui9ZgfJiJE_0 skateboard Ou1yCmmAuSY_0 cat OvEJdKYqvF4_2 dog OvEJdKYqvF4_5 dog OvZguhO8UVQ_1 bird OvlqAWflXBs_1 bus OwQktS0dM3k_1 truck OwUWoVRKf7E_2 zebra OxADHlAb7dM_0 boat OxInmNOeLHY_0 bicycle OxInmNOeLHY_1 bicycle OxInmNOeLHY_2 bicycle OxInmNOeLHY_3 bicycle OxInmNOeLHY_5 bicycle OxInmNOeLHY_6 bicycle OxInmNOeLHY_7 bicycle OxZdEZCJtcw_1 motorcycle Oxkx4bWzOMo_0 skateboard Oxp9w62kg0Y_2 knife Oxp9w62kg0Y_3 knife Ox1idrJvs2E_0 cat Ox_8K3szIs0_0 cat OyGSbm149i8_0 boat OzBFCX0vpiU_0 motorcycle OzCvvptC7o8_1 bicycle OzHYG5kpMbw_0 car OzItTAjpb9U_1 knife 
OzwlwZq46z8_3 bus Oz89_rVdBV0_0 knife Oz89_rVdBV0_1 knife O0JwQIk5pZY_5 knife O0Xl3AF_T0s_0 dog O0dforbCqKM_0 cat O0lfImzhCM4_0 dog O0p5eAP2AyA_0 boat O0rSIIipDT0_0 truck O02oVGyCZDI_0 motorcycle O0_GC-1pCYk_0 bear mj155rqWO3k_0 motorcycle mj5oMHI4Ch0_1 skateboard mkPWdHTd5X8_0 bear mkVYY1EvetE_1 bicycle mkZA72VL1oI_0 zebra mlAzMb61fYU_0 cat mlJYoZVHztc_0 bear mlT2XD9k5Ro_2 bird mlophh4mK4A_2 bicycle mluR2OjQTmU_0 car mlwpiHjyzIA_1 motorcycle ml4BVi7cCV4_0 bird mmnFugXdqlQ_1 truck mmnFugXdqlQ_0 truck mmojCWiaNYI_0 cat mm_Udf1FG0s_0 cat mnB2hBuySsI_1 bear mnB2hBuySsI_2 bear mn_cuBRZu8M_0 cat moZR-AtZJnI_0 cat mobg7uEQTmo_0 umbrella mogyHm8Jiok_0 bird moh4TWSe9Fc_0 umbrella motFo9G-GLs_0 skateboard moyxRLHHeiI_0 bus mo5ZpMFELUQ_0 motorcycle mpO9dBwTeW4_0 bicycle mpYAM0x6L5M_0 motorcycle mphFmT6TzLM_1 knife mphFmT6TzLM_2 knife mphFmT6TzLM_3 knife mp25XfIJhQY_0 cat mp8USuQKinc_0 bird mqUyhzbCpig_0 motorcycle mqjilBZByTI_0 skateboard mq5DqmYGVM4_0 person mrJAakc7Fj8_0 bus mrOsDCuEdRQ_1 dog mrY8gIFiUhE_0 car mrhfyNpFMq4_1 truck mryDGEujJno_0 motorcycle msNXnb1a02o_0 knife msbOXFTsSVU_0 giraffe mszokIKsdUk_0 bus ms0_k1aLULU_0 truck mtITgRv95Sw_0 dog mtU7bHAsI8Y_0 cat mtZHgLGJiu4_0 skateboard mtmzPf2AZuI_0 skateboard mtnURpE0wyE_0 bicycle mtnURpE0wyE_1 bicycle mtnURpE0wyE_2 bicycle mtpTPJtG8F4_0 motorcycle mt_LZ5UsG_w_5 knife mt_LZ5UsG_w_1 knife muKQy-1p4fg_0 truck muWIt0X4pKQ_0 dog muZ7xPF8odU_2 bicycle mueRS6nKTdA_0 bicycle mujGcuAzOdo_1 bear mujGcuAzOdo_4 bear mulQIomc988_1 bicycle mulQIomc988_3 bicycle muoqLEyrhhI_0 dog mursOuNatdc_0 boat mu65YolQZds_0 knife mvEcWlHP6u4_0 bicycle mvYBfdZkCe8_0 dog mvb5jVJeuGE_0 person mvhEFfQeFCY_0 bear mv2FHxOHSR0_1 truck mwAPVTEbZGM_0 skateboard mwAPVTEbZGM_1 skateboard mwBKrjOpxkY_0 skateboard mwIroQ9RbXA_0 bird mwrxbdZraRk_0 car mw5fQZ8EB5I_0 knife O1KrpGSvXAY_0 knife O19Mlhhzqgc_2 bear O2ZR7HPYZCo_0 cat O2u5126JYpY_2 motorcycle O3DA7qzf2s8_1 bus O3DA7qzf2s8_0 bus O3y2taxKvCA_2 boat O4CfuT5BDcc_0 skateboard O4VQQaJ07zY_0 cat 
O5b3XcEGZ4M_0 car O54XRvo6VU0_2 motorcycle O54XRvo6VU0_0 motorcycle O59A3lMogSo_0 giraffe O59A3lMogSo_1 giraffe O6BXRuq_YcE_0 dog O6BXRuq_YcE_2 dog O6EtCByhFZI_0 truck O6Jf2yxCTuI_0 cat O6Uln7GkqDA_0 skateboard O6b3a--pX3E_0 bird O6kqsEuKhis_0 bird O69gCmR0LvA_2 motorcycle O69gCmR0LvA_3 motorcycle O7ReHsig5IQ_1 knife O7Wrpfzb8_g_0 bear O7lvzdzmX5k_2 bicycle O8BNclEPo5w_2 dog O8BNclEPo5w_1 dog O8f0Dhn1as0_0 umbrella O8sB46kfM28_7 umbrella O8sB46kfM28_6 umbrella O8sB46kfM28_13 umbrella O9Duu2Un8AE_0 skateboard O9Duu2Un8AE_1 skateboard O9Duu2Un8AE_2 skateboard O9EqKcj_CPs_0 umbrella O9iWg3ZqLcU_2 bear O-NZJ4-eoQ8_0 bus O-ZUr1bQzp4_6 umbrella O-kJ078YJq4_7 truck O-2S79hisI8_0 zebra O-4CV4-x7Tk_0 dog O_D7M00pmjQ_0 motorcycle O_PCiV3NICw_0 dog O_bAX_ruSNQ_0 skateboard O_fZm7Mblgg_0 knife O_mRo8YLc50_0 umbrella O_3VssPsSVQ_5 bicycle PABLxf3U8qc_2 bicycle PABLxf3U8qc_4 bicycle PABLxf3U8qc_1 bicycle PASMcbnOtUM_1 bird PAZBEMKPQEw_4 boat PAbB9I6MC_o_2 boat PBD1IW-vA6Y_0 dog PBD1IW-vA6Y_1 dog PBQjiKBWtao_1 bicycle PBQjiKBWtao_3 bicycle PBqIT1T_Tl4_2 umbrella PByJb40LNJ4_28 bicycle PByJb40LNJ4_30 bicycle PByJb40LNJ4_3 bicycle PByJb40LNJ4_13 bicycle PByJb40LNJ4_18 bicycle PByJb40LNJ4_22 bicycle PB8sWVNFkDw_1 motorcycle mxA8JbJ0Do8_0 car mxFga0703Mc_0 bird mxMCBmJ5owQ_2 truck mxXH5aZCSJ8_0 truck mxYl5Y1KAiY_0 dog mxZgNkjbyxk_1 knife mxeuMHAWMxo_6 knife mxeuMHAWMxo_7 knife mxeuMHAWMxo_9 knife mxsTfEQlVgM_0 motorcycle mxvG6gSVYuo_0 bicycle mxwmtm7rKF8_0 car mxxiqhZzhEE_0 motorcycle mxyHDUSMhLs_0 cat mx2i3CYeEEE_0 bear myRelcztkqo_1 knife myWzn06fmDI_0 dog myY1Ijlbknw_1 bicycle myY1Ijlbknw_4 bicycle myY1Ijlbknw_5 bicycle myY1Ijlbknw_2 bicycle mymtiyldysk_0 truck mzGmbowEFfA_1 knife mzMgXA_v8q4_0 motorcycle mzYPSSUS--w_2 boat mzYPSSUS--w_0 boat mzdD_0CKekQ_0 motorcycle mzfrEqAhHeY_0 bus mzm_D3J8zqQ_0 umbrella mzyu28WsuFs_0 motorcycle m0MVwwL_0MM_0 bicycle m0gukhoxW0Q_0 skateboard m0gukhoxW0Q_1 skateboard m0gukhoxW0Q_2 skateboard m08CnM1FBR0_0 cat m0_tPmnque0_0 bicycle m0_tPmnque0_1 
bicycle m1Qhj9jYohk_0 bus m1pFyDGuVzk_1 skateboard m1pFyDGuVzk_2 skateboard m2StZDAc1yw_0 bird m2uQowbhYDc_1 bear m3AM4AQLDo0_0 zebra m3AM4AQLDo0_1 zebra m3RCOnTUyMY_0 boat m3RCOnTUyMY_1 boat m3SOT8NCOEY_0 bicycle m3cgfDs0_G8_2 dog m3fctWcU4as_0 motorcycle m3sztS1QC3s_0 cat m3uDjNrfbD8_1 bear m35CwgXROHw_0 car m4qZSrgBZkc_0 bird m4qZSrgBZkc_1 bird m6NemUzZQFc_1 motorcycle m6NemUzZQFc_0 motorcycle m6S6MEQgo2E_2 motorcycle m6S6MEQgo2E_4 motorcycle m6hQABEUkQQ_4 boat m6z3sbKYwcc_3 bus m6z3sbKYwcc_4 bus m669S-54lMc_0 motorcycle m669S-54lMc_1 motorcycle m7djLwb_a5k_0 car m7k5fJXTZPI_5 bird m7xUarlXKEw_0 umbrella m7xUarlXKEw_4 umbrella m7xUarlXKEw_1 umbrella m7xUarlXKEw_2 umbrella m8B-pb1I7nc_0 cat m8YA8dXocmg_2 boat m8t6gPBCxr8_0 truck m9HGLakPqSo_1 bear m-NEL2Jq0nQ_2 car m-dKTMwfPqo_0 truck m_JHW_eCKY0_0 umbrella m_dOsn1chuA_1 bus m_dOsn1chuA_2 bus PCC9sJ4Gdxw_0 car PCeoeGBYrJU_0 dog PCqa_yHJ32g_2 bicycle PC2plr6JdQg_0 umbrella PC_wbEzLNLQ_0 bicycle PC_wbEzLNLQ_1 bicycle PDU92To89cE_1 bird PDlKUKo06lI_0 knife PDvSiH5Pf_0_0 bus PEC7E1t79A8_0 car PEJFRzyvIBc_0 bird PEJvGdLGOjU_0 zebra PEY59JrOz5I_1 bird PEY59JrOz5I_0 bird PEfpmwboH3w_0 bus PEtsR4S5Zzg_0 bicycle PE_zE5T1ayo_0 cat PFJiRWGaPaw_0 car PFJiRWGaPaw_1 car PFa_RCiQVjA_0 skateboard PFjuIzuDmJs_1 knife PF8HAptOIC8_1 car PGEM0ys1sGE_0 knife PGMimFwsl54_0 cat PGP0PEOv3zw_2 bear PGP0PEOv3zw_0 bear PGipyYSRHso_0 bicycle PGn623RKWNA_1 car PG8bMx6DuSo_0 knife PHeQ1xoUBgg_1 boat PHmnvFIAtHo_0 bus PHxuey2u6UE_0 skateboard PIDvuyKFIJ8_0 cat PIT2XsuODRE_0 bird PIa767e6xuQ_0 cat PIkhnCxrF9g_0 cat PInIdEVTPn0_7 truck PI5ROW9ewOg_0 cat PJoSJpMWo0Y_3 skateboard PKTJIVIuSFw_0 truck PKZXF6Hj0kw_0 bird PKZXF6Hj0kw_2 bird PKZXF6Hj0kw_1 bird PKtfgOMwx4A_0 dog PK-4bXZDtlA_1 skateboard PLO2xY76oh4_0 motorcycle PLVEvFhXHAE_0 truck PLVEvFhXHAE_1 truck PLd8HlO4HYo_1 cat PLd8HlO4HYo_0 cat PLwQ0AHwZgg_1 skateboard PLwQ0AHwZgg_2 skateboard PL2FcMREy_0_0 bicycle PMRnsvlMF4A_0 skateboard PMUqAknVm2Q_0 motorcycle PMXmKup8jy4_0 boat 
PMkiPjm9XdY_1 motorcycle PM028PEyjv0_4 bear PNpDnymoq8w_0 truck PN6PB668zV4_0 truck PN86cQumWDU_0 motorcycle PN_b6R9HxwQ_2 cat POQalChDjmU_0 skateboard POW6F8MZMTQ_1 bird PO-OnjGHjDk_0 bus PPI6aG2QFaM_0 bird PPdV273cZC8_0 skateboard PPhYyYHNaQ4_2 boat PPhYyYHNaQ4_3 boat PP5_L_EZsmE_0 bird PQI2zG7I8jI_1 bus PQjM0fGHXds_0 bird nARlDpJ1mzQ_1 dog nAmX6FEKmTg_0 truck nAsHFcuT16U_0 skateboard nAsHFcuT16U_1 skateboard nBLWjCuzp2g_0 dog nBPhMvA4QIs_0 dog nBXKLM2hLN0_1 car nBtF1BDR8wE_0 motorcycle nCKBmlhUPYg_0 cat nCPhfqQsjIQ_0 motorcycle nCPhfqQsjIQ_1 motorcycle nCe_XQHu77g_0 truck nCgjbB7wxoE_0 bus nDsb271W8XU_1 car nEFtdboPB2w_1 bear nEIawnnD8V8_0 truck nELgP3wAnm8_0 dog nEM7mY_k1_4_0 boat nEVFHD_9xCw_1 bird nEtqWL5nz_U_0 bus nEtqWL5nz_U_1 bus nEyJKW3bMCc_0 dog nE6lY5G16lE_1 bicycle nE6lY5G16lE_2 bicycle nE6lY5G16lE_0 bicycle nFQvQPqMjpk_0 car nFZrdv6K4pg_0 motorcycle nFa5TGw-b5Y_0 bicycle nF28ACSGHM8_0 boat nF444n6UUJE_0 bear nF444n6UUJE_4 bear nGQ3Hq6P5tM_0 car nGnDoylbNm8_1 bear nHAF0LI8CPk_0 truck nHAF0LI8CPk_1 truck nHAF0LI8CPk_2 truck nHApjxTb0fI_0 umbrella nHAt_MmKZtA_0 dog nHRioXgb-Fo_0 bird nHbHOfTnrtg_0 dog nHbHOfTnrtg_2 dog nHe8j-osZck_0 dog nH9AXssn9vw_0 umbrella nIIQLgiJpz4_0 motorcycle nIqnT8pJFz0_0 knife nJF2wWsJCd8_0 cat nJuhir_bIpw_0 cat nJ6iwd_XQso_0 umbrella nJ6uR6SE01w_0 bicycle nKM_iCO6bKs_0 bus nKS1tzA_Hrk_0 skateboard nKUBzJ38GgY_1 boat nK-2zxkNCuA_0 cat nLED5Us6rMo_0 motorcycle nLL3PMe48dQ_0 boat nLXX8_SfZs0_0 cat nLn2LN33uxg_0 cat nLx78Uv2dmc_3 skateboard nMbLyO3605c_0 knife nMo_-oHL7bU_0 knife nMtxrG4hH5M_0 skateboard nMyhi847s6A_0 knife nNNF1j89RS0_0 bear nNScwJL6ym0_0 motorcycle nNeaR2o9KMY_0 boat nNwEBFJZT8U_0 bird nOe7o_AaOUs_3 skateboard nOfyHwhf35s_0 bus nPBFLS60OYk_5 truck nPhpYRGfHlw_0 bear nP5wigEk-3A_3 knife PQsHE_w_Q5I_1 knife PQuYVLwcT7k_0 skateboard PQ4gPP2l3RY_0 bus PQ9ZEkeKIzs_0 skateboard PRIJbfolHpE_0 umbrella PRIw6kIS_oM_0 motorcycle PRg6CE_exgE_2 dog PRoAGpjxUIQ_1 dog PSdh0lzfg3M_0 bus PSrvUaBxbgU_0 motorcycle PS_CABKe3Yk_0 
motorcycle PTKnZd28Sac_2 dog PTORa3OCyoU_1 truck PTxm2ZRQbNg_0 skateboard PTxm2ZRQbNg_5 skateboard PTxm2ZRQbNg_1 skateboard PTxm2ZRQbNg_2 skateboard PT2XxI2FufM_0 bus PT3felQmrwU_1 bear PT6KXLLxhes_0 bird PUFo51ngpe8_0 bus PUeS5CCMoa4_1 zebra PUgpXWoI6nw_2 bird PUiSf8EuinE_2 bear PU3x1IpbndQ_0 knife PU5v_AtaKKw_9 bird PU5v_AtaKKw_2 bird PU5v_AtaKKw_3 bird PU5v_AtaKKw_4 bird PU5v_AtaKKw_5 bird PU5v_AtaKKw_7 bird PU-lRdkaqdg_0 cat PVV-saboi8Q_0 truck PVXtjPyNMms_0 dog PV6mXKbH058_0 skateboard PWIWGwJZENs_0 dog PWQGxn3c5iQ_2 knife PWQGxn3c5iQ_0 knife PWs7zuWiKZo_0 bus PW7XGdRhgKI_0 cat PW97rAj3_84_0 truck PXb9PHJghpA_0 cat PYH5FxLfm3M_0 bus PYOwGQUBJXY_1 boat PYWfE8WhDKk_1 knife PYohJALR7DA_1 motorcycle PYohJALR7DA_2 motorcycle PYsiftgJNrs_0 motorcycle PZEun35Hcoo_1 dog PZNXXWorkrY_0 motorcycle PZSGccVPUm8_1 bird PZjQiLyqHkw_0 truck PZoM9dv8P3A_1 bear PZuGSUZ1N2w_0 skateboard PZz86aIvTWU_0 skateboard PZ3PfRXk2rQ_0 cat PZ9YkHds_00_0 dog PaVPMVUQwtM_7 boat PaVPMVUQwtM_2 boat PatPjxyHqvY_0 boat PbPu-cnEMqo_0 cat PbUb1IktyM0_0 motorcycle PbdnWP3AnKQ_1 person PbhIhdwp7nI_5 knife PceERP83N7g_1 dog PceERP83N7g_2 dog PdER58jIvPg_0 cat PdRRvS5p7TM_4 bicycle PdRRvS5p7TM_0 bicycle PdRRvS5p7TM_1 bicycle PdgOy1B6ByE_0 person PdgOy1B6ByE_1 motorcycle PdkRSALRJOE_1 truck Pdne4jISJMk_0 bird PeXODrjPJpU_0 motorcycle PecvaJstdYE_0 knife Pejvg4LHBXw_1 skateboard Peur7tMeMNc_11 bicycle Peur7tMeMNc_12 bicycle Peur7tMeMNc_20 bicycle Peur7tMeMNc_21 bicycle Peur7tMeMNc_5 bicycle Pew5sug67ao_0 dog PfKS2L_bxBc_0 cat PfOYq_uyVF8_1 bird nQPFPYvmWtU_0 skateboard nQd33JTaurM_0 bird nQd33JTaurM_2 bird nQmH_VIOI4o_0 cat nRG70FCdevw_0 bus nRP28gcIe5Y_0 bird nRP8SwdbUGw_1 bear nRr5gMvJ77k_0 skateboard nSgcLfwMJu4_0 dog nSvaQz0i9i8_1 skateboard nSvaQz0i9i8_0 skateboard nSz_BdDSYsk_1 bear nS_SY6iDJ2U_3 bear nTjbCPXR408_1 truck nTjbCPXR408_2 truck nTjbCPXR408_3 truck nTjbCPXR408_4 truck nTz3LA23B4U_0 skateboard nUBgjOAcKBw_0 truck nUDvay-MfVs_0 truck nUVSuT7wfDs_0 motorcycle nUdbTm-FW0I_0 bus 
nVAOU6r15Ww_3 knife nVTMM3F16j0_1 boat nVi9QbrUrjE_0 motorcycle nWvR8fiLxGw_0 truck nXD-zvpjC50_0 car nXG_fwbJQ-E_0 car nXjIIWFPSd4_0 cat nXlSVy8CmMk_0 truck nXpq0p9VBXc_0 boat nXqE-XROi78_0 bear nXqQPuJmTZo_0 cat nYYFquwhxeI_0 cat nYqRuOF_Uao_2 car nYqRuOF_Uao_0 car nYqRuOF_Uao_1 car nYut3zBSbuM_0 bear nY0xtzTME34_1 cat nY2XarSrm7Y_0 boat nY2XarSrm7Y_1 boat nY3BS_3Mq6o_0 motorcycle nY3fRfvoh9w_4 bear nY3fRfvoh9w_0 bear nY_icz32gn8_0 cat nZHGbmVkhrE_0 cat nZn4xAbcGSk_0 cat naE1svJuCTw_0 truck naE1svJuCTw_1 truck naR-9rNf5fE_0 skateboard nalqTKM6890_0 umbrella nalqTKM6890_1 umbrella nalqTKM6890_3 umbrella nbCix4zvF_E_0 umbrella nbcH6NfapD0_0 boat ncuqh0iglYU_2 skateboard ncu8gbqMkMc_0 cat nc9aHs1_xzs_2 motorcycle ndBPYFAVIiM_0 bird ndJ2_mPZktw_2 bear ndJ2_mPZktw_1 bear ndMfXyYPfAM_0 bird ndNs3q8tY9U_0 bus ndO2b-r-Krs_0 motorcycle ndO2b-r-Krs_1 motorcycle ndj7VTH_PhE_0 bird Pfi9ZEQtgjY_0 knife PfnFeL4ArA8_0 skateboard PfpTZKfKeKY_2 truck PfpTZKfKeKY_0 truck PfpTZKfKeKY_1 truck PgBMaMqbYqA_0 motorcycle PgE6BAQmVQQ_0 umbrella PhFFfxYo2_o_1 dog PhJOcszed6A_1 car PhJ5rQ5VmeY_0 skateboard PhjPRYTcJwQ_1 car PhyQoxFlTMU_0 truck Ph8Vag9VxRU_0 zebra Ph8Vag9VxRU_4 zebra Ph8Vag9VxRU_1 zebra Ph8Vag9VxRU_2 zebra Ph8Vag9VxRU_3 zebra PiO6F4X8k_M_1 truck PiO6F4X8k_M_2 truck PiRy-T8d0gQ_1 skateboard PiRy-T8d0gQ_0 skateboard Pi_aEuQD5gA_5 umbrella Pi_aEuQD5gA_8 umbrella PjAWqdid4rw_1 umbrella PjBOLvrlicY_0 car PjBOLvrlicY_1 car PjBOLvrlicY_2 car PjTtsfl7KZ4_0 cat PjjO6IaSiuo_0 skateboard PjjV-pCjgqc_1 bird Pjk0d9eP2gI_0 dog Pjm-ptGWuWU_0 dog PjpGwiZ8mK8_0 bus PjuUsIXzSzQ_0 truck PjwfhUvbBNI_0 skateboard Pj9588RHCHM_1 car PkktNSL9IjE_0 bird PlKRGU_XIzs_3 boat PlfCXfMXcs0_0 skateboard PlfCXfMXcs0_1 skateboard PltDcKetGYw_0 knife Pl6ja9eNHzE_3 skateboard Pl6ja9eNHzE_4 skateboard Pl6ja9eNHzE_1 skateboard Pl6ja9eNHzE_2 skateboard Pml224S87BE_0 bird Pm_2At7P8Yo_0 bus Pnt2XmUpT8Q_1 bear Pnt2g-tHwK4_0 truck Pn1VFdKk5vQ_0 truck PoL9E8Yc2vo_0 car PoL9E8Yc2vo_1 car PoUPC9WCdiE_5 dog 
PoV7Wn66UTo_0 bird PolaH6r1Qds_4 truck PolaH6r1Qds_2 truck PpI7DZdWcfc_0 person PpZHxI0N3Wo_1 motorcycle PptqwylntWQ_1 boat Pp6vch1kMqE_0 cat PqJOWTjp0ww_0 cat PqKlF5nnOFs_0 motorcycle PqNDvGH2-iM_0 truck Pq7tfwAqhIM_0 motorcycle PrV4kyVAwWE_0 bear Prynn7mNQdQ_0 knife PsVhOsDIopI_0 umbrella PsfddppUmSk_0 skateboard PsgPXqr-N7A_0 skateboard PsvVwYAeKEc_1 boat PsytJKFxV8c_0 boat PszGWhekz-Y_0 umbrella Ps9ReRjYLVk_0 bird Ps9f-iFqX4M_0 skateboard PtL5k4ew4q0_0 car PtR7vRI9mn0_0 motorcycle PtVUPVUYld8_1 skateboard PtnFOxat4hE_0 bear Ptq7-B4P9Bw_0 skateboard Pt04IRhfVFk_0 boat Pt1vVuKH3fk_1 skateboard PuV7SV-FwOU_1 skateboard neA0T50G8TU_0 car neA0T50G8TU_1 car neA0T50G8TU_3 car newqX6GTbrA_0 car ne8K6jHnOT8_0 boat nfnKsQItZjE_0 skateboard nfxMe31pjec_4 truck ngAKsr62ACQ_0 knife ngOtFD7Fxd4_2 boat ngZtMG--t4I_2 bear nga4aEZQhJw_0 knife ngslQPG3kEI_1 bird nhI3C5y85gw_0 truck nhdMHfvazLY_0 umbrella nhoO0Evj7OQ_0 umbrella nhoO0Evj7OQ_2 umbrella nh56dQ3T3Mc_2 boat nh56dQ3T3Mc_3 boat niBK6HGH16U_0 cat ni3trEPOXck_0 bird njBEUyoUzlQ_0 bird njK1OLFCvv4_0 cat njMC5HAlnMU_1 umbrella njnGmGuXNdE_1 knife njn4TkIDn0k_0 truck nkJxMYiG9Ho_0 bus nkSvwnLvBmw_4 motorcycle nkSvwnLvBmw_0 motorcycle nkSvwnLvBmw_2 motorcycle nkVPvJ3Smrg_0 cat nkZ6NDOt4r4_0 cat nkv5eof4q_M_0 knife nlAePf94uwk_0 cat nlupdJzbyKs_1 bird nl83jp96h9s_2 knife nmmeE-Dfds8_0 bus nmwFYDopqBc_0 dog nmwFYDopqBc_1 dog nnIGNFEnlw8_0 car nnNkJ09YO9M_0 motorcycle nnUkcXbXbFM_0 umbrella nnhUxSjBHP8_0 umbrella noCrLkdGSXw_0 bus noGmFOxKIr0_0 person noIHydna8tw_3 truck nonoyrFpKVA_1 zebra nonoyrFpKVA_4 zebra nonoyrFpKVA_5 zebra nonoyrFpKVA_0 zebra nosbeVXMgAk_0 knife nqN2uJfit8o_1 car nqPkd_Quci0_0 truck nqWs5hqd8Ps_0 bus nqbsnsBZULc_0 truck nqnjh-NO9go_0 bus nq8oHNlU_BQ_0 truck nrJURcGigjE_0 motorcycle nrlcROgdPlI_1 cat PumbYcoJ5zE_0 truck Pu8rYMOC0Iw_0 dog Pu_KMtdCGZY_1 truck PvSzSsQ4YCY_0 cat PvuGk2XhJW8_0 bird Pv77ig8kBgE_0 cat PwBNm2_oKbQ_1 zebra PwE-w-S8nQc_0 cat PwRb6q11-rw_7 bear PwRb6q11-rw_0 bear PwRb6q11-rw_3 bear 
PwRb6q11-rw_4 bear PwRb6q11-rw_5 bear PwgxDMnN1SA_0 truck PwmBtcc64nM_0 zebra PwmBtcc64nM_1 zebra PxN14d54as8_0 truck PxOYpOxjFFc_0 cat Px02MS-Ywo0_0 knife PyevrWYsc8k_0 motorcycle Pyr-sHCH2wc_4 truck PyvyP3J13FI_0 knife PyvyP3J13FI_2 knife Py6rKt-beyk_0 knife Py-bAIGcQ1Y_1 boat PzZ-Jr7jMk8_0 bus P0S7eBa6_S4_0 dog P0e6zPkZO5s_1 knife P1_bfvyTku0_0 truck P2NRNopueuo_0 umbrella P2SgXG0mMWU_0 truck P2Wv0vXNCqQ_0 zebra P2kLj1DZq3I_1 bird P2kLj1DZq3I_0 bird P2ldC-_7nrs_1 boat P256TqMIJZk_0 dog P3MLJSbWlpg_1 motorcycle P3jB1tXpVMw_0 bird P3q6jIrZyo4_1 dog P4jpdzY2as8_0 dog P43doVXj3y0_0 cat P5DcP_VLnP4_0 bear P5Gd_8k2O5s_0 truck P5VAaJj-1Rc_0 dog P5kFeiFmPxw_0 person P5xsJqm2v6c_1 motorcycle P5xsJqm2v6c_2 motorcycle P5xsJqm2v6c_0 motorcycle P5yrLRVD86M_0 dog P6Qm9u9GIE4_0 motorcycle P72vKWjKtik_0 truck P741OzHLvig_0 dog P8BX8WSWRm8_0 bus P8K2yXmSMwY_0 bird P8MCMBcqM00_0 motorcycle P8MCMBcqM00_1 motorcycle P8h9iD7kPRQ_0 bear P80sglFzhRI_0 bear nsS9iSqNMew_1 bus ntO6br-N89w_0 cat ntVDuucoRIk_0 cat nuVxM9m1nb8_0 motorcycle nuVxM9m1nb8_2 motorcycle nvIi1SvX-sU_0 dog nvXKI_MhTTE_4 knife nvYTcYLFUvc_2 dog nvdIoQ5mj64_0 knife nvxwnGRXwZY_1 dog nxJkhdCqhc0_0 dog nxUe9yoeHvs_0 bear nxYGMvfgi8g_0 person nxj_aavOM50_0 boat nxmr9gg0ses_1 bear nx9Uisdggps_3 knife nx9Uisdggps_0 knife nyOaHbw3DLo_0 cat ny2pC-BfLT0_2 dog ny2pC-BfLT0_0 dog ny2pC-BfLT0_1 dog ny3nZLL4cQ0_3 motorcycle nzGPh9yFDTI_5 truck nzQqdKnkQ9I_0 zebra nzppX26-51c_0 boat nzytVTFaYvs_0 knife nzytVTFaYvs_1 knife nzytVTFaYvs_3 knife nz9DMQ9cPrw_0 cat nz_YTLNErSY_1 truck n0P8wVonqY4_0 motorcycle n0T51DP8868_0 bird n1VbuQk_3JY_0 bird n1ZrqU8VSBA_2 bus n2Xd8e_vz0w_0 cat n2Xrvmq2r2I_0 cat n2jvWkboChM_11 bus n2jvWkboChM_10 bus n2jvWkboChM_14 bus n3EKpxnV5U8_0 car n3bFZVLqNvI_0 umbrella n3iNRmzhO1U_0 motorcycle n3pRNFU0ovc_0 bear n3pRNFU0ovc_1 bear n38NmPI7Sss_0 boat n4cdQF8d8UI_0 knife n4mWuEmbbEM_0 bird n5J7UxAi_70_5 car n5J7UxAi_70_1 car n5J7UxAi_70_3 car n5J7UxAi_70_4 car n5i5aZXPgok_1 bus n5ojrsEczYM_1 truck n5wZ3Zin9uQ_0 
bus n5wZ3Zin9uQ_1 bus n6cpTMT-Ci0_1 car n6sMWDd_j1c_0 cat n6wMhru1Mx0_2 car n7HaOXaXWJw_2 truck n7NWTiq_W-c_0 boat P9sfOBt9FI8_1 bird P95Pyq4kglE_0 knife P95Pyq4kglE_1 knife P-EecPZ9zV4_0 motorcycle P-JbMZ89Hac_0 car P-SIr3rYBzg_0 umbrella P-lf6syyjAs_0 cat P-tXkGlSa_8_0 motorcycle P_A56tkbbmk_8 umbrella P_A56tkbbmk_1 umbrella P_A56tkbbmk_7 umbrella P_un1_qBDWo_0 umbrella QATQMMA9vo4_2 motorcycle QATjEG1LPL0_0 bear QA4LOoc1Crg_0 truck QA__knfzZZM_0 bird QBZUbx6SUyU_2 bear QBbAz7q7E9c_0 bus QCDUv9KNiWQ_2 dog QCKzW_uA3vY_0 motorcycle QCl4OGNJdos_1 bus QCqvd4xHZLs_0 cat QCzgTA2cABU_0 boat QDQgSF9ciHk_4 knife QD4ioxu8LAk_0 cat QEMoyw7o_f8_0 dog QEQfoQOU_F8_1 bird QFB5gDukoqg_0 bus QGDhzG35q8c_0 dog QGDhzG35q8c_1 dog QGDhzG35q8c_2 dog QGDhzG35q8c_3 dog QGFSTul5MDQ_0 knife QGcd6O1NAkY_1 bus QGcd6O1NAkY_2 bus QGv8jcDgmBY_0 motorcycle QG25-t2CqY0_0 bus QG5tLrHw5Hk_0 cat QHVkPy7f680_0 car QHVkPy7f680_2 car QHhXgNBSjV0_0 umbrella QH2Vo_5h-x8_0 car QIe7ky6mJO8_0 bear QIqf221MKYo_0 bird QItwshU9sAQ_0 car QI65w7sMLtA_0 cat QJIgRLU_fU8_0 motorcycle QJfS9bR2S4I_0 cat QJsyPZ31U-0_0 cat QKG7PXh0UoU_1 bus QKG7PXh0UoU_5 bus QKG7PXh0UoU_6 bus QK9WWQe1WQU_0 bus QLTztdEJ8Ts_0 motorcycle n7dIhGKEzWM_2 boat n7hFNcaW9rw_0 knife n77hlwjlW_Y_0 dog n8IsRKE9S6k_0 motorcycle n8kFOAqnMao_0 motorcycle n9RozRHi7iI_1 knife n9RozRHi7iI_3 knife n9xiuvCd5Lw_1 bear n-fT4fcLulk_0 bear n-fT4fcLulk_4 bear n-gEIxTHjBk_3 bear n_EpRXVan0M_0 cat n_J23TUQdl0_1 bear n_PRUX4zrLw_0 car n_bIC-prc2E_0 motorcycle oARh23g1-LA_0 cat oAhYK7brhk0_0 dog oAhYK7brhk0_2 dog oBDdj5mkGyc_1 knife oBraEPvaSi0_0 bird oBuzx2dwA_Q_2 knife oBzhDbxL57k_0 bird oCUkN7ySpf8_0 motorcycle oCZ3WCK5BZU_1 motorcycle oCf-LgXx6Dw_0 bird oDHO9J7vFwI_0 boat oDUJYHwNuS8_0 bus oDsRL8dvgLA_1 bus oDsRL8dvgLA_2 bus oF81nMQlA-4_2 umbrella oGMlnXjD9R0_0 bird oGuIyQiDsy0_2 boat oGuIyQiDsy0_0 boat oH-XJADp0FM_1 bear oH-XJADp0FM_2 bear oH-XJADp0FM_4 bear oH-XJADp0FM_5 bear oI5l1By4H7U_0 car oI_peuU5xk8_5 motorcycle oI_peuU5xk8_0 motorcycle oI_peuU5xk8_3 
motorcycle oJD17uQnW_o_0 dog oJK_TUb7HoQ_3 knife oJLVcOe7CEU_0 motorcycle oJervxxOCvY_0 dog oKTgwWf3FKA_0 dog QLxMt8F3oYA_0 cat QL4uK4sZxIU_0 cat QL-hkYCV0BQ_0 motorcycle QMEIKO8LcEU_0 motorcycle QMGNMAZLRFY_1 knife QMGNMAZLRFY_0 knife QMHCb6-qyQE_4 bird QMHCb6-qyQE_0 bird QMHCb6-qyQE_3 bird QMJHMIdkS0w_0 boat QMVKAdAOrNY_0 dog QNUGl2q9luk_6 dog QNVeq1dY-gY_0 bus QNV_xE7TePM_0 umbrella QNV_xE7TePM_1 umbrella QNaFT-Ch0Oc_1 bird QNgnQe-MASw_0 bus QNgnQe-MASw_2 bus QNibPLG3_Q0_0 dog QNibPLG3_Q0_1 dog QNibPLG3_Q0_2 dog QNrg73bCl7M_0 bus QN5joVuigKw_0 dog QOCUHjNieAs_0 cat QOGKQmMhYE0_2 knife QOQU7N2vIdQ_0 dog QOcPhbRnGh4_0 bird QOm8zog21wI_0 bear QOp31EvHfRU_0 cat QOs2s2r3hpY_2 bird QOs2s2r3hpY_3 bird QO1T0Gc_cJk_0 bird QPwnbNFbZyY_0 motorcycle QQAQLPTkDwg_2 bird QQAQLPTkDwg_0 bird QQh4Cpr7tpM_0 bear QQ7EaN8ArmM_0 motorcycle QQ-MUe-ni48_2 motorcycle QRXtuZBCXtA_0 umbrella QRZ_xQK1gx8_0 bus QRZ_xQK1gx8_1 bus QR3BO_SYrpQ_0 bird QR5EuXvYbms_0 car QSK1oOt_5R4_0 knife QSld_dZQvpY_0 bear QTPAOir-oYM_1 knife QThuW0gGa20_0 dog QTlzTtcPjwk_3 car QT0-oUhQtbk_0 dog QT17xRXmBGA_0 umbrella QVCd5pTgbds_0 boat QVRM0OueKFY_0 dog QVXv0Z1FCdg_0 motorcycle QVXzwEenImE_0 bus QWBwnViynQA_0 motorcycle QWFR4XdQv2Y_0 umbrella QWPkooq95So_1 knife QWPkooq95So_2 knife QWSsyFwwdO8_0 dog QWl839SnUOs_0 dog QW1BlOtH1bo_0 cat QXAw2xD7Sgc_0 motorcycle QXB7sLTVqfM_0 bear QXIGeVZ6Uqk_0 bear QXVQ8S7aUB4_0 knife QXjfaOwHSFo_1 motorcycle QXwh-lAa3Pk_0 knife QXwh-lAa3Pk_4 knife QXwh-lAa3Pk_5 knife QY2pVib4cZE_0 motorcycle QZOPux7sysI_1 dog QZOPux7sysI_0 dog QZhaeUKdGYk_0 motorcycle QZpfX1aipco_1 car QZui5buTy7k_0 bus QZ3FD2qszF8_0 motorcycle QZ3MWq6qwJI_0 bus QaGjoVfIWLQ_0 motorcycle QaM6ny5gEFQ_0 cat oKY-KsLfJe4_0 bird oKY-KsLfJe4_1 bird oKbCNTwLJoI_0 dog oKe3Rcvn_TU_2 cat oK9TjDSQdSs_0 cat oK9erjaiRq4_0 bus oLRDfgRIJ-A_1 bus oLSjl-qN4M8_0 dog oLrou9S3K-0_1 motorcycle oM_FQGUvPIk_1 motorcycle oNFmLa8pU3A_0 knife oNLkf1j-v6Q_0 cat oNZOg6XoSrY_1 dog oNbWPkOIdxg_5 car oNbWPkOIdxg_4 car oNyfqJGJhrY_0 
motorcycle oPhE3ECqxf0_0 bear oPlhh62giKI_0 car oPrG5_acHVU_2 bird oP0yHq-dlRY_0 motorcycle oQV827pXDXA_0 motorcycle oQXdls5ffZc_2 bear oQXdls5ffZc_0 bear oQXdls5ffZc_1 bear oQ7ARK51eHE_1 dog oQ7ARK51eHE_0 dog oR-7d677bYw_0 motorcycle oSPVZs6_Bd4_0 motorcycle oSVes8uNT5E_0 motorcycle oSao8txZd7A_0 motorcycle oSb17xrITtY_0 motorcycle oSqq5UHBveo_0 bear oSxoAvNHNB0_0 motorcycle oS60CV9BFs8_4 bear oTYr-qD5JOE_0 bird oTj1e8RI67A_0 boat oTlwKNdm3rE_0 dog oTuVBf1jiPM_3 bear oTuVBf1jiPM_0 bear oUHa0FV0wwM_1 dog oUVJrf3WBrs_1 bus oUVJrf3WBrs_3 bus oUuQYVAvtgs_0 bird oVUE-0XhhsQ_0 car oVUE-0XhhsQ_2 car oVUE-0XhhsQ_3 car oV1vhE0ypUE_0 cat oV6wthYHnKA_3 knife oWFO_yss01s_0 cat oWI2O83zUJk_1 car oWI2O83zUJk_0 car oWYSJgX0THI_1 dog oXMW3YjDAqQ_2 boat oXaieymppqU_0 cat oX4YRc-No7Q_0 dog oYY_svQfTs0_1 boat QahJqWjC1v0_0 motorcycle QakBz4K6hqw_0 umbrella QbHAXTRKk8w_0 knife QbHAXTRKk8w_1 knife QbNU92uEUSc_0 cat Qbk_YIfY5q4_7 knife QcLZ-b-0PxY_0 boat QcU2S6m_GJk_0 dog QcuHNJWb-AY_0 car Qc0kbcpophI_0 car Qc5ZW-ni9ZQ_0 boat QeRfpcI_TTQ_0 bear QebJi8pjWkk_0 car QeeG_4eNyg0_0 dog Qe1-M3oVaFs_1 knife QfOdxYnCAKc_0 bear QfOdxYnCAKc_2 bear QfaVCQOGlMM_0 motorcycle QfgJh_s9H0I_1 bird QfgJh_s9H0I_2 bird Qfr5Fc1k7Ic_0 knife QfwCa3YapRg_0 cat QgRbpAz8TuI_0 bear QgRbpAz8TuI_5 bear QgRbpAz8TuI_2 bear QgXjMUMIe4Q_0 cat QhbwOw5dHPg_0 cat Qhc3Bb_6Uq4_1 motorcycle Qhc3Bb_6Uq4_0 motorcycle QhnEXqWFBuw_0 bird Qhxv39Tkzbs_1 dog QiHJ2uYByjM_0 motorcycle QjV-g1D6Be0_0 motorcycle QjV-g1D6Be0_3 motorcycle QjV-g1D6Be0_1 motorcycle QjV-g1D6Be0_2 motorcycle QjdGUh1FtN4_1 bus QjqhhoIx6nQ_0 boat Qj4Mfd45GOE_3 bus Qj4Mfd45GOE_0 bus QkPH2LBso5c_0 umbrella QkPLEWaH1bo_0 cat QkkuZ_G7t48_0 boat QkwI5-_QspU_0 cat Qk6G7eAHlCs_0 dog QlcaO8pkzd4_0 bear QliTvc637Yk_2 boat QlieDL9xPyU_1 motorcycle QlxQKy1yzyI_3 motorcycle QmP4xj9S0mQ_0 motorcycle QmR3bvWDA1s_0 boat QngGa73C1G8_0 cat QnnV6lKKIgI_1 knife QnuD7a8BM30_0 dog Qn9CU5O4FHU_0 bus Qn9Z0LVIxbo_0 car QoTopiP9k2o_2 bus oZLdU13R4uU_0 motorcycle oZoTyJNjCJI_0 bus 
oZ6Py8Tx-sA_0 dog oZ9qkN9Q1X4_1 bird oaXGm1MdDoA_0 cat oajaYAOs_oI_1 knife oa_73oVbH38_0 bird oa_73oVbH38_1 bird obbzKGrHOP0_0 bird ob70dcN35yg_0 bird ocNVbpQhB5g_0 cat ocPgZeXuFqs_0 car ocj3mV2T-ls_1 bird oc4RRoFoUo0_0 boat odsCgfz0yM8_0 motorcycle oeIBPeBAEv8_0 dog oeVUkEvC3To_0 boat ofDmsqy24k0_0 car ofJOKOICGco_0 motorcycle ofvHImJKiAg_1 bear ofy3Sid451s_1 bear ogIewcLFxLo_0 dog ogLOXI-Kvcg_0 knife ogzWVQ5TC80_0 cat oh7uEf_YE40_1 dog oiItk_51540_5 motorcycle oiKC4SxYNJE_0 bus oiRnmB7WQjQ_0 bird oiu_53B5AAc_0 motorcycle ojFBoKltgfQ_0 bus ojFBoKltgfQ_1 bus ojFBoKltgfQ_2 bus ojQfL_XgMM0_2 boat ojz2xLrH-Ts_7 car okKrvzNb9IU_0 car okiIzmV8YLw_0 cat okiIzmV8YLw_1 cat okzrd8v1G-w_3 boat omGx_muz0SY_1 boat omngVtTFM1I_0 umbrella oms2XkgghV8_0 boat QoqeX-W0RFw_0 boat QoqeX-W0RFw_2 boat Qo0mxFOMVGc_0 dog QpAWeYA1pc8_0 car QpDm5g1dELc_0 bus QpD7CVh2Z_c_3 knife QqdW9IMDHgs_0 boat QqdW9IMDHgs_2 boat QqdW9IMDHgs_3 boat QqhZnuITXs8_2 bird QqhZnuITXs8_3 bird QqkblYN1YOg_0 bus QrEjYyinITM_0 car QsQFhUd04jI_0 motorcycle QsQFhUd04jI_1 motorcycle QsV9BTogrKc_0 knife Qt78_24lkeM_0 boat Qu8xNQ6Vd04_0 cat QvgmjwKuAeM_0 umbrella QvqNodq3NxA_3 bear QvsjDkJ_oho_0 cat QwALBOsUby0_1 knife QwYxgsacjx0_0 knife Qw9UvjSO9_Q_0 bird Qxx3WjrGmtE_2 bear Qyc0xSSPT1E_0 dog QzCvBtKWPjg_0 person QzPFEeJYDcE_0 umbrella Qz1R2sk37qg_3 bear Qz1R2sk37qg_5 bear Qz1R2sk37qg_6 bear Qz1R2sk37qg_7 bear Q0HX6Jfnnb8_0 bird Q0J1QbF_Vis_0 bird Q0KhMTnvbxM_0 bus Q01P6P7bm7E_0 motorcycle Q0-7SsSXMV0_0 knife Q0-7SsSXMV0_2 knife Q1RqyDERgxM_1 bird Q1VXWNHzPqI_1 cat Q1VXWNHzPqI_2 cat Q197NAaQodY_0 dog Q2Sop28spdM_0 knife Q2bha73kLKM_0 motorcycle Q2vBCDtNAGI_0 motorcycle Q2zRXVl7bLI_0 motorcycle Q3ZxsgPKTGY_2 bird Q3ZxsgPKTGY_3 bird onoO4tamBlA_0 knife onpRejbK_VE_0 umbrella ooJg7-nxmUw_0 motorcycle opOHceUyoXk_0 cat opb_qoqO05s_0 bird oqUbqkDsSzI_1 knife oqvnxRx-0J4_1 bird oq4KPP5PYAo_1 motorcycle orQkUDPfTg8_0 boat orTFjuPHzxU_3 dog orcE_uPKO_c_0 bird ormZXNXni-U_0 dog osYgSn6yOG0_0 cat os3H6KzvGEg_1 knife 
otHFt4YAKeI_2 dog otvQKWvIXAE_0 bus ouFwG2YU59c_0 motorcycle ouNsmVT6GRU_0 car ouqFEe0ud_U_0 motorcycle ovHCJGK35r0_0 knife ovHCJGK35r0_1 knife ovQY7VA36gU_0 bird ovRBelXjQ-A_0 bird ovaFSf6jda4_1 boat ovnkb_MuAlg_0 bus ov9yaGUtSEw_0 bear ov9yaGUtSEw_1 bear owKiuZVov4U_2 dog owaIraEDvqI_0 umbrella owaIraEDvqI_1 umbrella owb-43QL8Dc_0 cat oxKhcqfQV7k_0 umbrella oxZ42ECABUo_0 motorcycle oxdCJK5GPS8_0 dog oxyS9oNIBaQ_2 boat oy52khlb79k_0 cat oy885M8rmDM_0 bus oy_Efqu_Zhk_0 knife o0CsAQaDp1k_0 boat o0VArHW9gpE_2 dog o0yyk1GchoE_2 knife o06poedEjtM_2 knife o1RqDbHx0IA_0 umbrella o12Lc5yZNco_1 bear o2E2ypLvzOo_1 car Q34_kBWh3QU_0 motorcycle Q4IH3ZOVKFQ_5 bus Q4TELEHdcjA_0 motorcycle Q4YD_lW8JFE_1 knife Q4afI-fku0A_0 knife Q4d0z-q-UXQ_0 bird Q4jZeoLzZXs_2 bird Q5DrYh7pcTg_0 cat Q5RabF9bK3o_0 car Q5cY3mt9NHI_1 car Q5cY3mt9NHI_3 car Q6Lg4c8W2XQ_0 bus Q7SXsNoT9cc_1 boat Q7TDTHQoPGc_0 bird Q7TZ3TlDNzI_0 bird Q7V8JjnLW_A_0 person Q7a4tWAU7-o_0 dog Q8gHTSzR6h0_0 cat Q807ZgwscUk_0 cat Q9LvGsq1Mas_2 bird Q9fbeFbARPY_0 bird Q9qA-2ofuFc_0 dog Q9qA-2ofuFc_1 dog Q-JQokKqXZM_0 motorcycle Q-STF8c8RSE_0 motorcycle Q-S6ypfxn4w_1 bus Q-VqbNMPAjE_0 dog Q_a7bRv2dM0_1 cat RAQAfTprH5s_0 cat RAc8MyscjAA_4 bear RAc8MyscjAA_0 bear RAc8MyscjAA_3 bear RAqMmf5FS_Y_0 dog RBNNklw-NjE_0 car RBNNklw-NjE_1 car RBdpxD5mMy8_0 cat RBssHo0ygdI_2 car RBssHo0ygdI_1 car RBvocl1t9qM_0 car RBvocl1t9qM_1 car RCzBVv_Vddo_0 dog RC444E40nLY_0 cat RC_ckl7o7sc_0 dog RDq9wvYEiSI_0 umbrella RD8OUO8u7oQ_0 person REBpFtJosSc_3 bear REBpFtJosSc_4 bear REBpFtJosSc_0 bear REbm5i5vhcQ_0 umbrella REbm5i5vhcQ_1 umbrella REiwqNPkmew_4 bear REiwqNPkmew_3 bear REjT99mHV_g_0 cat RFIE-agz3SA_0 dog RFUZkHtGWvg_2 bird RFUZkHtGWvg_1 bird RFZG72_XG3U_0 motorcycle RFcz2p3w1oc_0 bus RFhEq5WF9Io_0 motorcycle RFqSKdzXQFQ_0 bus o2z2zu4L1Ho_0 cat o3OdAgJnYlw_0 umbrella o3TpeQ7mhIQ_0 bear o4It_gqHKoM_0 bus o4It_gqHKoM_4 bus o4It_gqHKoM_5 bus o4bpCoFINtY_0 bird o4yKF7ZQge8_0 cat o4yxnKhoWrQ_0 cat o49yvv0vmJQ_0 knife o5TWf69h978_0 motorcycle 
o5bJmNSZmGE_0 cat o6vw6_1pc_g_1 person o6x94jhuMEw_0 cat o7UXYGmFww0_0 knife o8BqJTsAjnI_0 boat o8BqJTsAjnI_2 boat o8Gr9wZzcA0_0 knife o83uI_tdkrE_2 car o9UpoUWgJWw_1 motorcycle o9YqiVSTBVs_0 motorcycle o9qB9kYt9Bc_0 motorcycle o9vRwcqz30w_2 bear o98cAmKOAtk_2 truck o_BpJHlv8bY_0 cat o_NYHfqWzBw_0 cat pAP3j2UmTAA_0 car pAuz372kMrs_0 boat pAvBjM_cSCk_0 umbrella pA_f-DZ2FdI_1 bus pBj4KFDTwGg_0 cat pCPwOGObTcs_0 umbrella pCXmnj6vY7o_1 knife pCa3Tf27TcY_3 bear pCdwcy8npiE_2 bear pCfA0E-TIXo_0 motorcycle pC9mu-CQ9fg_0 cat pDjjH1_G6Z0_1 motorcycle pDjjH1_G6Z0_0 person RGT-FumEK7I_0 car RGXgv5gqM8k_0 umbrella RGiE9-CME30_0 motorcycle RG6y27UUUMI_0 knife RHHOcUqVF80_0 knife RHSfZLRz95o_0 boat RHrnX__15lI_0 car RIBigSX5_90_1 bear RImslgwYbYk_2 boat RIwUvnURoqs_0 cat RI14PaJgb7E_0 umbrella RJ95URcz63g_1 motorcycle RJ95URcz63g_0 motorcycle RKZ4YVnDywQ_0 knife RKa1tJXFTAw_1 cat RK8ZJaF2QHQ_5 bear RK8ZJaF2QHQ_6 bear RLP9M0bfpWo_0 umbrella RMapunE2wEc_0 boat RNPKsQSr2o8_0 knife ROfxuPZWET8_2 bear ROkJ79Y9T7s_0 motorcycle RPJ0SJeC5ck_1 car RPJ0SJeC5ck_2 car RPWms_VL6wY_0 bus RPhdhEKBBAM_0 motorcycle RP81F6rIP4w_0 motorcycle RQ5liX_fOJw_0 umbrella RREV1E0Mbhs_1 knife RSXIvkOJQq0_1 knife RSq71vJH9yc_0 bus RStmsJCm7mo_1 car RSztnKS1IYI_0 car RTTysK1hBpg_0 boat RTvVXaA35DI_0 motorcycle RT0tTVP14XE_1 umbrella RT0tTVP14XE_4 umbrella RT0tTVP14XE_6 umbrella pFCVfOX_UJ0_0 umbrella pGJMt9Jmk_Y_0 car pGnZDXcCjSc_0 bus pHC850dBc-E_1 car pHf0EP0QU9Y_0 cat pHueI1IUqzg_0 car pIhqwiD8cks_0 bus pJXxn2DRWyI_0 bus pJYetmKuiE0_4 bear pJj28cMLcZc_0 knife pJl14EZ6-Mc_0 umbrella pKPRv5lL_DQ_1 motorcycle pKz_g-J2O-A_1 bus pK1umZxS4nE_0 knife pLEV-uFmv6I_0 cat pLI_HgRsRow_4 bus pLQDtquQaSE_0 bear pLp7vmowqNs_0 motorcycle pMHRlQ2NxeA_1 boat pMaT7qWMaV4_1 bear pMg2xwjkfVc_4 umbrella pNHKmiurxTg_0 knife pOCvwILBOCY_0 boat pOjuNMevoaM_0 car pOq6RrgrXWY_0 motorcycle pPyL4U8gYpM_0 cat pP22coNl6r4_0 bus pP5q-Bszfh0_0 motorcycle pQMkOOTP0Lk_0 cat pSJypg6az1w_0 bus pSjKd_x9ycU_1 boat pSz961UYSrY_0 motorcycle 
RVvfyYc8jws_0 umbrella RXAW31Vm7pU_0 motorcycle RXQ-E6_Y__c_1 car RZAlTTj0Z4o_0 motorcycle RZAlTTj0Z4o_1 motorcycle RZL2H_-y3vE_0 umbrella RZrAehHE8aA_2 knife RZrAehHE8aA_0 knife RZ0yQkyeSd8_0 boat RaZy_JiiJ3E_0 motorcycle Ra48MJPLmUw_2 motorcycle Ra48MJPLmUw_0 motorcycle Ra48MJPLmUw_1 motorcycle RbQTcoldE8M_0 bus RbRqkcC6l_A_0 knife Rb5tGSqtlFU_1 motorcycle RcSm0O0Ylc0_0 cat RdNjlTlNbEA_0 bus RdP6hW5p6ys_4 car RdUjywh70lM_1 cat RdlWUo9fYmA_0 motorcycle Rd4TvDZNwHs_0 umbrella RfNyu5aooJs_0 car RfrtTbza00c_0 boat RgBWTOo9hqo_0 cat RgC0rdZCy2c_0 motorcycle RgFR8z8IzAQ_0 cat RgUwlXzmX4Q_0 boat RhYw3jSi0xY_0 bus Rhqz5maRjNs_0 cat Rh0zI8vpRWk_2 knife Rh7Y69j41EY_0 bus RiCptCjnrqk_0 cat RiOw5wO0xTg_3 knife Rid6twPtgIo_0 cat pTGbMPGsbCU_0 car pTSbrP23T0s_0 motorcycle pVCT-jEaSPE_1 bear pV8hPodV-zY_0 motorcycle pXBltXzZZe0_0 car pXcoix_wq4E_0 cat pZC4kceO-0g_0 bus pZJDlV5VS3Y_0 motorcycle pZ7RohF8JgE_1 knife paF1hQf-YFk_0 boat palM4nIm6GU_0 motorcycle pba0HVNnmbc_1 motorcycle pcOsY0MSbh0_0 bus pcb_jPcg_U8_0 bus pcpHHo_gp-Q_0 cat pc2aHxzJDtQ_0 cat pdDVE4LsX54_4 car pdDVE4LsX54_0 car pdDVE4LsX54_1 car pdDVE4LsX54_2 car pdDVE4LsX54_3 car pdDVE4LsX54_5 car pd0IEWCwpUY_0 bear pd1BZjvbFNI_0 knife pgKdcFb2680_1 motorcycle pg4m5Fi0Mhc_1 car Riq87Q_unPU_0 cat RjDo0UDX9Ws_1 knife RjItZnZQBKk_0 car RjqDxu3wf5o_0 cat RkSzsg-k14I_0 boat RktoQu-Wk0M_0 cat RmFxIMl1tSU_0 bear Rmpv0oMhUCc_0 bus RnEWcQNxWGY_0 motorcycle RnPY8wgKxj4_1 cat RnQ-v8AJQbc_0 motorcycle RnjU70B_0cU_0 bear RpTRF_oB1-I_2 bear Rpn1EcI_ESo_0 knife Rp8euBdhkR0_0 motorcycle Rp8euBdhkR0_1 motorcycle Rqs856i0jbs_0 umbrella Rrj0e5VSIgY_0 car Rsw947loMaA_0 cat RtSEfWF3PdI_1 knife Rtng6SCToEM_0 car RufUHX-TjyM_0 bear RvHvTQC9Kr4_0 bear RwC5kkt5VDU_1 person RwC5kkt5VDU_5 motorcycle RwVgY7zgnYM_0 knife RwVgY7zgnYM_1 knife RwYiNSlAYcE_0 car RwpY0u7t3vE_0 umbrella Rwp_dTfFI28_4 boat Rwz5T35lNgY_0 cat Rw5dzv79c-M_1 motorcycle RxLwy_iZqKg_1 bear RxWhDOyHYNo_0 cat phJS1iN6HFo_0 umbrella phTyZcbKeQw_5 bus pihR4mhfwxM_0 motorcycle 
pim0lzR8i1g_0 cat pix5Cxt_fUM_3 knife pjgi60dJalw_0 car pjgi60dJalw_1 car pjhNnA0142Y_0 motorcycle pmszdloBDwA_0 bear pmszdloBDwA_2 bear pmszdloBDwA_5 bear pnMd28rPX7M_0 motorcycle pncTBxEM4WM_0 bus pnjPhdpuKGc_0 motorcycle pn0ZChK2ASs_0 bear ppAj6dnl62Y_0 knife ppAj6dnl62Y_1 knife ppJXGy7snUw_1 knife ppwjIgwParM_0 boat pq1swOh85gc_0 boat pq1swOh85gc_2 boat pq1swOh85gc_1 boat priwWNrQnkI_1 bear prwglbuvyZ8_1 knife prw0IWDYBUM_0 cat pr3LOwTWNnk_1 bus psOuOLCJNk8_0 cat psTqTt0np_I_11 bear psTqTt0np_I_3 bear psTqTt0np_I_6 bear psUASBNRwIE_0 car psUASBNRwIE_2 car psUASBNRwIE_4 car ptCx-L_n2Yg_2 bear ptNC5ou_rOQ_1 motorcycle puZUIBS4Ceg_0 cat puw9BfAKOHU_0 bus RxiBbfFH3is_0 knife RxiE2beIvjQ_0 bear RyWLXS1Vrco_0 knife Ry4q0UokRjo_0 motorcycle RzWczJnyzmg_0 cat RzWdM4_lg2c_4 bear Rzj5xv434WA_0 bear RzrQOptkjFM_0 motorcycle RzrQOptkjFM_1 motorcycle R0hj1kAnMgs_0 car R0w6j1wmwo0_2 knife R0w6j1wmwo0_3 knife R1Fkwaa8CxU_0 motorcycle R2FlyNrjZBQ_2 boat R2FlyNrjZBQ_1 boat R2Fps165H9g_2 knife R2XiIC1qbAM_0 bear R2YmjDNC8oo_0 bear R2duXYQhnFA_0 car R2sy6qbPc4c_0 car R23ZSmBA2Rg_0 knife R3zhr1iboG0_0 bus R4ktPNCb564_1 bus R4vLajpLSMk_0 cat R5CBlOfUL4w_0 person R5cIoEcqZ9E_1 knife R5r3AIx_BoU_1 knife R5r3AIx_BoU_2 knife R6PuHPDiwPs_1 car R6f_t-MqO_s_0 bus R6tsNuvoTus_0 car R6uZ5JpxQ88_0 cat R6wk6JHQSeI_0 knife R6wsV6cYN_w_1 bus R7w-mdDyhG8_2 knife R8TV702EIqs_0 knife R8j0mjQR4lI_4 boat R84Bj4PKOvE_0 bear R84Bj4PKOvE_1 bear R9LK4x3pO0Y_0 cat R9L1I9EEE0g_0 motorcycle R9zDzUslz9g_0 car R9607CioN3U_0 car R99fGQRB6rM_1 car R-UGxl6KGoo_1 bus R_LEKDTlVvs_5 boat R_NxqXdz3RA_0 car R_UPR78XIvA_0 knife SAFptHT-UpM_1 boat SAFptHT-UpM_2 boat pvrO7c2imos_4 car pwgqJO3yKHI_0 cat pwwdlKxLCqQ_1 knife pxBtDlmwesI_0 car pxIlEGkEw5U_0 cat pxwl3iVkx08_0 boat pyAuY2v2U0I_0 cat pyTXP2GZRuM_0 knife pyTXP2GZRuM_1 knife pyTXP2GZRuM_2 knife py0K3KEYfjA_2 umbrella py0K3KEYfjA_4 umbrella pzZvI_g1S8M_0 motorcycle p03u2BJIvyE_0 bear p03u2BJIvyE_1 bear p1p9QUFIi_8_0 bus p1_thBtA2-g_1 bear p2pRN03gXFk_0 cat 
p26eBX5AGCo_0 boat p3MF-uxvtWk_0 bear p32jOqTS5ec_0 cat p4MmW7gFlLI_0 motorcycle p4MmW7gFlLI_1 motorcycle p5NxEAfgmro_0 motorcycle p5bLvlU8ua0_0 motorcycle p5lUPYsz-HE_0 cat p5vt7l9pW-0_1 motorcycle p5vt7l9pW-0_0 person p5_O08ZNK_c_0 motorcycle p6GkhJZsCi8_0 cat p6Rtu645O08_1 motorcycle p6Rtu645O08_0 motorcycle p6dBx3tBRr4_5 bear p6dCoZRaQOA_0 boat p6dCoZRaQOA_1 boat p6dCoZRaQOA_2 boat p7OlEbiu5to_0 cat p7WwUD62qfY_0 motorcycle p7gjVQyX07A_0 cat p7pnYAaDqPI_0 umbrella p7sHze5SC0g_4 bear p8MEDllYMKg_0 cat p8RUtiaGu5U_0 cat p8ZUCNMnKpE_0 car p89fuT8e_zk_0 cat p8-8JqAgtv0_0 motorcycle p9XjLjpQX-8_0 cat p9by0qLqHOQ_0 knife SAkHT1Ozg1c_0 motorcycle SAkHT1Ozg1c_2 motorcycle SA1Tb1XbngU_0 cat SB1UBp1PVf4_2 bus SDKsL-L7GbI_0 knife SDbe9JVnITk_0 knife SDk3Y3jzalg_0 knife SEp92WMharw_0 bus SExW2mVb1Mc_2 car SExW2mVb1Mc_0 car SExW2mVb1Mc_1 car SE5Rg8Qpb8c_1 knife SFB2FGuZb6w_0 motorcycle SFMc-UCkcT8_0 cat SF8c7EeFPPk_0 motorcycle SHcJfBJBQe4_0 bear SHxyKRdKRc8_0 cat SHxyKRdKRc8_1 cat SH1noq6GrKw_0 knife SISqo1FBefA_0 bus SIbLAYX2J_A_0 bear SJAZnOnRtag_1 bear SJsxWsiEuTg_0 motorcycle SKNl4frouUY_1 knife SLEOr8bmm2w_0 motorcycle SLEOr8bmm2w_1 motorcycle SLzqvins4p8_0 bear SMYpv_Ea3w8_0 person SM6BtnyDz5w_0 cat SNZ0xGGmZvU_0 knife SNhnfqJHoI4_0 motorcycle SNl4Gq_2aVQ_0 bear SNrosAtwG2k_4 bus SOYkQc-toMU_0 bear SOYkQc-toMU_2 bear p-J0yyoF0lU_0 motorcycle p_C9Zwt3N5c_0 umbrella qAJSLnflSrQ_0 cat qA5rC8MxCoA_2 bear qCzILENpEWk_0 boat qCz4ft26CAw_2 knife qDobzjbo_aM_0 cat qEcNn2_TQC8_0 cat qEei5YCRiHA_0 car qEj3r8dtvKg_0 boat qE5fKHWTLMw_1 bear qFR-yuWiHVk_3 knife qFR-yuWiHVk_4 knife qFwugOO0pC0_0 knife qGjYX-iNrPE_0 boat qGohF2oMPS0_0 motorcycle qGxfRwBmBEc_0 motorcycle qGxfRwBmBEc_1 motorcycle qHKwI-35nNU_0 motorcycle qIIu-MIIYIE_0 boat qINDYDOlPLA_0 motorcycle qIPydTwqwmI_3 car qIPydTwqwmI_0 car qIPydTwqwmI_1 car qIPydTwqwmI_2 car qIkNPwKd6ag_0 knife qIkNPwKd6ag_1 knife qInP3tWVtWE_0 cat qJMxoAbx9YU_0 boat qKxQVpaLChg_0 bear qLfa8e4ffQY_0 bus qL6LVXg4Vt4_0 cat qMEMl1FFVIM_2 
umbrella SPRByN4TiFg_1 boat SPsOjXxZymk_1 boat SQ_ChhUwWng_0 bus SRUB2kzDBTk_0 person SSFOqr1ARgI_1 umbrella SSaN8vntuYs_0 bear STTRwCtQ8_8_0 boat ST6aA292Pos_0 motorcycle SUMc-5fiNzQ_0 motorcycle SUnPNgAE_ho_2 boat SUyRs3xvc9c_0 cat SVBc-W37yW0_0 umbrella SVSMGxy8Z6I_0 cat SVXaBPnNWO0_0 knife SVXaBPnNWO0_2 knife SVt7vQ8LYZU_0 bear SV70cwNA6o8_0 knife SWJyq_mITbE_0 boat SXmy9BLHr84_0 bus SXvXN3waFWs_6 bear SYCg5NuWc60_0 motorcycle SaHw7yyoeJg_0 cat SaSgclGWGwE_1 motorcycle SaSgclGWGwE_3 motorcycle Sa1iRLR4d_c_0 bus Sa4L2rdyD10_0 knife SbWCXCuXBqY_1 bear Se3XbBA4N4o_3 knife Se3wtx4DzwE_5 bus Se3wtx4DzwE_1 bus Se3wtx4DzwE_2 bus qMlYXZy1Tow_0 bus qNfS9Y5zs-Y_0 car qOaABf_zb9U_1 boat qO7qHolBYj4_0 bus qO8D0E7MjOI_0 cat qPGkJRPae6A_0 bus qPMDgkgSTnA_2 motorcycle qPaox7otsVI_0 knife qPwAWEtJBqA_2 motorcycle qPwAWEtJBqA_0 motorcycle qPwAWEtJBqA_1 motorcycle qPyR7CpZ6l0_0 knife qP88t7GfZc8_0 knife qQaIW7IjCZo_3 motorcycle qQaIW7IjCZo_0 motorcycle qQaIW7IjCZo_1 motorcycle qQdtuBd-SgI_0 knife qQlsMjenbfE_2 knife qQ5tf8s7KrE_0 bus qRO6U_tg6SE_0 cat qR4kw8rf-FU_0 motorcycle qSQGG-K89mg_1 knife qSgOYqBt_8k_0 bus qSnoKy6T22k_0 motorcycle qTKtODdEZIg_0 cat qTut_O_LppA_0 bear qT00uOC9JpQ_0 car qUuTEKdKNNg_0 car qU7DT4ipQHw_0 cat qVSnhT0Luh8_0 cat qVyAlx4rMTo_2 bear qV7U9CRjZGI_0 cat qWN8i7sJyVg_4 umbrella qWcXQWy7yw8_1 bus qW-zRq8VTV0_0 boat qX8RcjE0tjs_0 motorcycle qX-YEHlu0Kg_2 knife qZWxhCk8AX0_0 knife qZf1fw737A8_1 car qZyxILyLOv0_0 knife SfZLu5uG7mc_0 car SgDdyLB3fFo_1 motorcycle SgHH9KN_nkY_2 motorcycle SgOvlqqKbEI_0 bear SgSsk-eeClA_0 cat ShHLzcBozxo_1 boat ShPl28Zw1kU_8 car ShPl28Zw1kU_3 car ShPl28Zw1kU_7 car ShPl28Zw1kU_9 car ShaLoFJZv-M_1 knife ShhC84AwZ04_0 bus Sh6uHJRUnP4_0 cat SiSP3Kko4VM_0 bus Si3psXQA46c_0 bus SjLNVLIdpbc_0 cat Sj0pcvct_3k_0 motorcycle SkLwUmczAMo_2 knife SkLwUmczAMo_1 knife SkVIH0IZI1I_0 motorcycle SlBZM22tlSU_0 knife SlIzgQZ63h4_1 knife SlWmnHWeqIE_0 boat SlYqzpZkWho_0 bear SmCvuBfyU5o_0 motorcycle SmCvuBfyU5o_1 motorcycle Sn8nb_cv5K4_0 motorcycle 
Sn8nb_cv5K4_1 motorcycle So5dCmgNRtU_0 bear So-dFj7N07Y_0 car SpGfQe7sWIQ_0 motorcycle SpuAy2Z1ejE_0 boat Spx8fHkY0Ac_0 bear SqUzKvBRVmQ_0 cat SqkoepvLN3c_0 motorcycle SqkoepvLN3c_1 motorcycle Sq-LvVdVwhc_4 bear SrBwCHcEe4g_0 cat SrPgW-L7Gps_0 bear SrTxMAryank_0 knife SsQb12lMU_w_1 car SsQb12lMU_w_2 car qZ0egYy10zs_0 cat qaKYHGIZ8tU_0 cat qantWNz3Z-k_0 bus qc1U41zjMfI_0 knife qeSfa-Xin3s_0 bear qfZHHSjai5Q_3 motorcycle qfZHHSjai5Q_5 motorcycle qfZHHSjai5Q_0 motorcycle qfZHHSjai5Q_4 motorcycle qfZHHSjai5Q_6 motorcycle qf4dZ323eu4_0 cat qf5FQP-vjpY_3 bus qgYBD0GBerg_0 knife qglTXvFe5vw_0 motorcycle qgr1pdkQkKM_1 knife qhTOaoL2B54_0 bus qhgQ0_y6Jr8_0 motorcycle qhyihSkbubs_1 bus qiW4cUVZCJA_0 motorcycle qjfkIHC3sNA_0 bus qj1y76m_WFg_1 car qklXdTo1CKQ_0 truck qlGmmBY7ITI_0 cat qlGmmBY7ITI_1 cat qlfCKWLj_xU_0 boat qlvwUVksAC4_0 cat qnaQOGGmyhI_1 motorcycle qo2tG-wOpLI_3 car qpBRU2SONe0_4 bear qpNPlLO7Wdo_6 bus SsWwZCQR8pA_1 bus Ss6lM7iutJ0_2 boat Ss-ENa079_Y_0 car Stg0xs4yv5A_3 bus Stg0xs4yv5A_1 bus Stg0xs4yv5A_2 bus StoHoHg6XHo_0 motorcycle SuoVrAXkHsM_1 boat Sv-Xsjm8Seo_0 boat Swfda4hcQzo_18 umbrella Swfda4hcQzo_0 umbrella Swfda4hcQzo_3 umbrella SwrxLGIVuNg_1 bus Sw01FqLPH0o_0 motorcycle SxxBAhDGWzU_1 car SybtH9db7tI_1 boat SybtH9db7tI_6 boat SybtH9db7tI_0 boat SybtH9db7tI_4 boat SybtH9db7tI_5 boat Syk5Jc9_tQA_1 boat SywBQoMh8Q8_1 car SzD0AW8MKxY_1 car Sz3ay4xexe0_0 motorcycle Sz3oWSS6V3s_0 bus S0AoM2Xz64Y_0 motorcycle S09dKnW798o_0 cat S12WKCebYHg_0 boat S2YoTKzOHW8_0 umbrella S3O_xjPQToU_0 knife S4lNN0zJE4A_0 cat S49Hdfpc-SI_1 boat S5VjgUVKjV0_0 cat S5Z4g_SORHc_3 knife S5Z4g_SORHc_4 knife S6crKzUWKYI_0 umbrella S6ksiMdECu8_0 umbrella qp11ZgRmeck_1 motorcycle qqd7FMwn5Ks_0 cat qqmk0BKAubw_0 boat qqo83uqRldw_0 motorcycle qqumKQ_igJQ_0 motorcycle qqumKQ_igJQ_1 motorcycle qrHPEAVq_yE_1 boat qrJljeVBE-k_0 boat qrJljeVBE-k_2 boat qrTOqXRwHqM_1 bear qrTm-7zA5FM_1 motorcycle qrU7MAMf42A_0 motorcycle qrfZoDvW7wI_2 bus qsFkwL9ikBE_6 umbrella qsFkwL9ikBE_0 umbrella 
qsbpGZepU_4_0 motorcycle qs4ACjrDQvo_0 cat qtEJPGYfmb0_0 motorcycle qtQNJD43Z30_0 knife qthVtX1KeJY_0 cat qtmXJD337Sg_0 cat quMSh4JZfSE_0 bear quSzbk4CkBE_0 car quZjkqmOTys_0 cat qvAPzGCqVG0_0 bus qvAPzGCqVG0_1 bus qvCVL7reF8g_2 bear qwBsDRYIhwg_0 cat qwI3fCK486I_0 cat qwZ_bpVY018_1 bear qwcgkEVHQS4_1 motorcycle qxwgvTIA0Oc_0 umbrella qykj452YYlU_0 boat qzjG5RMNfB0_0 cat q0tjDTtHr00_3 knife q1LbqldHuM0_0 knife q1QElQCedrc_0 umbrella q15Lr3-V3qI_2 motorcycle q2K3ctdaVGU_0 knife q2MasRNKQxI_0 bus q2NfowB59fs_0 motorcycle q3J7hUfBGGQ_0 cat q4EXWy685Wo_0 person q4EXWy685Wo_3 motorcycle q4EXWy685Wo_6 motorcycle q4EXWy685Wo_7 motorcycle S7SEfKdokC0_1 bus S7-k1XdAR7Q_0 cat S8BbQRnxfqY_0 cat S8WFgIrdEyI_0 car S9LooqaA-VA_0 cat S9wDiwQMla8_0 person S9wDiwQMla8_1 motorcycle S9wDiwQMla8_2 motorcycle S9xCWTCFhNc_0 motorcycle S-T-e07Bgys_0 motorcycle S_K_nwYUS2o_0 cat S_09gd9e0zE_0 boat S_5w6lmw0DI_0 knife TAzjOrAfzFM_0 cat TA1NbMN7gNo_0 motorcycle TBvuwl0phUE_0 motorcycle TBy---hD-FA_0 bear TB9qJG8A-H4_0 car TCS6svwO2AE_0 boat TCVj-PtxnsQ_0 bear TDSmQkKnGFU_1 car TENive2WCAw_0 cat TFUV5Dy2MvE_0 motorcycle TFu5bNUW02Q_0 bus TIZr3Y-PLQM_1 knife TIpoS2Jymv8_2 knife TJJgVPay9LE_0 bus q4zFevdC3-w_1 knife q5D67534lFM_0 motorcycle q5ESvcujAps_0 person q5wOimcVyaI_0 cat q6YyhMSTSjg_2 bus q6YyhMSTSjg_3 bus q65QzEDi_jo_1 motorcycle q8nG4OvfGhY_0 cat q8oKL5zvWZw_0 cat q9QycGD31Co_0 cat q9ZSVLXRUx8_1 cat q9p4QZdwQ0I_0 boat q-Sw3Dx1Xb0_0 knife q-lbxXK_UY8_0 bear q-nt9k61jqQ_2 boat q_NnyABqOFg_3 boat rAcvNOp95pA_0 car rApBsMx8ZjU_1 umbrella rAtKVQ_h94Q_1 car rBLqbf-KdaY_0 car rBjCxCwLz84_0 car rBl7T312SPQ_0 cat rBnSmzTRsqE_0 car rCAA1xoobto_0 car rCOxllaoO64_0 bear rCrQRhaJeAA_0 bus rDEW_AdTSH4_1 cat rDEdeXsgOdU_0 umbrella rEL7A7rKARs_3 knife rFF0purpqAU_2 knife rGgvqpRsaew_0 bus rGlpoWppAfU_0 car rG4cDTukyNw_0 car rG4ld81Rxt8_0 car rHHUlsaTde8_2 bus TKCXvzTT2ws_0 umbrella TMyv9XNlPGQ_0 bus TQWq_YDrKc0_2 knife TQm0C-2ersM_8 boat TQm0C-2ersM_10 boat TQm0C-2ersM_1 boat TQm0C-2ersM_5 boat 
TQm0C-2ersM_6 boat TREARdQ16GQ_0 car TREARdQ16GQ_1 car TSQwlIeADdw_0 bear TSQwlIeADdw_1 bear TSQwlIeADdw_2 bear TSpUcayboiM_0 car TS7UuEszy9E_0 car TTQQky-HcCs_0 knife TTdbV_lHq_s_0 cat TUrnPZr3eXs_0 bus TVjvTR7CrNE_0 knife TVvo40ERO9Y_0 cat TW6cU7OYa60_1 cat TXrnNVUe53o_0 boat TXsQGHJjWhI_2 knife TX2BAlXe5IA_0 boat TX2BAlXe5IA_2 boat rIUepAhKVnM_0 cat rIc3ZEnqjQA_0 umbrella rIezbmq7N9U_3 bear rI79TJwwnW4_3 knife rJGGo2bI150_0 bear rJGGo2bI150_1 bear rJGGo2bI150_2 bear rKiQjOPzf0s_0 cat rKs2bGgU29k_0 cat rLm1866Q28U_3 umbrella rLm1866Q28U_0 umbrella rLm1866Q28U_1 umbrella rNlm7i1BcaQ_0 cat rNw1jiERG4I_1 car rOtd7pdh-zY_0 cat rO0qo7r4TTc_0 cat rPCOxxRwiTM_0 bus rP6vb-cxVcI_0 bus rQBwAWkz3Ao_2 boat rQBwAWkz3Ao_0 boat rQBwAWkz3Ao_1 boat rRL4f466oNQ_0 umbrella rR9vwlyXtYs_0 bus rSNfdcbzEhE_1 boat rSNfdcbzEhE_2 boat rSNfdcbzEhE_3 boat rSNfdcbzEhE_6 boat rSNzuWEgSeg_0 cat rSWYvSf29vQ_1 cat rTM-3OYHQZA_0 bear rTM-3OYHQZA_9 bear rTreVVS3XVg_0 umbrella rUcsGq10bCk_0 umbrella rWLG9njOx1k_0 car TYuoW3gezZ4_1 car TZFETDh9bQo_1 bear TZFETDh9bQo_3 bear Tain2YW14ok_0 umbrella Tb943q0WnTY_0 car TcfdUbzZcIc_0 knife TcnKT-jCrxQ_1 bus TcnKT-jCrxQ_0 bus TcnKT-jCrxQ_4 bus TdmeXkKeGmE_0 knife Tdxsosl1CIk_0 umbrella TeF2gxyzjF8_4 knife TeM8oPJR8nM_2 bus TeM8oPJR8nM_4 bus TeM8oPJR8nM_7 bus TeSMF-Tw8b8_0 bus Tf8ZmK4GZYU_0 bus Tf9piH7b4Js_1 bus TihSkV4th6I_0 umbrella TimXSaV1u4M_2 bus Tjs55_3zB_o_0 knife TjvHNNlcym8_0 knife TjvHNNlcym8_4 knife Tj-U_ZtaHe0_0 boat TkmEiKe_Uto_0 boat TkuUMAPSGiU_1 car TnN1RBRfLnE_0 umbrella TnN1RBRfLnE_1 umbrella TnXDBpRvE_U_0 bear rWw_OZqgPk8_3 bus rYlL6avPERw_0 car rZDchhWp8lc_1 bus rZ7XejB4nyk_0 boat rawi3Ka9Oew_1 car rawi3Ka9Oew_0 car rbONk59p13Q_0 bear rbWOxoprQ2M_0 bear rbXmAC9QV2A_0 car rbjK97ECn_A_0 boat rcrE_BJU-n4_0 knife rcrE_BJU-n4_2 knife rfksy8z9X40_0 car rgWglS6-TTw_1 knife rhIa7DWBXUM_1 car rjVLfZDg-1g_0 boat rk9SO8fR7-0_1 bus rk9SO8fR7-0_4 bus rlBfiB0epGw_1 knife rlLJTjn9vkk_0 umbrella ToclpwxGMe8_0 bus TpKpXHgy7yw_2 knife TpKpXHgy7yw_5 knife 
TqPnQuSGm2Y_0 bus TqZZfXdm7D0_0 car Tqnj4qeawHg_0 boat TqsQOw3CqXo_0 bus TrXkieSIkII_0 boat TsfcgwFff0k_0 bear TsrQwMo3niY_1 bear Ts8Wofx6QYY_0 car TusmYht5g7o_0 bus TvbiwdoAnv8_0 boat TvvBAOBoHFU_1 umbrella TwEihF94LGQ_0 umbrella TwSkZlbuaEU_0 bus TxUm-m-jFQM_0 knife TyV9inNHHAE_0 bus Ty_FDwb_nLY_2 car T0Mp-gJmMlU_2 bear T0Mp-gJmMlU_3 bear T0tT7l2X1_g_0 bus T1Zywya-PcI_2 car T1Zywya-PcI_3 car T1Zywya-PcI_1 car roNPRQwafcU_2 bus roNPRQwafcU_5 bus roW8_xIYVAk_0 knife roXQ3vv08_A_0 bear rqA8P346qIQ_1 boat rqDqbsbIcc8_0 bus rq5jwk8hqYA_0 bus rq5jwk8hqYA_1 bus rriv5ZJYcJI_1 knife rsMmhzkVg_0_0 boat rta_HO-3L_A_3 bus rwH7x0MR_38_0 boat rwS5mEyV7Go_1 knife rwS5mEyV7Go_2 knife rwcVAIM0TvE_0 bus rwcVAIM0TvE_1 bus rwu0xKkvzvA_0 knife rxRxMZ6DIjw_2 umbrella rxSJHCdoi0c_0 bear rxm15TcjWqQ_0 knife ryBGF3WFvsY_0 bus ryBGF3WFvsY_1 bus ry0Pnb8VkxU_0 bus ry0Pnb8VkxU_1 bus ry0Pnb8VkxU_3 bus rzDa9eW_dpg_3 car rzDa9eW_dpg_5 car rzOhM6n6Amc_0 boat T21Uim3jGuo_1 bear T3wZwUQ_7q4_0 umbrella T5ZgfFcAd94_0 bus T6QiKZd4bH0_0 knife T7h2fJLtABk_0 knife T8C-sLfGg3A_0 boat T-5AESRu0pM_0 car UAptbKXXoJI_1 bear UBk45sVKl_o_0 umbrella UCnTA86V3o0_0 knife UDmjHWk8iRk_1 bear UE1kUiVy7LA_1 car UFPrfB6_TJY_0 bear UFQmHju3MrM_0 bear r1JK0UIvyoM_0 bus r1YNttJqXjI_1 bear r2GN4IDacgM_0 boat r2GN4IDacgM_1 boat r2GN4IDacgM_2 boat r2GN4IDacgM_3 boat r2sw-3mWNEQ_1 boat r4U8cMe6_Uo_0 umbrella r4cneWcmGJc_0 bear r4cneWcmGJc_1 bear r43KKtRQNxw_0 knife r5c09tdbF3U_0 knife r6HzXMpwuOg_0 boat r7V8M9vMX8I_0 boat r8oV5neCRZc_1 bear r-Wqqn-oS_0_0 bear r_squ5DWzV0_0 bus sAa0aLc0rvM_0 bus sAo-z30biYY_0 car sAqB_9DrpiU_0 boat sCGJB9oAeHo_0 car sCX1zbdQvbE_0 boat UHvwjd6eSDY_0 car UH6GKx07mu0_2 bear UIlo6WvfABM_0 boat UJ7xasCu9yw_0 knife UKdl8BrKy4g_0 knife ULTTzu_-eQI_2 bus ULgPda0ny1Q_0 boat ULxGPhbhuwI_0 umbrella UMQ6fAZTiLo_0 umbrella UNfKxOwP1V8_0 bear UNyq1SNbNPk_1 bear UP2WXifDFc0_0 bus UQdjo1v_Hv0_0 car UQrP0Wa7bfA_0 bus UQ90qkTMSes_0 umbrella URiNDCZBU7E_1 car URmMAndDPfQ_0 boat USYudaDNkeU_2 knife USYudaDNkeU_3 
knife UTx1Fw7nQcQ_0 bus UVGq9IRroYo_0 boat sDSmkWE8qw4_0 knife sEnhkLttWlw_0 bus sFgXir9g_Os_0 car sF2EQhRNlQc_0 umbrella sGpQTqemybM_0 bear sGzXdAI4YSQ_0 bear sG_AruJlxiw_0 umbrella sJA7-N7htNo_0 bear sJL716urwpY_1 car sJL716urwpY_0 car sJTLB7bgb0k_1 knife sJsEpKneYMs_1 bus sMm8f8vBx7c_0 umbrella sOQWtx6GiR4_1 umbrella sOQWtx6GiR4_0 umbrella sOvnHbg6d_8_0 umbrella sPDY-ey2kNA_0 umbrella sPDY-ey2kNA_1 umbrella sQEBpH647Mw_0 umbrella sQJr7LooP_s_1 boat sQftML4HXiU_1 knife sQvi3OxMoKU_0 bear sQvi3OxMoKU_1 bear sQvi3OxMoKU_2 bear UWJIq_1uAnA_0 boat UXDmIQTthAE_0 knife UYRhIhbuh34_0 boat UanzlUcmoJY_1 bus Ubj2t-7KcJk_2 car Ub5O76sDojg_0 car UcBLQsI3Dzs_0 car UcKyiCjpXoA_3 bear UdFEBlYt9tM_0 umbrella UdaAkO2f_pU_0 bus UeQLdrnbe8E_1 bear UeQLdrnbe8E_3 bear UgHNBgeg9cY_3 knife Ugh33I0Qxi4_0 umbrella UgkXJsrPys0_0 umbrella UhgJaZWsdCQ_0 knife UhupGJ7k3Q0_0 knife UhvhrEMHY0E_0 boat UhwOdFtF8os_0 bus UiZ3tYMpOic_1 umbrella UjTdR_85bTo_0 umbrella sSPe9VqmSuU_2 bear sS-GtompdcQ_1 boat sUhpJsSmrzA_4 boat sU-mmzCCGmg_0 bus sVbrxAG6jtA_0 car sVkPUjUh0UQ_0 knife sV9ymK-zZ8A_4 bus sV9ymK-zZ8A_6 bus sWfQh6SsvG0_1 boat sW7n8r3vvl8_1 knife sXwrjhXbAwA_0 umbrella sYE45Xnof5I_3 bear sY1i3-cQv70_2 boat sY3G5eOlysI_0 bus sY_jGNxKdYw_0 knife sY_jGNxKdYw_2 knife saBAx3Xw2PE_0 bus sbR26E99_A8_0 bus sbmsWqsHD9M_0 bus sb1unJ1sby8_0 knife sb1unJ1sby8_4 knife scFiRRTU5jg_1 bear scJFbu3WboQ_1 car sc-BJ-WirDo_0 bus sdHNJK0mfWQ_3 bus sdd5ViCUDwY_1 bus sfVwMcMm77E_1 umbrella sfVwMcMm77E_2 umbrella UjxwNRWfxBo_2 bear UkBlnrNOssQ_1 bus UlLwBfXpz4A_1 bus UmAOVqCB6UM_0 bear UmBxMf5cHV4_0 knife UmewKWpE2qE_0 car UrRiUQPaxic_0 umbrella UrxeEW4FBq4_1 umbrella Utvo55GUNyg_1 bear UutgI7H2EPc_0 bus UutgI7H2EPc_2 bus UutgI7H2EPc_4 bus UutgI7H2EPc_5 bus UutgI7H2EPc_6 bus UvsMOU9XGYk_0 car Uvsup5BdpLM_0 car Uwlk3sF-l38_0 knife UxD-6ScNF1U_0 bus Ux3oyD0wLig_0 boat Ux_-m16Ntqs_0 bear sgDzqYTo0GI_0 car sgDzqYTo0GI_2 car sghMPNg9wB0_0 bus shgKQ2FcjfM_1 knife siNixoeB9Ew_0 car si8Uk6frpqI_3 knife sjBWnj8kKVs_1 bear 
sjESht-PXb0_2 bus sje-nlCBYAk_0 bear sk5gj6VnXds_0 boat slGCyLNlI3w_0 umbrella slgsRri0IUU_0 bus sli0aHrS-l4_0 knife soPkYPTLD-Q_1 boat soe3qmwZTEE_3 knife soe3qmwZTEE_4 knife splTIYA-rtY_3 knife srUGXKwzLf0_0 bear U0G9nt_JMp4_3 knife U1jXflUgiSo_2 knife U1p1HQ3ZsUo_2 car U1tGGfRyOzY_1 car U3BQYG5-Koc_0 bus U3pwXnANDgk_0 knife U3pwXnANDgk_6 knife U4nccTmpY0A_1 bus U7N--AsibJc_1 knife U7fW1r0kRYw_1 car U7-_NQlr8l0_1 bus U8EGQyjwfEQ_0 car U85wCYoCIZ4_0 knife U-B7Xkx_rF0_1 knife suQJeplwaco_1 bus svZPjH3EGcI_3 car swj8kdhr03w_0 bus swkyfcVE17I_1 umbrella syJ4LBRPwjs_1 knife syY8MaSUvJI_0 car syfJEZrVzqA_0 bus sy9XCn-ebrE_0 car szClXDUETvQ_0 umbrella szW2Gonojss_0 knife szXVjlTlt3w_0 bear sziUCgMKvrM_0 bus sznHM_K2obc_1 bus sz6Zoh7MfnA_0 bus s0ABooHpZjo_0 knife s09Dr7gZ5G8_0 boat s1t73kIOSQU_2 bus s2BVmX4vImY_0 knife s2gkrcGsOxU_1 bear s2nioy3J4RY_3 boat s2nioy3J4RY_1 boat s2nioy3J4RY_2 boat s2qgkHBVQxo_0 bear s2qgkHBVQxo_1 bear s3lwoM0rD2U_2 boat s3-sF0tSY8w_0 umbrella s6BicsP9eBk_0 knife VA3OWlsrD28_0 umbrella VBPWsv5FfbU_0 bus VBPWsv5FfbU_1 bus VBr3P_OGawE_0 knife VB6eUS7LSfM_1 boat VCCevTa32Ng_0 car VDz1RZU6x5c_0 bear VESEWamKy10_0 car VFv1UuT7klg_2 knife VGAYYimByOM_0 car VGwSM3IXcJ0_0 boat VG_OHq6R1AE_0 bear VHiMLGyNYgQ_0 car VIASAf569_k_0 car VIxj6BV3kgM_0 umbrella VJZpavOgVEo_0 umbrella VLaCK3u84vI_0 umbrella VMLuyFD54AQ_2 boat VMXrHUjXjyQ_0 boat VMXrHUjXjyQ_1 boat VMi5mAdZyZI_1 knife VMs0jemUzI0_0 knife VNuYRPiFrus_0 bear VN-BCqBlrhs_0 car s8vzssNUlOA_0 knife tAOx6NFDD9I_0 knife tAxbjy_edDI_0 umbrella tBOSPNFbuv8_0 umbrella tBQRfKeIYZc_2 bear tBgtSnOMOwM_0 bear tBh6HxQHmrs_0 knife tCZLl-MZJp8_0 car tDYPtg0At_Y_0 bear tE42n_1PW6w_0 bus tFfqpeBbvr0_0 umbrella tFjlTZqwoWI_0 bear tGycfa97LVU_1 bear tIX4eIYzfD8_0 knife tIX4eIYzfD8_1 knife tIs05U9pd04_3 knife tIs05U9pd04_1 knife tIs05U9pd04_4 knife tIs05U9pd04_5 knife tJXbZyaUOD4_0 car tJhfshKvRmE_1 bus tJhfshKvRmE_4 bus tJ01Y3R3Qmg_0 umbrella VOcplsa6Gq4_2 knife VOcplsa6Gq4_5 knife VPI_Nm3GHHc_5 bear VPI_Nm3GHHc_2 
bear VP0u_E6FOsY_1 car VR_V9WaFYn0_0 umbrella VSj9dXwt7zI_0 bus VSxoLvaJN2Q_1 bus VUcCABjVSO0_0 car VU2lUX4NdkM_0 knife VU2lUX4NdkM_1 knife VVg7sbsw9vY_0 bus VWpm6_Uhis0_2 boat VX9TPrjMcOg_0 knife VX9TPrjMcOg_4 knife VZ5r0BHRf84_0 boat VaW7Go5pX-c_0 umbrella Va50KanUO94_0 umbrella VbA0B1JcpNY_2 knife VbeIRLOQ5pI_0 bear tKCjJuulqx4_2 bear tKCjJuulqx4_3 bear tKCjJuulqx4_4 bear tKN3Qo0oUoc_3 knife tNvGTzks1yw_0 car tNvGTzks1yw_1 car tO0igm1AwqU_0 bus tPae9uGqDog_2 bear tPzWEC_9_H4_3 knife tQpyrprwwc0_0 umbrella tR2sDFGND7g_0 bear tSEneDiCrqg_0 bear tTFTWquOTi8_0 bus tTjbx39rZMk_0 bus tT2pUZ0W33A_0 bear tUHf6Ynx_vI_0 knife tVJE-0uNX1s_0 boat tVTkAh80t5I_0 umbrella tVuL82POt-I_1 car tXMBGjGduCM_2 knife tXsMGHCKw7U_1 boat tXwfqREzEtI_0 boat tYGp2PFiAUE_0 knife tYas1z25M_4_2 knife tYcNeSisfpI_0 bear tYdhIaTDwiE_1 knife VdLohVQNC5Q_0 knife VdLohVQNC5Q_1 knife VdLohVQNC5Q_5 knife VdLohVQNC5Q_6 knife VeUIJlyGjkY_0 car Vekx17G8mkk_0 bear VfBqMWT6aRM_0 knife VfKgW5eSGsk_0 umbrella Vhmj1OGGQuc_1 bear Vhn-8bCU70s_0 bus Vh21adwevRU_0 bear ViXmx_D5BAY_0 knife ViXmx_D5BAY_3 knife VizxeIzWEFw_0 car VjF-G6FQooU_0 boat VjS5w2pc0tA_1 boat VjvpOU349zY_0 bear VkDn2-1H23o_0 umbrella VkDn2-1H23o_3 umbrella Vk43AD4O_hc_0 boat Vnrw6Fjmj8I_0 bus VnwwgTO4w_k_0 umbrella Vn4aKSlYXX4_3 bus VppPgMZqfEQ_0 boat Vp0kah4_m6w_0 boat Vp0kah4_m6w_2 boat VqHSuVVKfjs_0 bus Vqo2RiAzLnU_1 car Vrnm_kf7OCs_0 boat VsDgOcOWqXw_0 bear tYofvh4_3K4_0 bear tadYkEU5suY_1 knife tbIUesoKv9Q_1 bus tb_hKPkH2co_0 knife tcFQ5kE3PKM_0 car tcFQ5kE3PKM_1 car tcSHrlGTFJc_0 knife tc912gGdckQ_0 boat tdjDSO8NFx4_0 knife tdpAPPsHlDQ_1 bear teJyM5tywno_1 bus teQkZqDa1lw_0 knife teb83RDwop4_0 bear tgSfan8G7wo_0 car tgVXG7H_acI_0 umbrella ti3J-8aWPcw_0 bear tjldcvPuif8_0 bear tj4mnSXX2DM_0 car tm2bmSBR4uE_0 knife toiMoCxSyKY_2 boat tos1ELGZH0M_2 umbrella Vs3Mi3Ch_EQ_0 bear VtHzTaDh4WM_0 bear VtHzTaDh4WM_1 bear Vt8DAmG3nHs_0 car Vu4xkIEs6U8_0 boat VvXxRawsOCs_1 knife VvXxRawsOCs_4 knife VwYEgB5HOD0_1 bus VxdUG7Sinyw_0 car 
VyDNhpvCuc8_0 bus VyfIuIcelhc_0 umbrella Vz3wJsLA_gI_0 bus V0NnR8HLSbo_0 umbrella V0o8kxcOZRc_2 bear V1a9QcSegdw_2 umbrella V1dqjmHNyIY_0 boat V23vmoZYoVw_0 bear V4o7I9cLp-g_0 bus V6nKvvfzWpg_0 boat V64pvhB8sKU_0 car trAReSHvUdQ_0 car trAReSHvUdQ_5 car trAReSHvUdQ_6 car trAReSHvUdQ_1 car trAReSHvUdQ_2 car trAReSHvUdQ_3 car trAReSHvUdQ_4 car tsNhgDUKwHw_3 knife ttdTnGOIBmA_0 umbrella ttdTnGOIBmA_3 umbrella tvVLkJ0HTQQ_3 car tvew-P2UPL4_0 umbrella twiEfNprSoE_0 knife twiEfNprSoE_1 knife tw7jf9U2-kM_2 bus txpIIsM1T8U_0 bear tx2dZF1Ckxk_0 knife tx5tKODiGuo_0 knife tx5tKODiGuo_1 knife tyO37NBAS1Y_0 bus t1UtwxOBGvE_1 knife t1vrE0cEB80_0 bus t10FRgv9o5M_0 bear t10FRgv9o5M_4 bear t14PUW9SINk_0 knife t31z17N5skw_0 knife t31z17N5skw_1 knife t31z17N5skw_3 knife t31z17N5skw_4 knife t33TQH8-7tg_2 boat V9UCv2qhsxc_0 car V9ulnUIQGJU_0 bus V9ulnUIQGJU_6 bus V-KNIu_PsaQ_0 bus V-NvBHig1i0_0 bear V-tMggTxBu4_0 knife V_Bb7A55f-c_0 car V_dJ2KuqfOA_0 boat V_dJ2KuqfOA_1 boat V_t8pbEf8bA_1 boat WB7fT2tI7Pg_5 car WCSEuwFm7KU_1 car WCfc8YGLu1o_1 bear WCfc8YGLu1o_3 bear WDgLmrXq4vg_0 umbrella WHLIJlNh3TQ_1 knife WHQXE5tuTXk_0 car WHUaoqVF57g_0 car WIdj4ovuDWQ_0 bear WIdj4ovuDWQ_1 bear t4oaGCoTBZc_0 car t42tnyTtYWE_0 boat t7OKXKxjHls_6 bear t8X-x_7pv94_0 car t_-dK1Xhg90_0 knife uAjqm8B-aio_0 knife uB_Hurzj4s0_0 car uGEDuDcqqvU_0 boat WJ2A2XRRTw4_1 bus WJ_vIH7FJsQ_0 car WKDhXr_5mbI_0 knife WKKFM7oRSd0_0 bear WKS6aq75gk0_3 knife WKV4j8-G1Nc_0 knife WKfQfA_YQTY_3 knife WKubVTrND7s_1 knife WKzUT3zOIU8_0 knife WLxzHH6iJlk_4 boat WMSu-XOQe5w_4 bus WMSu-XOQe5w_0 bus WMgP1z0x0Io_0 bus WOVTnN-HcZ0_1 bus WOxTA78OlZU_0 knife WPqEyeVtih8_0 bus WPuItCUuEkY_1 knife WQAr1enuPKw_1 bear WQX6ptTAKHg_0 knife WSc0kYKLGTg_0 bus WStgEyiPBBE_0 car WSvHn5XJq0Q_0 knife WS0DayzAv80_1 boat WS0DayzAv80_2 boat WTXytzbF5lU_0 umbrella WT69VoU2Hps_0 car WVx9vOoutGo_0 bus WWKuCF2FuYk_0 car WWm9iMkKk-g_0 knife WW7ib8XAVz0_0 boat uHqj6xQGOYg_3 bus uHqj6xQGOYg_4 bus uHqj6xQGOYg_6 bus uHqj6xQGOYg_7 bus uIKZlXUoHOc_0 bear 
uJMFDY-BKiQ_1 bear uJMFDY-BKiQ_4 bear uKdOuLYJjrg_0 knife uK-zcpEE8nE_5 boat uLdXkXRsHok_0 umbrella uMK6b2TG8rc_0 bear uMV37U-DNUQ_0 car uMciOwjd0GU_0 car uMciOwjd0GU_1 car uMd1DmjxAZQ_1 car uMj3V0s7mUo_0 bus uM_jxm7bFp8_0 boat uNDkbmlEYeQ_0 bear uO7OtV3J1AY_0 bear uPE1o5dCYDc_0 bus uQhMkVrdghM_0 bear uRLAyu-3l0A_0 knife uStpLanz0fU_0 car uTAqzBGMDOc_0 bus WYwRW_t4jb8_0 car WZK5IqBtpGE_3 knife WZgxjIvc2nk_0 boat WaEyVBSggwQ_1 bear WaaW6bElWCM_0 car Wb20JaIrr8M_0 knife Wb20JaIrr8M_2 knife WcNlbTBZM64_0 umbrella WdIATjW74Pc_0 boat WdYFXDv4TEo_1 car WdgTHJurLx0_0 umbrella Wd0xTEH2d9k_0 boat WejCws8AoxE_1 knife WejCws8AoxE_2 knife WejCws8AoxE_3 knife We4_tuFKyGE_0 knife Wf6hHpxRW_Y_4 knife Wgx6hhiRLoA_0 potted plant WjiMUA6_CkY_0 boat Wlm2mLKCMlM_1 bus WlsN6HURFTc_0 bear WmFqo8n67Ok_0 bus uWi9-84kTFQ_1 bear uXHJHV0bwUk_2 bear uXe9WOlTFcs_0 bus uXe9WOlTFcs_1 bus uZgcOYmazsw_0 bus uaJ1g0xJ4QY_0 bus ual32V7-KJo_0 boat ua_5GosOa-c_1 bear ubFoUAh6d4g_1 knife ubOiomYqbNs_2 knife udSE-6UkgwM_5 umbrella ue1CIlwhPEs_0 umbrella ufFT2BWh3BQ_0 bear ugWs4v6DbUw_0 bear ugsJ5cOmFTg_1 boat uhXcL98XNCY_5 umbrella uhXcL98XNCY_1 umbrella WoxbRmDfLeI_0 umbrella WoxbRmDfLeI_1 umbrella WpCyx-QCMec_0 bus WplsTumdQf8_0 boat WqFFUvf-YJk_0 knife WqxU9aIFmNY_0 umbrella Wr5BjrtC4Ts_1 knife WsEiHZFGeFs_3 umbrella WsaP8FyRUCc_0 car Wses8y3NyJ4_1 bus Ws9V_B7mqJI_0 knife WuTHL7GtG-8_3 knife WvGzCV5ICZM_1 boat WvuZRZqhxk4_3 knife WvuZRZqhxk4_5 knife Wvv8cOXaAZI_0 bus Wv-Weuc4E1A_0 umbrella WwLtxfDC7ok_0 boat WxWXB9hf7n0_0 car W0kDpFkg6xU_0 boat W1z3EAv-eJw_0 bus ujnUCtI7gzI_0 bus uj4TRH5r_ww_6 bus uklsFjegS-w_0 bus ulzto7-Hl64_3 bus ul__w-oqHrw_0 bus umjU9X1kuYg_2 car umjU9X1kuYg_4 car umjU9X1kuYg_1 car uoGBYfJo5Xg_0 car uo1J9BUgQmk_0 boat urRNkZvzuHI_2 knife urmSoxyi9Vo_0 boat urmSoxyi9Vo_2 boat utmsGeHFdvI_0 boat uuBKDGoTmGY_1 car uu-UptVYr_A_3 car uvV7cblR4qc_5 umbrella uvZOzZjBKXY_0 bus uwL5LYln0EM_3 bus uwL5LYln0EM_4 bus uwL5LYln0EM_5 bus uwL5LYln0EM_6 bus uwx7UKo4jcg_1 boat uwx7UKo4jcg_0 boat 
uwzHiGF1YMM_0 boat W2z3SxorVnI_0 knife W2z3SxorVnI_1 knife W38vB3cw2fA_2 boat W4Is7CI2Sfo_1 umbrella W47ZA0onzb4_0 knife W5dSTfMCj-U_0 boat W5zIkmZyS18_0 bus W51Spbo8SQQ_0 knife W6YCv9ZVVOc_3 boat W6uCEMEi7_E_0 bus W7JkNuRYNr0_2 knife W7JkNuRYNr0_3 knife W7JkNuRYNr0_4 knife W7JkNuRYNr0_5 knife W7yqHDA_RMU_0 knife W8EKt6cG0E8_3 bus W8EKt6cG0E8_7 bus W8EKt6cG0E8_1 bus W8xqW-QD_B4_0 knife W87M2lQeWNk_0 bear W87M2lQeWNk_1 bear W-ZpC_K7Df8_0 car W-x__78AyrI_0 boat W_Wc7lFraRg_0 bus W_v5wpcibRM_0 boat W_2LqiQ_ico_1 knife XAa2L1v8iJM_1 umbrella XBAOFn8KXFo_0 bear XBn6P-IKuis_0 person XBssw3bqXL0_2 bear XCZv_AjZo08_0 knife XCu0Ea4zHuQ_2 bear XDtfr902CVM_0 bus XD1OYmmeKic_0 umbrella XD1OYmmeKic_2 umbrella uxFX6p61oPY_0 knife uxlDad59mFc_0 boat uyWVUOcgZHg_0 bear u1OhTXTmuWM_5 bear u1TvbkpmEbs_0 car u1vMDzyFxzI_0 bus u2BVfAFQ1zU_3 knife u2BVfAFQ1zU_2 knife u2EDuPJijZ8_4 boat u4K3jRl7Gag_0 car u4S9mlFpt0s_0 bear u4uwaq4uf54_3 car u4uwaq4uf54_0 car u6XGBXhCJ18_1 knife u7STs8FCy_g_0 bus u-1HZJXwFHo_0 umbrella XF8B5xjRCF0_0 car XF8B5xjRCF0_2 car XF_oHXRGd1o_0 boat XGRZLrZC9zY_0 boat XIlybSpq0mg_0 bus XJmn9i57K3g_0 bus XLvSaN_M6lE_0 car XL0B2niNRCw_2 bus XMlEA_yRojM_0 knife XMyio1ZckJc_0 bus XQBtgwUzEL0_0 car XQX5y5BQykU_0 bus XQ6u2yTbu_0_0 car XQ7UbbPjnDo_1 knife XRenv5AHI_8_0 boat XRpgkCuziGY_0 umbrella XSI7M8s2Tc0_0 bus XS4ow1Wcaro_0 car XTm-jN1RVHA_0 umbrella u_YKLGqrMKQ_1 knife u_gN-dXNRHI_0 knife vARZcTna8NU_0 boat vBEaeqdPsho_4 car vBEaeqdPsho_3 car vDT-DShjnjU_0 umbrella vEMHY2cT6kA_0 bear vEi5gkcTDGY_0 bus vE9zapt1WdI_3 car vFSRrtT5AL8_0 bus vGbt_XsSaVk_0 knife vGi-DjriLLs_0 umbrella vHAlsHYE3mo_3 car vHAlsHYE3mo_0 car vHXM9IJdVcM_0 umbrella vIQAK-4lMOc_0 umbrella vIgmRBC2ayQ_0 umbrella vJl9QkAbpc8_0 car vKxCl7DzJjI_0 knife vK8dgvZ5B6A_0 umbrella vLA-mHM7MAQ_0 knife vL-6uNdrCV4_2 knife vN54ADSnJmE_0 bus vOKH_DIjvAU_3 knife XUkTknKOdrs_4 knife XVa23hmwe-E_0 umbrella XVrNN52RTEs_2 car XVrNN52RTEs_3 car XV694aCXY8Q_0 boat XW6BQWpl3bI_1 boat XZl5Luzj6v0_6 bear XaSsc3noeLs_0 
boat XbHWOyNM3Bw_0 bear XbHeGzyGejE_0 bear XbWrCVe09YA_0 boat XcLl0qSs9bU_1 knife XcifNE0anDo_0 knife XcifNE0anDo_1 knife Xc1jzGFyrnE_0 car Xc5LW1FIVE0_2 knife Xc5LW1FIVE0_3 knife Xdu-98BUgmA_0 knife Xd7VbtoAdb0_0 car XeOwt5KeVfA_2 car XeR1DgyOa9o_0 knife XekvrqFtazY_0 bus XeplLROyXyA_5 umbrella XgBTEQN_ZxA_2 bus XgBTEQN_ZxA_4 bus XgBTEQN_ZxA_7 bus XhSmPb3cA_A_1 knife XhSmPb3cA_A_3 knife XiEeY5R56EQ_0 knife vOy0N09kGEE_0 umbrella vO56uCHmSjg_0 umbrella vPVpX6GPY5Q_0 bus vPVpX6GPY5Q_1 bus vQ_8ry_dx68_3 boat vRhGvmXk2js_1 boat vRzpk-thwA0_0 bus vTvjeXsP7TM_1 car vTwSeYRU_WQ_0 car vTwSeYRU_WQ_2 car vUKk9LqKVpA_0 boat vUKk9LqKVpA_1 boat vUg2Sr7Jl-Y_0 umbrella vVKZzTBvsF4_1 bear vVNCUA8hss0_0 boat vVUbZCrCqEU_1 boat vV72xGim-is_5 knife vWMiT73g5-k_0 boat vWO0tyaGuaM_0 umbrella vWUAzQ_EEJ4_0 knife vW_aJr-PSvA_0 bus vW_o48lG_0I_0 bus vXX9FmlwVlk_1 bus vXX9FmlwVlk_6 bus vXX9FmlwVlk_0 bus vXX9FmlwVlk_2 bus vXX9FmlwVlk_4 bus vXaLFnwvrX4_0 bear vXvR0RiGzj4_1 car vYROjLzMqvY_1 bus vYROjLzMqvY_2 bus vYROjLzMqvY_3 bus vYwdLoOa0Rc_0 umbrella vYwdLoOa0Rc_1 umbrella vY1sAfu99Es_2 bear vZznldYVwGA_0 boat vbfWHUjHR2k_0 bus vcdEtOGEEcU_1 bear vcdEtOGEEcU_0 bear vcdEtOGEEcU_2 bear vch6R3EO9Ec_0 knife XjHJiHO6onE_5 bear XmVv2wQSvjs_1 car XoJahpK73EM_0 boat XoqPCnlpymI_2 knife XpDVw5mS058_0 boat Xp591jCTBOA_0 bear XqfkP1lAkyE_4 bus XqfkP1lAkyE_5 bus XqfkP1lAkyE_2 bus Xq-5DHWJ1pk_1 bear Xrh68BP53Gw_0 car XriRhjtrlLE_0 car Xu-ZZl_L38Q_2 boat Xv9eEVcD2P0_0 bus XwvKtur_QEk_0 knife XxHnDkI1NdQ_0 bus XxHnDkI1NdQ_1 bus vfzGrdk_Mxo_0 bear vhrRnvGSMMY_2 boat vhrRnvGSMMY_5 boat vhrRnvGSMMY_6 boat vhrRnvGSMMY_8 boat vh4BHzMwVT8_2 boat vh4BHzMwVT8_3 boat vi4ktD0dAD4_0 car vkfdn7gkQh8_1 umbrella vknUR0K4MqM_0 bus vlNLyHxz1TY_0 boat vlaeAly1nZc_0 boat vmr5UiZekic_1 bear vo0WWdM7UCs_0 bus vo6Uzhx2fcw_0 boat vpItyB8epmQ_4 boat vp8NiaEmk2M_0 bus vqeybXtIwxE_3 umbrella vrK5lDQJnmc_0 car Xy1w-6sjVS0_0 bus Xzj_w2QkjRg_0 umbrella X0iu2HmUYfY_0 umbrella X0nevXM5278_0 car X1drOgA68EU_0 bear X2zWe7ayseQ_1 bear 
X3ST-FA3VS0_4 bear X4YaqObAEns_1 bus X4kxk4G-BOs_0 bear X4kxk4G-BOs_1 bear X6Y6e6qsVOc_1 bear X6tuO-hL1cg_0 boat X6z7yGyP3UY_0 boat X7AJSe6kUz4_0 boat X7PChwjgRog_0 boat X7mkuAPcpg0_0 bus X8Wc00FiJn8_1 bear X8lHVX9uGm4_0 car X9dNz1MhFTM_0 car vtOaPYxGauU_0 boat vwp5f1sTcOM_2 boat vxEizaWVZ2E_0 car vx7S4ISNz90_0 bear vzKEVGD3E3w_0 boat vzKEVGD3E3w_1 boat vzmWbtFBxb0_0 bus v0DjGmLiuao_0 car v0P7DOSAooM_0 boat v0Uh3fazz7A_4 bear v4CWziKFAvg_0 boat v4CWziKFAvg_1 boat v4TWD1hSObU_0 umbrella v4TWZQM-t_M_0 boat v4wheqJ7qmw_0 car v4-PEShPpKo_1 car v4-PEShPpKo_0 car X_1xeuzdJII_3 bus YAI5kxAVlag_0 bus YAS9QgwaKuA_3 bear YAacEL8GB8Y_0 bus YCTBEauAnvs_0 boat YCT0ue2AdNE_0 umbrella YC0SWC1thDM_2 car YDxjfXnUsjA_0 bus YFb4IgdgsQI_1 boat YGm0A03QK-0_0 bus YJklsCjPlRE_0 car YJrYjEZ4Hfo_1 bear YLNAOu0nAaM_1 bus YMWEbvBeA2k_0 car YNOl5XssrmA_0 car v6RTPFSqVAo_0 bear v6d52nxP9CI_0 boat v6d52nxP9CI_6 boat v6d52nxP9CI_2 boat v7R5EfiWsMU_0 boat v7mxF1u1eJA_0 boat v74SVFcInoY_0 bus v77um2oiCmw_1 bear v8vdjpigkqA_3 bear v9EO_34zhPY_0 bus v9dJjyyqJ14_0 bear v-_nfHjdDrM_0 car wAJI2wAjCLA_0 car wAktmcUSj0Q_0 bear wAsEbrNlx-Q_0 car wBEyQdKDniA_0 bus wDOuWmULTDo_0 bus wDwRfk2Ka7A_2 umbrella wFuYr5TAoA4_0 car wFuYr5TAoA4_2 car wGqMuP3z6nY_2 bear wHdnCnPBax4_0 umbrella wHrdTEho0Do_2 bus wItLJ3GVPHo_0 umbrella wIzhSLqL-4M_0 boat YPR6uiSn_PI_0 bus YPR6uiSn_PI_2 bus YPWoY6sseHw_2 bus YP9HVTyFrM0_0 umbrella YQRaUcLNZjw_1 car YRmCe16K5EI_0 umbrella YRxTciapqLc_0 bear YSFyOBQNQzc_1 umbrella YSOeyn1SUIc_0 bear YSx79S6HsRE_0 boat YSx79S6HsRE_1 boat YVueKFH38pQ_0 umbrella YWAY2hVlXwU_1 boat YXC4y1_fd5M_1 boat YYjM_RIWUWk_0 bus YY-G2b46dbU_0 bus YalvFPYggIo_0 bus YbsAJsBizWo_0 car wJbu3nAVmh8_0 car wJ-qeIIyve0_1 bear wKlqztWBWCE_0 bus wLXsUww1z0Y_1 bus wLXsUww1z0Y_2 bus wMW3eYDAmiM_0 car wN6DTQLhQo0_0 boat wOAtMDJ1DIU_1 bus wOqLqQhPKNs_2 bus wPCVya7FjXI_0 bear wPcWihBU6Fc_0 boat wPjzhuBuZ_E_0 car wPrTnHfCQy0_0 bear wP83jrOriho_5 boat wP83jrOriho_1 boat wP83jrOriho_3 boat wQY4K0ZN5RY_0 bus wQY4K0ZN5RY_1 
bus wQY4K0ZN5RY_3 bus wRJ_foSdk2g_0 umbrella wRs7_Un28R0_0 bus wSaf-OQyJzM_0 boat wSkaSUiYB60_0 boat wUG-UKf5xOM_2 bear wUtwwmbus0k_0 bear wVI9BeWuM68_0 bear wVX6wPj2U5M_0 bus YcrP36sQwVc_5 bear YepGVMeHePw_1 boat Ye3mi53K_Oo_2 boat YgouPUMM7w8_0 bus YhZT5GU-dEY_0 bear YiDVwrN1Djs_3 bus Yi8XHxZACGY_0 bus YlGg5v-AWZc_2 umbrella YlnMI5yk7FU_0 boat YmRfW-9QwH0_0 car YodCYpx5p8o_2 bear YogxE9OtHGE_0 car YogxE9OtHGE_2 car YozOMrrhBWk_0 umbrella YozOMrrhBWk_5 umbrella Yo8IaFdsDHQ_0 umbrella Yo8IaFdsDHQ_1 umbrella YpGGnhGqqkc_0 car Ypv2bwSbJbg_0 bus YpyrD-P9emk_1 bus Yq3H6FwjqwQ_2 bear wXg6MT7--Ms_1 bus wYO_Z3tO-P0_0 car wYO_Z3tO-P0_1 car wYO_Z3tO-P0_2 car waGAoKeMDbo_2 bus waZHoBhYNXM_2 car wan2A1Zp9pg_0 umbrella wa4LKNmoGCI_0 bus wbBafnofeHM_1 bus wcLRQ5lDklc_2 bus wcRJMRP7TtY_0 car wcUHhJA9ynY_0 umbrella wcUHhJA9ynY_1 umbrella wc6z479m8VU_0 knife wePYCAT9VWI_0 boat weUGYN9mO8M_0 car we9P1H3yM9s_0 umbrella wgn5GA4Kt_w_0 bus wioe2rgDFxQ_0 bus wi_60seXhMg_0 umbrella wkCC1-6dZZc_0 bear wkRF61CxvWQ_1 boat YsJGlSMV6fc_0 bear YsKpyV6dNVU_0 umbrella YsKpyV6dNVU_6 umbrella Yukb6C-FiPs_0 bus YyqN8OKq7-k_0 car Yy9Cj5ayVow_4 car Y2esC00COVs_0 umbrella wkhiKomfWwo_0 boat wku7FWw9zok_6 bear wmN3gF7czBE_0 boat woB4lneU8v4_2 boat woB4lneU8v4_5 boat woB4lneU8v4_3 boat wonqKYd_Hkc_0 boat wulomSbG8Ww_0 boat wwHyMOLjtHw_0 car Y8gjbHlOSpg_1 car wz-CYTAvpJA_0 car wz-CYTAvpJA_1 car w1xC4CowaVk_2 bear w2d7ZPHVRsQ_0 car w4QoeqK4vN4_0 boat w5KKrxi32ZU_0 boat w5RAGrRh6N0_0 boat w85PvG-O3JQ_3 bear w-RoxIo67S8_0 bear w_dzHMbP1wk_0 car xAdflusGMAM_2 bear xAdflusGMAM_1 bear xBQVhJr5tn4_0 car xBQVhJr5tn4_1 car xBW2dB1aHqE_1 bear xE-fIbBizEc_0 boat xIjuSe8NERE_0 boat xIr-46lqsbs_4 boat xI3wdcR9GOU_0 bear xJaqlEqJIsg_0 car xKUjAAXXark_1 car xKjnn1lJsUE_0 boat xLl8JlHPals_0 bear xL0aucx8LjA_0 car xM1N_JeMAns_0 car xNfYVO0HOWA_0 bear xNfYVO0HOWA_1 bear xNqzZtEMt6A_1 car xOQ_zqhFFoQ_0 car xOQ_zqhFFoQ_1 car xOQ_zqhFFoQ_2 car xPgexGqlrpM_0 boat xQ2ursLiV78_0 boat xVl7ISxNOBo_1 boat xWfIV6ykSZU_0 umbrella 
xYRbcgZcjTo_0 boat xZdiy-peZpE_0 bear xcC48didfYg_0 car xds7aav_WA0_0 umbrella xeEFpaZutxQ_2 car xeEFpaZutxQ_0 car xemv_TG3nHo_2 boat xf7e7HpnDAI_2 umbrella xhLH-f-e2Ds_0 bear xhLH-f-e2Ds_5 bear xhLH-f-e2Ds_1 bear xhLH-f-e2Ds_3 bear xhLH-f-e2Ds_4 bear xhYRRVSUjcI_0 bear xh6_xD0_FUY_0 umbrella xi1l0PNYmVU_0 car xi1l0PNYmVU_1 car xk-PCxxgLyQ_0 car xlSq_r-1VZI_0 car xlTBS98u4Xk_1 boat xl03KNG3qcY_2 bear xl03KNG3qcY_3 bear xmXEOSj-QR8_0 umbrella xm61skXJVHY_0 bear xm7yMjZR_HM_0 car xniXqwdU3rM_1 car xn_6GQGdyww_0 bear xoL1TWqV2UY_8 car xoL1TWqV2UY_3 car xoL1TWqV2UY_4 car xoL1TWqV2UY_6 car xo93ACxVFCE_0 car xu3hCCY1M98_0 car xvJ-vgSlRFQ_1 bear xyUFBTV5sfA_1 boat xyUFBTV5sfA_5 boat xzFwd6rktG8_1 bear x1PZyiPtcD0_2 bear x1PZyiPtcD0_0 bear x2MUZI0ckUs_0 boat x51qh-jbh2w_0 car x8bgasvRg_0_0 car x_PtUMz2m3g_0 umbrella x_yZa__92dU_0 bear yE9ySV90e2U_2 bear yFdbcjv2scY_0 bear yFwt2mHmJQw_2 umbrella yFyTQPoWKrg_0 car yGYLwBmuRVI_0 bear yGYLwBmuRVI_1 bear yGq_wX2hSms_0 car yHFbPuIOGec_0 boat yMVPEp44IcU_1 car yNYzTl3zuSA_0 car yOeQRz1L-6w_0 boat yPx8JYuB8jo_5 bear yTEPer0Bvnk_0 boat yTr7cqNxVw8_0 boat yVwePYmRfaA_2 boat yVwePYmRfaA_0 boat yV3gYczZGSU_0 boat yWKpg3C3HRA_0 umbrella yWQT0KUXmZs_0 car yXA2s-Ylkx4_0 umbrella yYt1-j5ltQg_0 bear yZOWsBbP8Dw_1 boat yafgzvvEBsk_0 car ygqn0Cw0cJg_0 boat ykAF4z2vPRI_1 car ynSIMn0mh5Q_0 car ynuXudWT-jg_1 boat yqDO3G8QSxs_2 boat ysudb_DYv1E_0 bear ytzy45KRs4k_0 umbrella yy-1Eaz2SGI_4 boat yy-1Eaz2SGI_5 boat yy-1Eaz2SGI_6 boat y26dbfVQaAI_0 car y3HDa7ZvWW4_0 umbrella y5rlUzgK0z4_0 umbrella y6l_Xj3A7dU_0 bear y6nMm6sNieE_0 bear y6oa4gTfIaw_0 boat y7_Teuq-Jd4_0 umbrella y-J-zu3KYKk_0 boat y-lv7_3azcQ_3 bear y-lv7_3azcQ_1 bear y-lv7_3azcQ_2 bear y_Kbef75lDk_0 umbrella y_OvZEh5PxQ_1 umbrella zA7rl-0pCw4_1 bear zBCRUfv1YVo_0 car zBomR9gjgg4_1 car zCnqglOaM40_0 boat zC1J8hrm_FI_0 boat zGOI3Uds1-A_0 car zGvuvfZeouY_0 car zHwK-Ov5Dn8_1 bear zIGdWP0BOPc_0 car zIoLntgax_4_0 car zIrTQvy-DtU_0 umbrella zKN-t-wHfVw_0 car zOxKFs0x_-M_0 car zPUoexM4GJg_1 
bear zS4G-dKS3dg_0 car zUYNrm52mG8_0 car zU9O4EpnP8g_0 boat zW4j5HFdFCE_1 bear zW9G9_luulU_6 boat zW9G9_luulU_8 boat zX70EOhK1IA_4 boat zX70EOhK1IA_0 boat zX70EOhK1IA_2 boat zX70EOhK1IA_3 boat zYNSRTs7wcI_0 boat zZMZCzV930Y_0 boat zaXvp0LSorI_0 umbrella zcIJlqUAlyQ_0 boat zcdpKM2gDkA_3 bear zdWOfDZyRWg_0 car zdp6LbsF3Fo_0 car zdp6LbsF3Fo_1 car zglydzoqdNw_1 car zhSMuVKY4jM_1 boat zhgbbZA2jZo_0 car zj0QGbLx2Ek_0 umbrella zkC1ygaZUL4_0 car zkFlovQ2F80_2 umbrella zkFlovQ2F80_4 umbrella zkFlovQ2F80_0 umbrella zkYqOEAbTTE_0 car zk5BFmxsRfQ_1 car zmXJ3VmO_yQ_0 bear zmXJ3VmO_yQ_1 bear zn_LOCSgnBI_0 car zobMJDgPWmM_0 boat zpW9Kjtbu7g_1 boat zp4-YNYr-l8_0 car zqDdt_wpfcM_0 bear zqyhnAN5qnA_0 car zq-AjPBQb3w_0 umbrella zsszkZnE24M_0 car zsszkZnE24M_1 car zwKNqBmI95k_0 umbrella zxfyvjQQ0QY_0 car zxuleRJc5Pw_1 boat zySbpWHTUUI_2 umbrella zzDlzbpuFUg_1 car zzOYV3PIwDo_1 car zzljeIZDjM8_0 car z1CT7NYPStE_0 boat z1CT7NYPStE_2 boat z1DFtYFOfsQ_0 boat z1GcDqMXI5U_0 bear z1WPNBklZbo_0 bear z3V1O449zY8_0 car z3V1O449zY8_1 car z3V1O449zY8_2 car z32BNdijIPo_0 car z4C0C5AtXd8_1 bear z4Nk6je-k5E_5 bear z4Nk6je-k5E_6 bear z4Nk6je-k5E_2 bear z4Nk6je-k5E_4 bear z4YdhKjeNQk_0 car z5PqRVPhGGo_0 bear z56C-TtwATI_0 car z6Bzk_B2FVo_1 umbrella z6gL7THeOz4_0 car z8GzZUKj04k_0 car z8QYapjsTBo_0 bear z8WzXJMRLkg_1 bear z9CJpzFuqHU_0 boat z-gqhqI7U10_0 umbrella z-n_qZEuRko_0 umbrella z_CWMOiNpzY_1 boat 0Ah0DHbJ6Uw_0 bear 0B-l9QmJK3I_0 car 0DHXMcNUn60_1 umbrella 0EEILwHA4Dg_0 umbrella 0FRiwnN3Wv8_0 bear 0FUPhsPv9vs_0 boat 0FUPhsPv9vs_1 boat 0GR555fb7uE_1 boat 0GR555fb7uE_3 boat 0Gal36CHm94_0 car 0Hf-spRN8iA_0 bear 0H81H-1s398_0 car 0JkwSF_s82I_0 umbrella 0JxUW6X6VTA_1 car 0JxUW6X6VTA_2 car 0LY3jcKxA2E_0 boat 0NN0x0UcFVI_0 car 0NgLxOGQPPM_1 car 0Nh6NERAbQM_0 umbrella 0NyneL4SB78_0 umbrella 0O2cDoxCAhA_0 car 0PqvPOqRHik_0 bear 0ROl0QaHTgU_0 boat 0ThOYMXH3Mw_0 umbrella 0TyHCEslM-4_0 boat 0UGD0u7LEPY_0 car 0UVJn4oJR3I_0 car 0Vu78K6ZsOk_2 bear 0XETGtPrUR0_1 boat 0XrWsyRsBYs_1 bear 0YWXAZlIFZE_0 car 
0YWXAZlIFZE_1 car 0YaZ8lrPQJc_0 boat 0YaZ8lrPQJc_2 boat 0YaZ8lrPQJc_5 boat 0ZJeQYZxfGQ_7 bear 0ZJeQYZxfGQ_6 bear 0agrBEPe_w4_2 bear 0bx9mbPU7zo_0 umbrella 0c5dV9e0rL0_1 car 0hafN9Sygek_1 bear 0jL3xw-Gfq8_2 boat 0kyg-HgBo7o_0 boat 0lXT8w6Nvz4_1 car 0loh5Nhb32w_0 bear 0lyjvzKFjn0_1 bear 0lyjvzKFjn0_2 bear 0mIwwe5irHk_0 car 0mSZED2I97w_0 car 0mSZED2I97w_2 car 0mSZED2I97w_1 car 0oHtf7nx8m0_0 car 0oHtf7nx8m0_1 car 0peaciSDgqg_0 boat 0rIli5nmkus_0 car 0sAim6AJwgY_0 car 0sAukk-qZs8_1 car 0sWjMW4aW_Y_0 bear 0sbXLfSaBvk_0 umbrella 0tapt-cyoSY_12 bear 0vC1j_r-gPc_1 boat 0vun54M7U5c_0 umbrella 0wXgXCqnblk_0 umbrella 0wzUHyuc5JE_0 boat 0zKI3bZagm4_2 boat 01aEu9jy-zA_0 car 02AiKGZAu3k_2 bear 02bMGGTZE_M_0 boat 04FPpXq4qHc_0 umbrella 04FPpXq4qHc_5 umbrella 04jEe0lfdos_0 car 04p58ydbAvM_0 car 05VoMpLo7Cc_2 boat 05rSMaVX3yA_1 boat 06kAyBeWx5c_1 umbrella 08Fj_YF5X8Q_2 bear 0-Jhv9dONP4_0 bear 0-zDto8pBU4_0 bear 0_ByJ0bAD70_1 bear 0_P-fui2MeI_0 boat 0_soacANAc8_0 umbrella 0_2dsK8nudw_0 boat 0_2dsK8nudw_1 boat 0_2dsK8nudw_2 boat 1EIBn1zqhJA_0 boat 1Fv0cFr9B_Y_0 bear 1Gd-hUsNAsQ_0 bear 1Gd-hUsNAsQ_5 bear 1HhUsmUQmRY_0 boat 1KnTTBiP4ig_0 umbrella 1LKTvGMlL60_0 bear 1MVBovgEi4s_0 bear 1OvseXyo27E_0 umbrella 1PYMTwN-dl4_0 boat 1REcM5EtrZg_0 boat 1REcM5EtrZg_1 boat 1SQF7Tb6pUA_2 bear 1T4c050qGWo_0 boat 1UGqDCwd0TU_2 bear 1VziogDsYAs_1 bear 1WOfnEUurGM_0 boat 1YelAl0OQQg_0 bear 1anH_WthXTc_0 umbrella 1anH_WthXTc_1 umbrella 1avrrmB_Q5s_3 bear 1cbY1pGpdhM_0 umbrella 1cy1p57Z49c_0 boat 1dmbrwAgFuc_0 bear 1fPDeE9SwYI_6 bear 1gbd0C2wJrI_2 bear 1huEYUsV2ng_0 boat 1iD7yA3Elk4_0 umbrella 1iLq0PGfeCs_1 boat 1irtTU-RM8g_0 boat 1lCEFERcEKg_1 boat 1lSGhF2K_lM_3 bear 1l-NcYZKF8w_0 umbrella 1miy1sfneCI_0 bear 1qIgbCRt2C4_0 bear 1qknV5a5WQA_5 bear 1rt4XRA4RHE_0 bear 1rt4XRA4RHE_3 bear 1v8UDwaLZOk_1 boat 1yym4MiYTrs_0 boat 1yym4MiYTrs_1 boat 1zGry9uSuEs_0 boat 10oedSsXbw0_0 bear 14R96gxvKtU_1 boat 15ImffljXUs_1 umbrella 16BnXZheZE8_0 boat 18XvETJJDqA_0 bear 19ID_DbSclo_1 bear 19vhT11oPv4_0 umbrella 
1__PWUxtAJI_0 boat 2Da3689mFHo_0 boat 2DimBSzdfPw_0 boat 2Fo-71zWO5Q_0 bear 2F9aM3isFOg_0 boat 2HDMk0mGW_w_0 umbrella 2IWPUKQEQc0_0 boat 2Irm_qCNQ_g_10 bear 2Irm_qCNQ_g_2 bear 2Irm_qCNQ_g_4 bear 2IyAOD0OkOg_0 bear 2I_k7e8QpWI_1 umbrella 2LWxx48-zmY_0 boat 2OYJuEnLK_w_0 umbrella 2O-9dVZBFm4_0 umbrella 2PL1rgU3jQ4_3 bear 2Pxvoh1PnpM_0 umbrella 2QOthN0H0jo_0 boat 2UBlre798kQ_0 boat 2U7mw3Z_nrI_1 bear 2ZeSJRQEIDg_0 umbrella 2huYkh1UAa8_0 boat 2j5p2kIFnF8_0 boat 2kAmyrOg2is_0 umbrella 2l4-4yNg4uM_0 bear 2l4-4yNg4uM_1 bear 2nWt5S5AcdM_0 bear 2oAbMVTBupI_2 boat 2olUVemt4wc_0 umbrella 2rbAoA6KuZ4_0 boat 2rzjzIvxob0_0 umbrella 2sDjXjM3vuk_4 bear 2sgrwTqPz-Q_1 umbrella 2vC56ILIWK0_1 bear 2w5-fxqKaR0_0 boat 2xzgP87zGDM_0 boat 20nMgEiCqVs_0 bear 223bkVsFvUg_0 umbrella 23-uEh5ygBE_0 boat 24kbYgf2_xM_0 boat 27Yd0qtplBs_0 boat 2_VfwSLic7o_0 boat 3EBKN0vh_8Y_0 umbrella 3EQ8WatEGfM_1 bear 3FBfwZ1vctY_0 boat 3GXWmiQHAA4_0 boat 3Hc48OCKEaQ_0 bear 3ICqGhWY-HU_0 bear 3IOrKwocmOM_0 bear 3KUAz0bb87g_0 umbrella 3KqDceVP3xg_4 boat 3MqGpNqj-fo_2 bear 3M5VwMaIzvc_0 bear 3PN8pPy1PLc_1 bear 3PN8pPy1PLc_4 bear 3PuByhkRjdA_0 bear 3P8-bKeMTDU_0 bear 3P8-bKeMTDU_1 bear 3QQYEFonITE_0 umbrella 3SJI7j-hBwU_0 umbrella 3SbQY-gSjTI_1 bear 3SofVK5wM1k_0 bear 3T5iqGlQLn8_0 bear 3T5iqGlQLn8_4 bear 3UJ24QWw0js_0 bear 3UUo8exclHk_0 umbrella 3VZuzA8i9tI_0 boat 3ZWFSRxFKp8_4 umbrella 3ZwOfZ6mdTE_0 umbrella 3cBiXmqHBLE_0 umbrella 3eH1SNLDT7U_1 boat 3fiWerkBy1s_0 boat 3fm54fM2fh0_1 boat 3kOuqiigfhM_0 umbrella 3khbnSUKCjw_0 umbrella 3khbnSUKCjw_3 umbrella 3khbnSUKCjw_5 umbrella 3khbnSUKCjw_1 umbrella 3leEAIEn6wg_1 bear 3oFuTv4g5QE_0 umbrella 3oFuTv4g5QE_2 umbrella 3ohEBnBnt7o_2 umbrella 3pli8lLuPF0_1 bear 3qGBc-85DMI_1 bear 3q0pJjI8W5o_0 bear 3v6DRHFQTz0_1 umbrella 3yct6bNJF9c_1 boat 3zhjI0Cn1AM_1 bear 3z0lIa162ps_0 bear 31PMTcBL5-o_1 umbrella 31PMTcBL5-o_0 umbrella 32GDx70-6cQ_2 boat 351brnq0Ryk_1 boat 38Tbojzrw80_3 bear 3__l885Wkz4_0 bear 4A-5QKpDBFE_0 bear 4A-5QKpDBFE_1 bear 4BbVz6UbHFY_1 bear 4GTfq2m-SnY_0 
bear 4K0agSc78Js_0 umbrella 4K0agSc78Js_1 umbrella 4MUu-MomyB0_1 bear 4N85gqVvlWU_1 boat 4OQGDsYtfSg_0 boat 4QdM0aAdf4g_3 bear 4Qf9iJ-IMDg_0 bear 4R5HjEAW6Y4_0 boat 4ViaowUogyA_1 bear 4ViaowUogyA_3 bear 4VxP7VQ-WtQ_0 bear 4XCmBo2k6Hc_1 boat 4h2kJG8rDAk_1 boat 4h8E8d4P5ms_0 umbrella 4iktvQjNLS8_6 boat 4lyoTIuPa9s_0 umbrella 4rxmIDjvHvo_0 umbrella 4td5npVxACw_0 boat 4td5npVxACw_2 boat 4td5npVxACw_3 boat 4td5npVxACw_1 boat 4u8RQi7_xUQ_1 boat 4zYtj8BG_ZA_0 boat 4z3XNRP4Qvk_0 boat 40Ogw6O8g2M_0 umbrella 42-2FjqvBRw_0 boat 44nxZjEYqLI_0 boat 45HOGdlAVq0_2 umbrella 45HOGdlAVq0_3 umbrella 45HOGdlAVq0_6 umbrella 46Sp7L3iKK4_1 boat 47mMBnGHuOE_7 boat 48IdCSlEHlM_0 umbrella 48pGfV-z-x0_0 boat 5AhKWEjMmUw_0 umbrella 5AzSuHB6_jc_0 umbrella 5Ce6X4i25i4_4 umbrella 5Ce6X4i25i4_0 umbrella 5EaEfiCIEcA_4 umbrella 5EaEfiCIEcA_3 umbrella 5FZykf07mxY_0 umbrella 5FZykf07mxY_1 umbrella 5FviZXBOPWk_0 umbrella 5H6nBOIIziQ_0 umbrella 5IdOF-nnOkU_6 boat 5I2hW9gRRwU_1 boat 5JubFWZKmZc_1 umbrella 5Kf5KxsLCmI_0 boat 5PxBf16_oMg_0 umbrella 5WUSwyO4k7A_0 umbrella 5XWfGTUYLbQ_6 umbrella 5Y3Lrgpl6s8_0 umbrella 5dL3vGF_-ug_0 boat 5e9luwmv6mU_0 umbrella 5g_ugz2HmKM_2 boat 5iYpaHYUElI_0 boat 5iYpaHYUElI_3 boat 5iYpaHYUElI_5 boat 5nMhK15X4R8_2 boat 5rT33oH7aV4_0 boat 5srF-BzF_go_0 umbrella 5suoa4TFYd4_0 umbrella 5vMpwDm27VM_0 boat 5vyqdnOWivc_3 umbrella 52m9SGVaiW8_0 boat 521jpaMoQ58_2 boat 537tF6-uRB4_0 umbrella 561s-m-0mqU_0 umbrella 561s-m-0mqU_2 umbrella 561s-m-0mqU_3 umbrella 582V5-HF4yg_0 boat 582V5-HF4yg_1 boat 597l2xVl9Tc_0 umbrella 6C42Di7bIpE_1 boat 6FG49plD8TQ_0 boat 6FQz5w7HaKg_0 boat 6JGioFiqwww_0 umbrella 6JLdACYt7D4_1 umbrella 6MVLpYA1t8E_1 boat 6MVLpYA1t8E_3 boat 6OEFFwKhAFw_0 boat 6PVjXDW7JlY_1 boat 6Sxb0d7xIys_0 boat 6Ug54vSsrio_0 umbrella 6WP3KFUYTrM_0 boat 6XrW8Yjd16I_0 umbrella 6c0RAJO-AGg_0 umbrella 6inTfRLx_58_0 umbrella 6it-xMMovj4_2 umbrella 6khDUjxTmdo_0 boat 6mvP_NKlIHg_1 umbrella 6qpeBvh9pqs_0 boat 6rowMK5ERz8_2 umbrella 6sN56W9U7tY_2 boat 6tLtEuKyj1E_1 boat 6tQrO26kwOY_0 
umbrella 6t0mbpnPPdg_0 umbrella 6t55VfdtMWE_4 boat 6t55VfdtMWE_7 boat 6t55VfdtMWE_8 boat 6t55VfdtMWE_0 boat 6uM7MFSH15g_0 umbrella 6uvJft-l1R0_3 boat 6yCsWwj87QI_0 boat 6zxrdodJut0_0 umbrella 61RreGvIPOk_1 boat 66WmMvvZOxI_0 umbrella 68C7HGRrJ8o_0 umbrella 68kx9VUVhzE_1 umbrella 6-Nh0bY1nUk_0 umbrella 7HD-o1yj47U_0 umbrella 7NXmDbHoJn0_3 umbrella 7NXmDbHoJn0_5 umbrella 7NXmDbHoJn0_6 umbrella 7RcyfoxqADA_0 umbrella 7WKzOMuf3Cg_1 umbrella 7a_nsGmUZNU_0 umbrella 7kSyhlnimb8_0 umbrella 7kaTL52xbiY_0 umbrella 7tlbytb63z4_0 umbrella 7uR1cEVdMDo_0 umbrella 7ydX3wCeOgk_0 umbrella 71k1TftUiYE_0 umbrella 76ljAryU9Bw_0 umbrella 78lA-eJGUn8_0 umbrella 7-ugeb_4vqE_0 umbrella 7_k6DM-PlXg_0 umbrella 8AZtNaOO_8A_1 umbrella 8FhIv4h9D3E_0 umbrella 8FhIv4h9D3E_1 umbrella 8H88MFohrUM_0 umbrella 8SuTrZ6xu2E_0 umbrella 8d_Vt2SWIvg_0 umbrella 8fsRltS2ul4_0 umbrella 8nReKSsSgGE_0 umbrella 8oOer9PS53g_3 umbrella 801xOkfqjkM_0 umbrella 84Ber6V3IrA_0 umbrella 84zKfCKtsDo_0 umbrella 9CGTYEUn-mo_2 umbrella 9JFicuESmEA_0 umbrella 9JiMiflDI68_0 umbrella 9J4O20b9qnY_0 umbrella 9S2mGfudahk_0 umbrella 9UVLb_-RbfA_0 umbrella 9bFrwgSSAkQ_2 umbrella 9bFrwgSSAkQ_4 umbrella 9bFrwgSSAkQ_0 umbrella 98OOq0Wh904_0 umbrella 99uO6qHrhsU_0 umbrella -PaNPkpeFdI_0 umbrella -PaNPkpeFdI_4 umbrella -Z3_Ixwl1YY_0 umbrella -bA7JdKB0LA_0 umbrella -d9Vg5j5vZU_1 umbrella -eJmt-GItyI_0 umbrella -k8FuC01N5E_0 umbrella -0y7A0GDVY8_3 umbrella -0y7A0GDVY8_5 umbrella -0y7A0GDVY8_7 umbrella -3TIfnTSM6c_1 umbrella -3TIfnTSM6c_2 umbrella -98I0B3kkqw_0 umbrella AAVVg5xx0p8_0 person ACB01WGxOSM_0 skateboard ACDc6tGnXXQ_0 elephant ADWNgv6trag_0 person ADznOfGgfj8_0 person AEEVGgiuS5c_0 person AEHbOzlbmOQ_0 dog AEJTsQNMkME_0 bus AFlkSTJ-mF0_0 dog AGRV17_1OS0_1 bus AHsZ4FTQ8Ew_0 truck AIViQtfacts_2 horse AJBtOVA1KSw_0 person AJbQP-rIwCY_0 person AJ9ODXcnhVo_0 person AJ9ODXcnhVo_1 person AKBq0oH8IOM_1 train AKBq0oH8IOM_3 train AL9dFpjFlLM_0 horse AM-TjLTvBSU_5 bear ANA-pgSAzGI_0 horse ANVnK2HmZno_1 airplane ANVnK2HmZno_7 airplane 
ANeOKwjvX7w_0 dog APP17gURiBU_0 bear APP17gURiBU_1 bear APTYyEYJfOY_0 bird AQD8YBCTSPs_0 umbrella ARaILMtc8fs_1 person ARsokXpl07Y_1 boat ARsokXpl07Y_2 boat ASPK-ZSB9Ts_0 person ASfv8cmreoA_0 person ASfwyHCtnIU_0 person AS5LvQT9rrQ_0 person ATy91FTiYvU_0 person AVF8lCKe6os_2 umbrella AWRcJpWTPwQ_0 person AWtY9Y2mPso_0 motorcycle AWwDsm1WnKE_1 knife AXjDlIFY7ww_0 boat AYAkMpj_MHA_2 bicycle AYAkMpj_MHA_5 bicycle AYAkMpj_MHA_6 bicycle Aax6L0Qqgio_0 bird AcYd7y_-V74_0 person AdY55Q3qVK0_2 elephant AgbIDWiOXQ8_0 person AgsYgmA19z4_0 person AhWU-QUzOOA_0 person AiqGEAjF6QI_0 train Aiu6EH4a8v8_0 train Aiu6EH4a8v8_1 train Aiu6EH4a8v8_6 train AixV6QSGqto_5 bird AixV6QSGqto_6 bird Ajj7WZLukdw_0 motorcycle AjpbAriY8rU_0 person Alab3dEYXM0_0 person AoAoH9yb6zY_11 bear AoAoH9yb6zY_6 bear Ao7Sa2afCb4_0 person ApDgLQUsEqc_0 bicycle ApakHefqWv0_2 airplane AqIG0zk2bpg_0 person AqTXLh7DtcM_0 person AqTXLh7DtcM_1 person AqdoD9jkBFc_0 horse Aqj7VnXQt4s_0 cow Aq4dBqb2SbQ_0 person ArgYRdhvlc0_0 skateboard AsPXe7qUyuI_0 person AuLrPQqrKV4_0 motorcycle AuY8vITQrsE_0 cow AvBm7iHiDdI_2 boat AvSgTHXgSXQ_0 cow AwVdVzh1Eh0_0 person AwvDMOeS7no_0 person Awzt30r0OLQ_1 bus Aw2t3AalW4s_4 elephant Ayh_2ithjCE_0 cow Ayh_2ithjCE_1 cow Ayh_2ithjCE_2 cow AylQiap7dj4_2 bear AylQiap7dj4_3 bear Ay9QToaaTGc_1 truck Ay_a2OkcdEk_0 person AzVvPUazPYk_0 motorcycle AzzlFx32dQs_1 boat A1RSx6j_ra0_9 elephant A1RSx6j_ra0_4 elephant A1RSx6j_ra0_6 elephant A27YZAfJmrc_0 knife A27YZAfJmrc_1 knife A3E72P24pf8_0 person A3cgW1rDOcI_0 person A32Fi06yKpU_0 horse A5U6AHe9_4A_0 train A5pUgLCQq9k_0 elephant A5pUgLCQq9k_2 elephant A5pUgLCQq9k_3 elephant A63BoLTUNAM_0 horse ZBzVnA8zj6Y_0 person ZB45YyN1WUM_0 bus ZFYGhJKiw5w_1 giraffe ZGfOCwbu-PY_0 person ZHTMfW1eaW0_0 cat ZHURcze8rOI_0 person ZIJUWQKzzsQ_0 person ZJgwacILoAw_0 person ZMgP2kxv5E8_1 person ZM3wX5zgKOA_0 person ZNXnJahaXIY_0 person ZOc4wfLX2Jo_0 cow ZOnuSLp6asQ_0 train ZPQNucbAjBM_0 cow ZQITHWk17a0_0 bicycle ZQxmb_nVoH4_1 cow ZRUXj8o10Po_0 person ZSnP5B6NiI8_0 
train ZTqDuCZVTmM_1 airplane ZTqDuCZVTmM_5 airplane ZU3AYv2eU74_0 motorcycle ZU4XQbNaYQc_0 knife ZVZWEWzZg50_1 bird ZVjep3tDJjU_0 person ZWL6CshdsuY_1 cow ZWogXn8xs7E_0 motorcycle ZXU4Uua3l0E_0 car ZYOUZjfZMhk_0 cow ZYS0h2pAK6M_0 horse ZYm5iVw0YdE_0 truck ZY8pG-I5Ax8_1 bicycle ZZBBcTBPmis_0 person ZZpckGIvGTI_1 boat Zana4yKDGxY_3 skateboard Zana4yKDGxY_1 skateboard ZbnxzLt8FJk_1 dog ZbnxzLt8FJk_0 dog ZcXtrHkjobw_0 person ZelRUJyMMkw_0 person ZeqhN6ndscE_0 person Ze8cOn59rW4_0 person Ze8cOn59rW4_1 person Zj1TAkYHlQo_0 person Zj7GzCIi_9c_0 person ZlEiOICCDdc_0 person ZlH8Hd961FM_1 knife Zl30Oy50PfQ_0 person ZmXKvpkfHZA_0 train ZmdvunyqJB8_0 bus ZqTkqkEbXEk_0 cow ZrPn3BODZJM_1 person ZrPn3BODZJM_0 person ZuBD3A8Vecs_0 bird ZuEbZKmjxaA_0 train ZuEbZKmjxaA_1 train Zu7udgxuUkk_5 airplane Zu7udgxuUkk_6 airplane Zu7udgxuUkk_1 airplane Zu7udgxuUkk_2 airplane Zu7udgxuUkk_3 airplane ZvadVS1LnQU_0 bus ZvadVS1LnQU_1 bus ZvadVS1LnQU_2 bus ZwLvs9JUsFY_0 person Zw4-vF-vOMk_0 person ZxO4Gd5fhOg_1 train ZxO4Gd5fhOg_2 train ZxX6DBopv30_0 skateboard ZyEA24Ud3EM_0 person ZyM24-ekpz8_0 person ZzBvzlzuw4M_0 person Z03ZC9qmwDc_0 zebra Z1N0xBj_H3E_0 bird Z1ns6XidhT8_0 elephant Z2S6XnfE5vI_0 person Z2kb4LiQJUU_0 train Z2zB-gtDgOM_1 elephant Z22DSYtblFo_0 bicycle Z5rHikLjARg_0 person Z6XKceRI1bE_0 bus Z6XKceRI1bE_3 bus Z6XKceRI1bE_6 bus Z6XKceRI1bE_10 bus Z6qQE2_jsIM_0 skateboard Z68yTt3upjk_0 motorcycle Z8SxFPbnptI_0 person Z8pujku9bPw_0 person Z9vZk0io0fw_0 truck Z9vZk0io0fw_1 truck Z-R7-Ww03t8_0 knife Z_kKBbIzdXM_0 person Z_pwMCnOdk4_0 knife Z_pwMCnOdk4_3 knife Z_0227AsAvk_0 bus A_a1H0EO64s_0 person A_a1H0EO64s_1 person A_pc9ov1cT4_0 person A_weMKVolQM_3 bear BBC4Jmlky4Y_0 horse BBHBoewIXhw_1 umbrella BBHBoewIXhw_3 umbrella BBHBoewIXhw_4 umbrella BCKR989ZYyM_0 car BCKR989ZYyM_2 car BCpaJ-tEv-0_0 car BFP7MT8RM8U_0 elephant BF7cTjrTSwY_0 cow BF8d91cJS3o_0 person BGcAVF0Zi_o_0 person BGzetX8Dz-M_0 cow BHurVVjld8Y_0 person BIUeggZa3SU_2 person BIUeggZa3SU_0 person BIUeggZa3SU_1 person 
BIfedkd3HEg_0 boat BJaAlMv6b_U_1 motorcycle BKKSiAed9CI_0 horse BKtAnbXVk1E_0 person BLCEb_seyUs_0 airplane BLCEb_seyUs_1 airplane BL8o-tdhlxs_2 train BL8o-tdhlxs_3 train BMhmY9_ltFc_0 person BO7KZKb9bkQ_0 cow BQRwIXopDJw_0 person BQRwIXopDJw_1 person BQswg--xiy8_1 horse BRd8dUMN0a4_0 knife BRmtavy2ZEo_0 person BR0NNg6gLLo_0 person BSo8wjoZ7zc_0 skateboard BTSUQrxC6l4_1 bus BUHULgt_7DA_2 elephant BU3iU3zJnDI_0 person BU8sEPifL08_0 person BVTVHHm7vkA_0 boat BWNTXqGixw8_0 bird BZUE0vDhMvk_1 knife Bb2fkGYxp2E_0 person BckXjb2o93U_0 person BdHNtn10UKE_1 horse BeXziIDAJDc_0 person BgHV_87CxNI_0 umbrella BgXr-bSqMIo_0 train BhO0SwB8Ee4_0 person Bh4m74dLZaM_0 person BlYWgnhwvkM_0 elephant BlYWgnhwvkM_2 elephant BmZNFBFj-ws_0 person Bm2yaWXwgjY_0 knife BpXhq5Awd3U_0 dog BrC6VbCzRGc_1 knife BrHslMc3UMQ_0 truck BscLJpi3AJc_0 person Bv8WeZ_zrJc_2 bear BzEC1EEC2ts_0 person BzXWK-LODVo_0 person BzbzymdK_TM_0 person Bz6Od4GfW6A_0 truck B0DRHTdmeK4_0 knife B31JkzyQDkg_0 bear B5GVudI81dM_0 dog B6nArbkcRek_0 motorcycle B6sR2aqScR4_1 bus B7IP-2uNuWs_0 skateboard B7yxjI6dz4s_0 motorcycle B8iZGZlQcsg_0 person B8opNd6uzmY_1 person B9GQwzI2Eqk_0 dog B92X9Xn1P2s_0 person B-CJ8miJKPs_2 cow B-n15EytPtQ_0 person B_WnXKd-oZk_0 person CADW3z8x4AU_0 skateboard CADyh6laNA0_0 motorcycle CA3wWkrNnRs_0 person CBSNFKeTnpA_0 bird CCyZAt2Js0U_0 car CE-LfFDfGKQ_0 person CE-LfFDfGKQ_1 person CFN40hxKxM8_1 airplane CFPhXPCobFg_0 person CGg2FXjvvOA_0 person CH3phgDW5Fc_0 person CINfsd8LiOU_3 horse CINfsd8LiOU_0 horse CINfsd8LiOU_2 horse CIqkbJoJhBI_0 train CKmnpW6gboU_1 boat CKmnpW6gboU_0 boat CLtQxCqTzcY_1 knife CMgYFnnxQUU_0 horse COcbSVCp4ig_0 bicycle COcbSVCp4ig_3 bicycle COcbSVCp4ig_4 bicycle COcbSVCp4ig_5 bicycle CRF7PcgB2yQ_2 bus CSnhpel7FTA_0 person CSriNtLepLs_1 skateboard CVmBocpXeTc_0 bus CWCfCeYh2bA_1 train CWvjAYt5eR4_0 bus CW9n8Gahfgg_0 cow CXT98GHNtRU_0 person CZ-Sh-SXaRQ_0 person Can5eao1S3Y_0 bus CbB-71R_n9M_1 motorcycle CbpAv8c2Vsg_2 car CbpAv8c2Vsg_3 car Cb3iufTFMEU_0 person 
Cc2vs8vuPmU_1 bird Cc8E7aTdEVM_0 person Cdain96L-q0_0 bus Cd7g3ZoA5tQ_0 bus CeN22koBQRM_0 person Ce2jOHHBDLk_0 motorcycle Ce7IPtXkNcs_0 person CfqkbrB0Yy8_0 person Cf2jOSj7eRg_2 train CjbhKc3Vjpo_0 person CkEVvGqgVkQ_1 knife Cl13SbLP0hE_2 horse Cl13SbLP0hE_3 horse Cl13SbLP0hE_0 horse Cl13SbLP0hE_1 horse Cl-lB_jS8Wg_1 bear CnMMdc6syXM_2 umbrella Coxzc_S3ID0_1 knife CpLMLRdeJJ0_0 train CpN-qOO6Qm4_2 airplane CpyK9j001RY_0 person CqNEwP8PwS4_0 bear CqNEwP8PwS4_1 bear CqYiAanNpo4_0 person Cqbu8vOsszI_0 cat Cr5p4NYIR44_0 person CttKQip6B2E_0 person CuGu45Z4lt8_0 knife CvszgVrLsgA_0 person CwYG2Hf6-NY_1 cow CwvR1fjMeSU_1 horse CyuollntwZ8_0 dog C1dCZ9W6WIM_0 person C2x3rdWMAyg_0 dog C3lwMd_rlG0_0 person C5MrhYouFTc_0 cow C5SKibJTnR4_0 cat C6dANICzCcg_0 person C6xJeHO8XSE_0 person C7NXymSnEFw_0 bird C8ExRKjU1vY_0 truck C8V2-wEjv5A_1 cow C8sUABBP0Jc_1 bicycle C8sUABBP0Jc_2 bicycle C80bmA0XrjM_0 person C886JwUWvxw_0 skateboard C-Tal1XUc8o_2 person C-zp91eJqtk_3 bird DApDao4fUqQ_3 horse DApDao4fUqQ_1 horse DApauH43Ivo_0 bicycle DBArY7gHuoY_0 cow DBsBTVJNxS8_0 dog DBsBTVJNxS8_1 dog aCNvyXSuG6w_0 person aCVmJCtuPeg_0 bird aCVmJCtuPeg_1 bird aDMk7CwLIxM_0 train aERiDkn_gkY_1 elephant aEwD6TC8S4w_1 bicycle aFEOvm-1KvA_0 horse aHM4Dj-2y8o_0 airplane aI0y0wY4LQw_1 person aI0y0wY4LQw_2 person aJAd-MiEsfk_1 person aJWETVChAE8_0 person aJoKSWtqs0g_0 truck aLYtaO_J2_U_0 person aLbjxTwAV7o_0 person aMDD0PenhaM_0 cow aMgj1BUBexw_0 person aNgAUBTbUUM_0 person aNmgrcJxdw8_0 motorcycle aN2a-rDAYDQ_0 dog aN2a-rDAYDQ_1 dog aOhumbyx05c_0 cat aQcTwMVs1Zk_0 skateboard aQcTwMVs1Zk_1 skateboard aQx68fklEXA_1 dog aSGod2MJ5ww_1 horse aSq5ZqH_K7E_0 truck aTAXvSNkuvc_0 bus aUFxg301s68_1 skateboard aUsTtvWAzAc_0 person aV8S5HLSI_o_0 person aWHaR4ExDpk_0 truck aWIZBHwtII8_0 motorcycle aWgH9T2sGkE_0 boat aWmC8Tbgy9A_0 train aXa5YE_AmKg_0 person aYAuay_bTaw_0 cat aYVEZrX4mE0_2 bear aZRYQJd-5CQ_0 train aZRYQJd-5CQ_4 train aZRYQJd-5CQ_3 train aZRYQJd-5CQ_6 train aaZxOcHxPec_0 person ab_RTkwBG_4_0 person 
acy4aJnh9SU_0 person ac68trlkEnw_1 horse adsmRxlAJo4_0 dog afE4YqgaPlw_0 skateboard afU2vHgUvaw_7 train afU2vHgUvaw_2 train afU2vHgUvaw_3 train afkiqhwTeRQ_0 person aiOHs3hApm0_0 skateboard aiOHs3hApm0_1 skateboard aij190b9wtM_4 bear akWe9oXeKzA_0 person ak1XT_Nl7VU_0 airplane ak4CfFF9Bpk_0 person albeyJBtKD8_0 person alp0ImrbacI_0 dog al12VKid_P8_0 person amyr6d2Ns6M_0 horse amyr6d2Ns6M_4 horse amyr6d2Ns6M_6 horse ao9LHpxNCqY_0 horse apLT3-LKJgE_1 truck apXNcHROKyY_0 horse aqp_quyEngw_0 airplane aspR9ca28CY_0 person as3DGRDezaA_0 person atElNgnFvlk_0 person at-Ex-CnRX4_0 airplane at-Ex-CnRX4_1 airplane au_kgqsZlMU_0 truck avRC7M3_kuA_0 bird awnORAEMUIg_0 person aytqFnOdBLA_0 person azLbVm88Dzc_3 airplane azLbVm88Dzc_2 airplane azXlb1cxVGQ_1 elephant a1qoB1eERn0_0 person a2-lZhKXx9E_0 truck a3In51YCqMg_0 dog a3T8T1R2wAc_0 bear a45XOJQaDQI_0 person a5dffDLeZsI_0 airplane a7hjIfPGJqI_0 cat a74_tj_B-YA_2 knife a74_tj_B-YA_1 knife a8v0k4Bz_QA_0 person a9jgDU5THOU_0 person a97S4U5ezQw_0 truck a97S4U5ezQw_1 truck a-M2_3j67qI_4 knife a-M2_3j67qI_5 knife a-M2_3j67qI_6 knife a-NeSgN26Zo_0 bicycle bAKQZ0F7LFw_0 person bA10PjxgV3w_1 elephant bBPKh_BPJ50_4 bear bBPKh_BPJ50_1 bear bBW4swLrEHE_0 person bB6tIraYEaI_0 skateboard bCDw1dn7M1Y_0 car bCDw1dn7M1Y_1 car bCWM39xLsYs_0 skateboard bDFkztSgMko_0 skateboard bD6xZhJfhMU_0 truck bFnzGS_doNQ_0 person bGFRHhc7zUI_1 person bGZtGWULlF0_0 skateboard bGZtGWULlF0_1 skateboard bIOpYFVLesY_0 person bJviDDrUSwA_0 motorcycle bKB6ESqkOic_1 truck bKRAinEnagU_1 motorcycle bKRAinEnagU_0 motorcycle bNXcPzWMXsw_0 car bN43crdYDJE_2 bus bOL9YHt5u-o_0 skateboard bOL9YHt5u-o_1 skateboard bOofbwD246U_0 person bPKew4jsGkE_0 truck bPRVRL4x5T0_0 truck bQkneVc9gaA_0 airplane bQ64JFsWSf0_0 bicycle bRWbXGRwlVY_0 person bS1Z1k6laqY_0 person bUqFsPoDKBE_0 train bVP58EONEm4_0 cow bW4nHswGFPo_0 motorcycle bW5IvSesbV0_0 elephant bXR-iz0NfrA_0 cat bZDsNeqNn9I_0 car bZDsNeqNn9I_2 car bZDsNeqNn9I_3 car bZDsNeqNn9I_5 car bZIU-ajwk6Q_0 bicycle bZIU-ajwk6Q_1 
bicycle bZ6Tq0KWSsU_0 truck bZ6Tq0KWSsU_2 truck banaB07Fu9c_0 bear bcKUeyEaRPw_6 bicycle bdhq0SKEqe4_0 person bd3b9R30l-E_0 person beDuTpy1tg4_2 horse beDuTpy1tg4_0 horse beLkXAaP78Y_0 train be30TAE-gq4_0 person bfQSyBsTmE4_0 umbrella bgSSzKax51E_1 motorcycle bgSSzKax51E_0 motorcycle bhoUxK8FSqc_0 person bhuPA9toCGY_0 person biIFNnX2Nl4_0 skateboard biu2ssO3dRg_0 bus bjRPge2oFgU_0 knife bjV04dzuqhk_1 elephant bjdIG6B5zn0_0 person bjdIG6B5zn0_1 person blPLp16K1XY_2 bicycle bmJ_QDIRS2U_1 train bmJ_QDIRS2U_2 train bmJ_QDIRS2U_3 train bmLsrJHQQ14_4 knife bnBORorLvmk_0 person bnBORorLvmk_1 person bnVGsydNrg8_0 airplane bnVGsydNrg8_1 airplane bnZbj1dD0qs_0 umbrella bn0I2aJB5Ps_0 horse boMU1mjUSDw_0 skateboard bo8M-OTk4J0_0 person bpw3BCxYYU4_0 horse bqoDChNwIYY_0 umbrella brJqQ_iH2VE_0 person brMVhyEZLfo_0 person bs5AY2jipno_0 train btL-vruELoA_0 person btq7gMuqMuo_1 person btq7gMuqMuo_0 person bvEJDHpRNoI_0 elephant bvVfFv57gN4_0 bus bvVfFv57gN4_4 bus bwhPTEvGmIo_0 person bydgNyGwoys_0 person bziUK-7O0lY_0 dog b0Z6qKhuldo_0 skateboard b0sKQDUFTos_0 person b1s-jYD36GQ_0 person b4Wua_98Y9U_0 person b4d_9Yc0MwY_0 bicycle b4qC2fctnLU_0 horse b4zSrjPtOfs_0 bicycle b5CJtpeG1Lc_0 train b5CJtpeG1Lc_2 train b5CJtpeG1Lc_1 train b5mOcLykYeQ_0 cow b9VOmo_86Ds_1 person b_W4BWH1i_A_1 person b_W4BWH1i_A_0 person cBxo9bPINJc_0 skateboard cCEImigNo38_1 train cDHZtfsI_gM_0 train cDHZtfsI_gM_1 train cDmkhESohro_0 boat cEcTernKOqU_0 person cEcTernKOqU_1 person cGJLuwZIG5s_0 giraffe cGJLuwZIG5s_1 giraffe cGJLuwZIG5s_2 giraffe cGwjfCPO-7k_0 car cH0sXpOxvy0_2 bird cH9u1pCWp2U_0 person cH_SL9CR8y4_3 dog cIxdxFkZ7y8_0 dog cIxdxFkZ7y8_1 dog cJvh4GqZn-s_0 person cKQQVTnOzBk_0 horse cLULEYFoBPc_2 cow cMdjRuUhBIs_0 motorcycle cMdjRuUhBIs_1 motorcycle cMwa9cC304w_0 cow cMwa9cC304w_1 cow cNDYJRBsIOY_0 dog cPlqWSd2TUc_0 person cP-p4R-JZxY_1 bird cRBw9lx-EKA_1 bus cR2-4m174EM_0 bird cR-AWpc5zTs_0 person cTujx-TutbA_1 horse cUrajeQPzpQ_0 umbrella cUrf-ZwPzxI_0 person cUwPVOboe0k_0 person cVng1vleWNY_0 
person cVrxfV0w29w_0 person cXZ7JY7YQmE_3 bird cYdqN1oPRdY_0 person cagT3K3Ep3s_0 skateboard cagT3K3Ep3s_1 skateboard ca8rEbHYMXg_0 cow ca-ko46j2fQ_6 airplane cbL66gVAa5Y_0 cow cctYyTO8OtU_0 person cc3mBIHi-GU_0 elephant cdNz1OLa1tU_0 car cf_U0G5W8BI_0 person cggX7PRYUh0_0 person cg_5uaJjLHk_0 person ch_23jXJ_vA_2 dog ciCfkv5831Y_0 airplane cih9W0SPGYA_0 bird ciwNB-l9a88_0 person cjHlHkhg0z0_0 person ckFwzL1Ot94_0 truck ckV9ay1lm7A_0 airplane clZo-o5v1EA_0 elephant clvCQPta7y0_2 bird clvCQPta7y0_0 bird clvCQPta7y0_1 bird cmTPsZ9x3PE_0 cat cmW0Y4KGI7g_0 giraffe cnhhgh_z5NU_0 cow cnqT4u0k3sM_0 umbrella cpK8K6JD_GM_0 airplane cpK8K6JD_GM_2 airplane cprvb4cW5x4_0 motorcycle cqd8PRxMakA_0 truck cqvjKRFEi8M_1 car crys7VEeUgU_0 person cskBHjsDXEs_0 cow cso6B_84BFA_0 horse ctm9x2MaZuk_0 cat cxu1qpzXobY_1 bird cxu1qpzXobY_12 bird cxu1qpzXobY_0 bird cxu1qpzXobY_2 bird cxu1qpzXobY_4 bird cxu1qpzXobY_5 bird cxu1qpzXobY_6 bird cxu1qpzXobY_7 bird cxu1qpzXobY_8 bird cxu1qpzXobY_9 bird cxu1qpzXobY_10 bird cxu1qpzXobY_11 bird czO8IPcAO1A_0 person c1FBptbYp3I_0 person c1FBptbYp3I_1 horse c2T3VDriTaY_0 knife c39xfJcSlxk_0 dog c4kbPHdCIE8_1 elephant c43mnrjx2MU_0 bus c5fPKbV5cAM_0 person c53j9l_w3Cg_3 dog c7gnf6G7Jpw_0 skateboard c7oqQy2Fvlw_0 truck c8JhzKh1i7s_0 person c8JhzKh1i7s_1 person c8gBv0b5g9w_1 elephant c8iU4McayiU_0 person c8iU4McayiU_1 horse c8u5Y95o7jE_0 skateboard c84BjBiic4s_0 motorcycle c93WuBjZeRk_0 person c-nMPinePds_0 cat c_aupqZy-14_0 airplane c_o91IPAB-c_0 umbrella dAHCPltzogA_0 bird dAP6fuArseQ_5 elephant dAtQR4dHPgE_0 person dA0WQ_RubaI_0 truck dBzXNQJRzls_0 cat dCJFMDQBPb4_0 boat dEIuy8LjAxc_0 car dElaQ10vYqg_1 motorcycle dHMFcv4UnmU_1 bus dIP3FoGUXDQ_0 person dJYqTnxujb0_0 person dJnLznNE29w_0 train dJnLznNE29w_1 train dJ9qJezt6do_0 car dJ9qJezt6do_1 car dKmrUcJ9rJY_0 person dKmrUcJ9rJY_1 person dK3_HiQMH4o_0 dog dMFsGGvkSVU_7 airplane dMFsGGvkSVU_0 airplane dMFsGGvkSVU_3 airplane dMFsGGvkSVU_5 airplane dMFsGGvkSVU_6 airplane dNByeKh4gnA_0 person dNJ0q9QKzmY_0 
boat dNQYo7REyBU_0 person dOkb5WhLZGU_0 person dO0uu_fVUVI_0 car dO0uu_fVUVI_1 car dO4Jxsf987s_0 bus dO-OrWse3dA_0 car dPCSntP-29E_0 person dPCSntP-29E_1 person dP7je2qU_QA_0 dog dQIlnQxMIKo_0 train dQIlnQxMIKo_4 train dQIlnQxMIKo_5 train dSAlTJeDlfQ_0 person dTvJyUKKshw_1 person dTzaYePj1gY_1 cow dT5gXQAE-Qk_0 train dT5gXQAE-Qk_2 train dT5gXQAE-Qk_3 train dUpoYuxpKPM_0 person dVTCCi__Z4Y_1 person dVte44AGoEE_0 knife dW4RjdpTaJo_0 person dXYYgzjwm8w_0 person dXf-d5rkqdA_0 horse dZv4xXpV6js_0 boat daeBFAZFQhU_0 person dbXKW9_L9sE_0 bird dbwBzQuj1uA_0 person dc5oaWIkfwg_0 cat dc-iaCwezlU_0 train deO0aj59T8o_0 person dfU8DcWDX8U_0 horse dfU8DcWDX8U_4 horse dgcW3TkPLmk_0 boat dilCe3bivVk_0 bus di59PG3l25w_0 bicycle di59PG3l25w_1 bicycle djsh1r_W6ko_0 person djt1lzJn7ak_2 bird dlYwqfTRqoo_0 person dl-bg8WPGZs_0 person dmk3Cedj6g0_0 person dn006hdarCg_5 elephant dn006hdarCg_4 elephant dn006hdarCg_6 elephant dn006hdarCg_7 elephant dn006hdarCg_10 elephant dn7iBi1t7UI_0 cow dn83BrM71W4_1 boat doOsOyiHItw_0 person dpqVH2tgA3E_0 person dqlk6F07Cxw_0 motorcycle drohCN_vwC8_0 motorcycle ds7JGeImFXo_0 horse dtsLwaO2des_0 train dt5TzAZByk0_0 person duROYI-AZlk_0 person duROYI-AZlk_1 person dutryxrzRjE_0 umbrella dvDxOc2VWhc_0 person dvP5Dsp8EZA_2 dog dvTIkEA7rOc_0 person dvvoKcQ5OOQ_3 bear dvx9-0cVEYc_0 person dwQuyR9XFVM_0 skateboard dxcnKYynkEY_1 cow dxmxpyj3WVk_0 knife dxmxpyj3WVk_3 knife dyUVa3ZQVFg_0 horse dzitRPrX410_0 cow dzpcdtcQLfY_0 motorcycle DEnqBEwPykc_0 person DFCqlvY5OFY_1 bus DFXptvzN9V8_3 umbrella DFqSvoSh-qA_0 cat DHEtea1hPBc_0 person DHwUCu0rrvc_0 boat DJ_neeMWAuw_2 dog DLsYDXqthiY_0 skateboard DMBbH5HyOME_0 person DMn3ruRAObI_0 person DMyjVWCLbes_0 person DM6e1vEjYeM_0 bicycle DM6e1vEjYeM_6 bicycle DND0C3XD7mQ_0 horse DOQilAKERwk_0 umbrella DOmE1dA6CoQ_0 person DQJ4cPhVhFg_0 airplane DT895n1nqqY_5 bicycle DT895n1nqqY_4 bicycle DUO7S4ma320_1 cow DUO7S4ma320_0 cow DU9GDCN25lI_0 person DV4bDUzPAIU_0 train DWxidp6TWlg_0 airplane DXhV8uXKo7w_0 cow DXxF81ZJ_Jo_0 cow 
DX1_rKFVugE_0 dog DYBLqnRCo7g_0 cat DZ2-5rYAUVk_0 train DasqUqgdRv0_0 dog DbNVb8C-Au8_0 person DbcdvAsVI48_0 person DcZSisTgSJs_0 airplane Dc9pWTcUNXY_5 bear DeVQ3mr19Sw_2 skateboard DeYmal3wAoE_2 dog DeYmal3wAoE_0 dog DfOuxNA9lro_1 giraffe DfXOTMc9IyM_1 dog DfbPDcLTZEo_0 airplane Df89T9IxDvc_0 person Df93ocrYlyY_0 person DgBuwqAbIkI_0 skateboard DgBuwqAbIkI_1 skateboard DhA0S7lPFVw_9 elephant DhA0S7lPFVw_0 elephant DhA0S7lPFVw_1 elephant DhA0S7lPFVw_2 elephant DhA0S7lPFVw_4 elephant DhA0S7lPFVw_5 elephant DhA0S7lPFVw_6 elephant DhA0S7lPFVw_7 elephant DhA0S7lPFVw_8 elephant DhEO4MuDBOc_0 dog DhJAQCycHJs_0 elephant DhU-e-L13WM_0 person DhU-e-L13WM_1 person DhU-e-L13WM_2 person DiLGyNCykDE_0 skateboard DjQx_qEnXko_0 airplane DkMltyvC5l4_0 person DmPTbBo32qI_0 bear DmzlB4KBLN4_0 bird Dm-XQKFA-BQ_0 truck Dni4lPw5oH0_0 person DnzZd_9JlAA_0 cat DoB18AvtSxQ_0 train DofzMEokur0_0 person DonLBf92rMc_0 dog Dpp4k_BzZY8_1 airplane DqcEAexhJ10_0 car Dr6LfvQ_qKo_0 car Ds_4eRyQDPo_2 boat DuLk58XzeyA_0 train Duv1XrdytdE_0 cow Du4jlCLKZds_0 person DvjMMfcCq3U_0 person DvuTkGshMjA_2 cow Dvx0WVMuXVw_3 boat Dw4--8weqIA_0 person Dx0LbiFgvPI_0 truck DyY1MPuGf5w_3 dog DzUJVl_Pej0_0 person DzV-LWU5GoY_0 person D0b7xYmwl-M_0 skateboard D0fhKhpAhJM_0 zebra D0jRA5TKT-o_0 person D1vTDW7YDTk_0 person D2hRnCm0JtM_0 person D2oV8BC0iq8_0 person D21mLV716vI_0 person D32GncZb51Y_3 truck D4Jcg1u1Z-o_0 person D5maMxzZBe0_0 person D5m40zCfU8E_0 person D6E0xgBBquU_0 person D68oMT6tpc4_0 person D7H1UQbgDOw_0 cow D9RGgV3fKds_0 bird D_a5TQmLY-Y_1 person EBJ5jExrVqY_0 cow EBLJ9v0QSrU_0 car EBUmagxsoV8_0 person EC8ftAGy2qA_2 skateboard EDBDHaRqToc_0 dog EEZKnzcn-v0_0 cat EEfiTwozdM0_0 cow EExHYyuWa-o_6 bird EExHYyuWa-o_2 bird EExHYyuWa-o_5 bird EFRywDKULxc_1 train EIl3WAxkNwc_0 train EJJXpIiBEuw_0 cow EJrj49l1N8k_0 airplane ELPjTNVxWfM_0 person EL-2TiSSQJg_0 bear ENPh0zyq2wo_0 motorcycle EOAADsR4IpM_0 cow EP3xfG5_2i8_0 cow EQN5hODdb6o_0 skateboard EQ09ewMQn8Q_2 bird EQ09ewMQn8Q_0 bird EQ09ewMQn8Q_1 bird 
EQ9vXT_IFYQ_7 bird EQ9vXT_IFYQ_3 bird ESxRPsxVX-U_0 car ETxRky6I39w_0 person EVD8F2ZOBbI_0 elephant EVYb5simSY0_0 umbrella EWOehvvAvqU_0 person EXK2mcPIoBI_3 skateboard EXK2mcPIoBI_0 skateboard EXK2mcPIoBI_1 skateboard EXK2mcPIoBI_2 skateboard EXeKX_vOTvc_1 car Ed-cfsA3BsU_0 horse EeQOKiPASgY_0 person EfAYg1FMY-4_0 bear EfAYg1FMY-4_5 bear EfAYg1FMY-4_4 bear EfSd4ucOXKs_0 truck EfbKwoMA6Kk_3 horse EgpujPNldhs_0 train EhQXwVQsngU_0 boat Ej0A86Eu1p8_0 person ElHgkP_L8Eg_0 airplane ElTbW5itOAs_0 car ElTbW5itOAs_3 car ElTbW5itOAs_4 car ElTbW5itOAs_7 car EmvEUer4CVc_0 umbrella EnIkH0jrzaI_0 skateboard En6a3Ed7fvk_0 person Eo5s8ykuzbU_0 person EpBZ77zmngM_0 horse EpPw2JoHiTQ_0 person EqPK8xdf8hQ_0 person EqdBE21XAks_2 umbrella EqdBE21XAks_3 umbrella EqdBE21XAks_4 umbrella Eqz3xG4mWTs_0 person ErN8-oTPkq0_1 person Er-RnWQrUac_0 cat EsvPqOf-zEA_0 person EtIj5IUtn-g_0 airplane EtIj5IUtn-g_1 airplane EtIj5IUtn-g_2 airplane EtMlgBveP58_0 dog EtMlgBveP58_1 dog EtkDITl8mEM_0 person EwlCKB77dYo_4 elephant EwlCKB77dYo_2 elephant EwlCKB77dYo_3 elephant EwqkMKutzBE_1 knife Ew-67eGgZAI_1 motorcycle ExRpjMcFoBY_0 dog EzRrohN-4ss_0 skateboard EzZW0lM284U_0 skateboard E2DbbyoqLg0_0 person E2DxfZPPu5Y_0 horse E2DxfZPPu5Y_1 horse E2DxfZPPu5Y_2 horse E5erp1mhTzk_2 bear E7CsRpWElOo_0 horse E76rAl8oksk_0 dog E9ARkaJcz2M_0 person E9J03vUxTZQ_0 truck E9w2-Y4d3MM_2 truck E9w2-Y4d3MM_0 truck E-ea5keAG3Y_0 person E-jpkZw_MdU_0 motorcycle E_cxlc0vrMg_0 horse FBA18EyY2eI_2 boat FBQpWJPC5pQ_0 person FBQpWJPC5pQ_1 person FBo954IqOlo_1 bicycle FBo954IqOlo_5 bicycle FBo954IqOlo_0 bicycle FBo954IqOlo_2 bicycle FBo954IqOlo_3 bicycle FCICeCD4dKc_0 person FCypWBdHWb8_0 elephant FDKvBZH5LZE_0 horse FD89Oq7BclA_0 skateboard FETKMmV7P70_0 motorcycle FETKMmV7P70_1 motorcycle FEbVjS5-4ps_0 person FEsMY2y49d0_0 person FFuW_UWBVpU_0 train FHRrYqTZExQ_0 person FID77dKUAU8_0 cat FITKtv4tf7w_0 cow FIi2mEV5dfQ_0 skateboard FIi2mEV5dfQ_1 skateboard FIvujc5oqIY_0 train FJDKoEDLbNc_0 airplane FLsLXPchOx0_0 knife 
FMV_-mdKV8U_0 horse FNNrfAuIQmo_1 horse FNpd4DJ9LBA_0 horse FPrcQJh9INg_0 person FQMXzPIoL14_2 bird FQ-_p0lM-FM_1 elephant FRxSISi7wV4_0 bicycle FSFW4QxV8-0_1 truck FUlVrltDAOk_0 bird FWNxjmydNdU_0 person FYVNE1zYmyA_0 person FZrXRU5CxC8_0 boat FaG9RreeG6M_6 bicycle FaG9RreeG6M_2 bicycle FbF-nKQx0WI_0 person FcP50mFdaYM_0 train FdPApnQkBVQ_0 bird FdPApnQkBVQ_1 bird FdlDAmvsrR0_0 horse Fd1uYmMhzPE_0 horse FedOlGadIYU_0 bird Fgd7fHxPhBs_0 truck FhQLl40AANQ_0 bicycle FhvdS8wJkrI_5 bicycle FhvdS8wJkrI_1 bicycle FhvdS8wJkrI_2 bicycle FhvdS8wJkrI_3 bicycle FiCIZpT08B0_0 cow FiD6UZuDr1M_0 person FjFwrTEJK1U_0 person FjmcQfLBpvQ_0 person FkSfwpb1Gss_0 person Fkhru_XyPSU_4 bicycle Fkhru_XyPSU_1 bicycle FlOaA91Qa2M_0 cow Fm7Z44jVp_A_1 person Fm7Z44jVp_A_0 person FnIpAhpGTps_0 person Fn0IWwSVPlk_0 person Fotm2Ewrdr8_0 dog Fphk_JpP4JY_2 bus Fp2WKSG1qGw_0 person FrFv1rYtAws_0 train Fr298zXE9O8_0 umbrella FshCFVUSBXY_0 person FsiLiUl9I10_1 dog Fs0LVU4qKSs_0 skateboard FtEi5TPqRiA_0 dog FuWY9thbtxw_0 airplane Fu9EsTmh8z0_0 person FvCCkxW3sv8_0 person FvDNYPmcXjQ_0 bear FvDNYPmcXjQ_5 bear FvDNYPmcXjQ_1 bear FvDNYPmcXjQ_3 bear FvHW0PyfZ_Q_1 skateboard FvHW0PyfZ_Q_4 skateboard FvHW0PyfZ_Q_5 skateboard Fv542o8y6aE_0 person FyEliJtlQIY_0 person F0PPPvVTNnE_3 bear F3iJ9TqS-lE_1 bear F3iJ9TqS-lE_0 bear F39H1yTLerI_1 train F4xCJHUMGsE_1 elephant F47hXNWC3K8_0 cat F48wdm2YukQ_0 bicycle F48wdm2YukQ_5 bicycle F5Cc5wQJvhI_0 person F5Tm5BM0oaM_0 train F5unbOiULNM_0 motorcycle F5unbOiULNM_1 motorcycle F9B5cLZb3T4_4 bicycle F-OWsiGzRg0_0 person F_bZObIr47Y_0 bicycle F_bZObIr47Y_1 bicycle F_dg4Hi5ZU0_0 car F_xLwEhMPdY_0 person F_8rnxkAIgQ_0 person F_88eTR1pKU_0 train GAMoEnodBZ8_1 bicycle GAZx8145Hkk_1 person GAZx8145Hkk_0 person GCW28zxN9vk_0 person GDM2ctXPkmg_0 person GD5lsE86vOA_0 car GE2nS7Zbkrc_0 airplane GE6JO6nrE2A_0 person GF9unI6hEMI_0 airplane GGULYyv3_eY_0 elephant GGULYyv3_eY_1 elephant GGVYYc0KNWc_0 truck GHTZcjImEqk_0 person GIJMEjX04dI_0 person GIM6FHDMp0A_0 person GJTjlO1FJpo_3 
bear GJTjlO1FJpo_5 bear GKyxtLTjXUU_1 motorcycle GLG6II1JYko_0 bird GLpNrOwqNXc_0 person GLvmwdOjsHE_0 cow GOEqT5_bhls_1 elephant GOVFUFYsINQ_2 elephant GOfP3fxCTvw_0 person GPPKPFCI-Kc_0 person GPSXltbv0f4_0 motorcycle GP5anr-xMfw_0 person GRluMAZzu8c_0 airplane GSlWcX28sLk_0 person GUMAgiab8bg_0 person GUQmoD1aWhw_0 truck GUS7BLoHHPk_0 airplane GVNmuLeQ6pA_1 airplane GVNmuLeQ6pA_2 airplane GWBEjzdOLjI_0 giraffe GWBEjzdOLjI_1 giraffe GWBEjzdOLjI_4 giraffe GXMBH6OujvQ_0 person GYM460lVV-k_0 horse GYQO-VevHpI_0 person GYYxgR_VGFQ_0 dog GZSlxtl9bj4_0 horse GZSnngz0VX4_4 dog GZhWdIsibfs_2 bear GaierMnR4Xk_1 elephant Gbe74-OWIo4_0 person GbwJhzDrFtI_0 airplane GceLsS4AwH8_1 horse GcjSF4Uyl74_0 person GdoD65Qn6kE_0 cat GeOos0BFCSY_0 bus Gf_4plKc8tw_7 horse Gk8oy0G3dRU_0 person GlAH7-Rf8gc_1 truck Gm9yMiay9Is_2 skateboard Gm9yMiay9Is_3 skateboard GnTFmN4UNrI_0 motorcycle Gn6ltyIKgcs_0 person GoXxeDaopwo_1 person Gokzf7T4oVU_0 cat GpE5cmO_2kQ_0 skateboard GpE5cmO_2kQ_1 skateboard Gq7NQWGviWU_0 train GsLJXtf6RC0_0 person GuMiw_OwxlM_0 knife GubE6GTKTVc_0 person GubjV1tFrVA_1 umbrella GvRQ4QZHPGc_8 bicycle Gvjv4DJftts_1 cat Gv5P6ORl-1M_0 person GwAGS0xPZDQ_0 person GwY5WqLjTcM_1 cow GwY5WqLjTcM_0 cow G0C4XEsjKGU_1 bird G0i_9qeBwm8_0 airplane G0sAxRZi6m4_0 car G1doEZFbv70_0 airplane G1gPj-UK_gw_0 cow G107tKapVcQ_0 giraffe G16fmAfdp9A_1 zebra G16fmAfdp9A_2 zebra G2gyuboBt-E_0 elephant G2gyuboBt-E_1 elephant G3jqix8WiYE_0 person G5jg_wMMXmU_0 person G6iN1OKj_eE_0 elephant d0G8DzwenzU_0 person d2ugQO5Z8M8_0 airplane d3_3kfZ7rkc_0 boat d3_3kfZ7rkc_2 boat d4cTjVsUbIA_0 person d44bp_UDYOQ_0 cow d6vOtyrW2eQ_0 motorcycle d6vOtyrW2eQ_1 motorcycle d6vTXY--7zw_6 truck d6xRfIz84Og_1 cat d8GWgCsv0fo_0 person d8kSiPkTvek_1 bus d9IW6kCjfmA_0 knife d9IW6kCjfmA_1 knife d9YRdtwcTOo_0 motorcycle d-CkujEJl24_0 zebra d-6-T4gkBTk_1 cow d_eu3LZxECY_0 motorcycle d_eu3LZxECY_1 motorcycle eBIZSQg7pV8_0 airplane eBSijengaq4_0 person eBVE2h6i3Do_0 person eByIZzEh-DA_1 dog eByIZzEh-DA_2 dog 
eCzDpCe6xvc_0 horse eDUR6UTxYhk_0 person eFXZRDC38No_0 bird eGVUtZXFcmY_1 cat eJn0yGDjytc_0 cat eKcJ2alScW8_0 cow eL4uMBEG4gE_0 bus eMsvM8G2Z0s_0 truck eM0KTbh6EZE_0 person eN0JRkzxVPw_0 elephant eOeuY4ZbTt8_0 bird ePiG-qPeJ6c_1 elephant ePiG-qPeJ6c_3 elephant eQEBmp37ZMQ_0 person eQ6zyKVuU2s_0 person eROdacH1GEk_1 horse eRsf1_omRf4_2 elephant eRsf1_omRf4_5 elephant eRsf1_omRf4_6 elephant eRsf1_omRf4_9 elephant eRsf1_omRf4_12 elephant eRsf1_omRf4_13 elephant eRsf1_omRf4_14 elephant eRsf1_omRf4_15 elephant eTfXd1DQ6mc_0 dog eU_B2dXyBkI_0 elephant eVAEQdogSqk_1 person eVLFX7RZOJM_0 person eVnnuxmvpM8_0 person eVnnuxmvpM8_1 person eVnnuxmvpM8_2 person eWU6Kk9K6lI_0 airplane eWZHute7e6Q_0 person eXAJwsjltWs_1 airplane eXAJwsjltWs_7 airplane eXvofXrEuU8_0 person eZFqrD8MAKk_0 horse eZFqrD8MAKk_1 horse eZc2BPYt4rU_0 person eZ9Qy0zfLb8_1 dog eaoH4_TdTt8_0 person ea2xP5nm53M_2 knife ea_yr_40TRY_0 airplane ebc-oEY_eDM_0 cow ecksf6PLvhw_1 dog edx1TW6jRFg_0 person ee6Zcz8Pyfk_1 cow ee6Zcz8Pyfk_2 cow efczZtAK28w_1 dog egbQbEuLDlE_0 cat egfoTu4gtZo_0 bicycle egg1WCEyuTw_0 person egmCEe7OgiE_0 person ehxHGWKtaAg_0 person eh9YpbAcMZE_0 person ejRwmx3kUI8_0 person ej0xIcEXWiU_0 horse ekfKlK5w3Lg_0 person ekwoV0dpRwI_0 person ekwoV0dpRwI_1 person ek7bnCHGZq0_0 skateboard elB6RfDJA6M_1 dog eljiGrMEYiQ_0 person eljiGrMEYiQ_1 person emISA6YzHZ4_0 bus emISA6YzHZ4_2 bus eoIk6xjgQ-4_3 bicycle eomNxgG_ivE_1 umbrella eomNxgG_ivE_2 umbrella eomNxgG_ivE_3 umbrella er7oQRfciJ8_1 person euESct6MMNg_0 person euU-dtl6yyA_0 person evyGgkwoEpU_1 horse ex_t3nR28rg_0 bird ex_t3nR28rg_1 bird ex_t3nR28rg_2 bird ezrZuVfbOPs_0 person ezyFfdIkCCQ_0 cow ez5RcUDpMoI_0 bear ez5RcUDpMoI_4 bear e0cc8KmRgDE_0 person e0cc8KmRgDE_1 person e1VJlGQGYTA_0 umbrella e37RxtyP9nk_2 person e37RxtyP9nk_1 person e5Q4wIVJR40_0 person e5a3Z_wlpUU_0 person e6FwS_DOE-U_1 horse e6FwS_DOE-U_0 horse e6xVrcpMa9Y_0 cat e8Bc9zwTFnE_0 person e9G1bOd8GlA_0 car e9QeTOo4XBE_0 person fBYtizIh0wc_0 cow fCVsRanBID8_0 person fDWKYttA3fM_1 
umbrella fEA-xCaKqfI_0 train fEWxV64teMY_0 dog fEpH1AFdSqs_0 person fFGF5gVW6UU_2 bicycle fFGF5gVW6UU_0 bicycle fFGF5gVW6UU_1 bicycle fFIVNddMFuc_0 person fFT1LpdsEhQ_1 cow fFmghP5NQVA_1 horse fFw23dFiBDs_0 person fGJKT5ttUQw_0 person fHFCYOUh3vU_0 truck fJJuwfeoaWI_0 cat fJnC2nKYQVQ_0 motorcycle fMl60_fkMfc_0 knife fMu0OmctSTI_1 airplane fNTptXtpsoo_0 cow fOyaDea7Al4_0 person fPA_KgXi5v8_0 bird fPA_KgXi5v8_2 bird fP7EpJzJt0A_0 horse fQRAi5pN1Fg_0 bicycle fQRAi5pN1Fg_1 bicycle fRB4jD1Uecw_0 person fRSu9-lyuaU_0 truck fRoEX_9tHtM_0 person fSB_aY8HhJI_0 person fSFjxB1XU2E_0 person fTd-8VbsXus_1 airplane fUNAhHKf_OA_0 cow fUva5AKNiPE_0 person fUva5AKNiPE_1 person fU8NxbaMKu0_0 bus fWD8TEXWtek_0 bear fYBeigFqN7Q_0 train fYBeigFqN7Q_1 train fYWFh5BSEyg_1 cow fYup3iPmtHc_0 person fbAOGfYPur0_0 person fcFwbcMNdUo_0 bird fcFwbcMNdUo_1 bird fdMa18fwj14_0 person fdQFJz9IOso_0 umbrella fd73v3-Qjqk_0 knife feMxoQY38A8_0 person feMxoQY38A8_1 person feNEI7bD5HI_0 bus feO8Ip4MOn4_0 cat ffQKiGKTDaA_0 bird ffr6_q8liAc_0 person ffr6_q8liAc_1 horse fhVVVY5XhDI_1 knife fhWE0XDoxjM_0 airplane fh9tibERtYI_0 person fiKs6mdtsmM_0 cow fiVKh-Q-iY0_0 motorcycle fkGWb9_HVsA_0 elephant fk85Ace_-LM_0 dog fmE9seWSDfs_0 umbrella fmosIu7__Wc_1 person fmrqs2YvNCQ_0 person fm4syrPib5M_0 person fnKNDlQq-JY_0 person foWPkPNDqyU_0 bird foWPkPNDqyU_1 bird fojim3ViD7Y_0 person fpI0N9Lv5V8_0 horse fpv4fALQXpQ_0 person fqWa-DUPAGw_0 person G8IUU0gjlEI_3 boat G88QbXTQ6LI_0 skateboard G9Sdd3czaTk_0 dog G-kF2D98oms_1 elephant G-2yXvawYec_0 person G-5iXA4ERtM_0 train G__uy4I0Kzw_0 person HAOmPeNNjNc_0 bus HBUeO1WOFFk_0 motorcycle HBbWtsju37w_0 boat HBw-J_3WlCY_0 cat HF8ZrMgnyo8_0 dog HJYmTdBHVvU_1 elephant HJYmTdBHVvU_2 elephant HJ08tJU-IIA_0 dog HKNkm0t39B4_0 cow HKRKZksEGro_0 person HMfFCe-og9A_1 bus HMt7kgP0MC0_0 person HM8XKdebDvI_0 boat HNBF7AppAQQ_0 dog HNheLARZ64w_0 bicycle HNheLARZ64w_2 bicycle HN-3LaZVuCs_0 car HONOO3gmDec_1 person HP6UlpPulc8_0 bicycle HQ3nHqG24O0_1 cow HRF40e3Tbvw_0 bicycle 
HRF40e3Tbvw_2 bicycle HRRhkyr7U5E_2 train HRcVM9md3Xg_0 cow HTrUPWOXlvI_1 person HTrUPWOXlvI_0 person HULLjmpSRUI_0 cow HUssZ9c2Qvs_0 truck HW8Z7IdfuIg_0 person HYCFQjnuXBI_0 truck HY4XBjJWJYg_0 truck HY9NQ2zNtGc_0 cat HZVvEd_Tg_g_0 person HZngEEoQWDA_0 person HaMmo5SdpUo_0 person HaVnQ_P5HdQ_0 train HacYwonTy6w_1 skateboard HbWinZWeK2U_1 dog HbhmAMorGaw_0 person HeOWa0NNB0g_0 person Hg0fRYqZQ3U_0 person Hi384VDSwXw_1 bird Hjo95Vo38qU_0 person Hksncw-BlKU_0 giraffe HlWb7xQHFKI_0 dog HmH4hitBoc4_0 person HoSTe-9VUJA_0 cow HpdyNV4GqbM_0 person HpdyNV4GqbM_1 person HsGPGwN7vSk_0 person Hugie4Q6leo_0 bicycle HvKC4fLwUYw_1 person HvKC4fLwUYw_0 person HvOisoEmjKg_1 airplane HvU4Jz4Gd1k_0 cow Hv_d6KPoSgA_0 skateboard HwZUDp7yxxk_0 person HxPskaUPSXg_0 cow HyHQRrpWhpk_0 boat HylH7-rD0wA_0 bird HzEm2GlGzhc_1 truck HzTD_opfrqI_0 car H0QTCKxJmLY_1 train H1Oxjm0NqCg_0 person H2GwgpAKbzY_0 dog H3HrWs1HITE_0 cow H3S_DkPBWtw_0 elephant H3S_DkPBWtw_7 elephant H3S_DkPBWtw_1 elephant H3S_DkPBWtw_2 elephant H3S_DkPBWtw_3 elephant H3S_DkPBWtw_4 elephant H3S_DkPBWtw_5 elephant H3S_DkPBWtw_6 elephant H3XF5rAtuJA_2 person H3XF5rAtuJA_0 person H3a-C6RRYyo_0 person H5mmSHRHeOA_0 person H6TuJxifX64_0 train H6w4nf5H4U4_0 bird H6y9C6Ndy2A_0 bird H6y9C6Ndy2A_1 bird H7XZ5716KnI_0 person H7z05uOIPRM_1 train H92s5sHsotk_0 airplane H-4EZAh3ZiE_0 bus IA1FFP5WN-4_0 bear IA1FFP5WN-4_2 bear ICj693xC5DY_2 airplane ICj693xC5DY_0 airplane ICj693xC5DY_1 airplane ICxHfkE0XCo_0 person IDx8_34ETTQ_0 person IEyymbAxp24_0 dog IFS0QSfnbaM_4 knife IFS3ILjlHkY_2 truck IF_auR-0fxM_0 knife IGv9j-RQi0k_0 dog IG0UmL5bvEo_0 cat IHFF7DOpF4Q_0 motorcycle IHmYV5ymU08_0 cow IKEUMXjIyTQ_0 car ILZvGBKYYrE_4 bus ILZvGBKYYrE_0 bus ILZvGBKYYrE_1 bus ILZvGBKYYrE_3 bus IMTbwAOJNIc_1 train IMh4AHUZ2HQ_0 person IM4EBlgTTOg_0 bus INlrdk7hgl4_0 knife IOQt3fFTSVc_0 horse IO7-lFsWvl0_0 bicycle IO7-lFsWvl0_2 bicycle IPEJs-vLCV4_0 truck IPEJs-vLCV4_1 truck IRpgjSP4pLI_0 person IUJGm3Iu0Bs_1 bicycle IUgsoj74aWQ_0 person IVlnjlVA5rc_1 
bicycle IXP1ML1tdZQ_0 bus IXRxjnkOJeo_1 motorcycle IXenlPUsqrc_0 person IZvOv7tCr00_1 train IcRjjKSX5uc_1 person IcRjjKSX5uc_0 person Icnle27cmMM_0 bicycle IdVZJW1HC9E_0 airplane IdVkEz2IF7w_0 car Ieb9oZ9eB8I_0 dog IfWSlkR8DbU_0 horse If1zPOV0idg_0 horse If1zPOV0idg_1 horse Ih2gG0269H8_0 bus IjQXXK4uYVY_0 dog IlMHPX2VcGw_0 elephant IluTkrIqsVg_1 elephant IluTkrIqsVg_3 elephant IluTkrIqsVg_6 elephant Io7bj1jNpPU_0 car IpjQJZ42zyQ_0 elephant IpjQJZ42zyQ_1 elephant IpjQJZ42zyQ_2 elephant IpjQJZ42zyQ_3 elephant IpwI5VTWHLc_0 horse IpwI5VTWHLc_2 horse Iqy4PPX-Tlc_0 person IsHTpd2cnvI_0 train Ithz7KSWCxU_0 bus IudK7ch_IIg_1 airplane IvRDw_IA0_s_0 cow Iwve-3lTmMk_0 person IyLshk4jlyo_0 cat IygCvE4_amo_2 bird IygCvE4_amo_3 bird IyjFl1Hhk3Q_0 person Iz4XK2zNDUU_0 person I1wuUCQbXLc_0 umbrella I2DkTg8wPnI_0 person I2WoCDTXONA_0 person I2WoCDTXONA_1 person I2lh579NY2s_0 bird I45pfwCBczo_0 person I6ESaCg4z_8_0 person I6TvXxQTtZQ_1 horse I6TvXxQTtZQ_0 horse I6TvXxQTtZQ_2 horse I8OfOokt6YU_0 person I8XhyDacLtU_1 bird I8m0QjcQlSo_3 bicycle I8m0QjcQlSo_4 bicycle I9ivT_P5G18_0 person I_k5qXHxb0Y_2 knife I_k5qXHxb0Y_0 knife JBkwLPruJe0_0 person JBlDwXJFbQc_1 umbrella JDZiLsus2es_1 skateboard JDvfPX9cFDg_0 dog JEpTSJRO3co_0 person JG2tVzjxhao_0 bird fsAEg5w8xTg_0 person fsCwAYYI4js_0 person fsKTO8ksQ90_0 person ftMQOwvHDF8_1 car ftns38_MSTM_0 cow fvxc7ruCiYk_0 cow fvxc7ruCiYk_3 cow fv8aFklHmko_0 skateboard fwEvL-luHlw_0 airplane fwEvL-luHlw_1 airplane fwt8LzF8Mic_0 person fyZImQFj_Y8_0 cow fycK7kJWV1I_0 umbrella fzr3kw3BDDo_1 airplane fz6ONSUlvNY_0 person f0i5E4DOFc8_0 bus f2SctRCBZQc_0 car f3Z5d9I7rIw_0 knife f4fxmsxPzrg_2 elephant f5LEkr56Efg_0 person f5Uz-TuMQ0Y_0 horse f5ZpGBYuJ7o_0 boat f5kAHBPObsw_1 cow f6fZjMRJgoM_0 horse f63aow5BRAI_5 bus f65rTlprptk_0 horse f7yNS6ltUFk_0 person f8H7Ns8cw-c_1 train f8rXEKktSCg_0 elephant f_VqZJyJ4GM_0 motorcycle gAHcWn06srk_0 person gB0-eGpMj50_0 person gB2asNpe3zY_0 person gB7jSQgkcMM_1 horse gCDC8R7IB7k_0 person gCwe-o1nqBc_0 motorcycle 
gCwe-o1nqBc_1 motorcycle gC9z8IzG83s_2 bicycle gDEk1TWuZug_2 person gDG5Xr2p2y8_0 elephant gDHnBnqogX0_1 airplane gDHnBnqogX0_0 airplane gDbZj1O36VU_0 airplane gDihz5aZLyA_0 bus gDihz5aZLyA_2 bus gEkiX2yFQm0_0 cat gEnLlmMhxfE_0 person gGNmKI2M8i4_0 person gGd6hYCKdEs_0 bird gHMCfvdZzMM_1 person gHYzGPx8f_4_0 zebra gHYzGPx8f_4_1 zebra gIx12Q8A3p8_1 person gJwtAwSqEow_0 train gKAPbj9esXI_0 skateboard gLqb3YuVttM_0 umbrella gMRigFNGMeY_0 person gNfQargrILo_1 car gOFgWsujZaI_0 cat gOWc7VBEwMo_0 car gPEMf91dil8_1 horse gPSB23kv5Uc_0 person gPhL52Mj1_A_1 motorcycle gQ1qmNZzaTo_0 boat gRDFlfzM_iI_4 elephant gRDFlfzM_iI_6 elephant gRDFlfzM_iI_1 elephant gRDFlfzM_iI_3 elephant gRMJhsEuiAc_0 motorcycle gRMJhsEuiAc_1 motorcycle gRMJhsEuiAc_6 motorcycle gR29_U82QeE_1 horse gSJbrV0vy8M_0 person gSz16yrF9yA_0 person gT0yjYUmf90_0 cow gUGlSiBvfOs_1 motorcycle gU8s5nxyBDk_0 airplane gU8s5nxyBDk_1 airplane gV3CcNeVZcY_0 elephant gV3CcNeVZcY_1 elephant gWkTSRUqxoo_0 person gW6HdCsty0U_0 knife gYLohMps12s_0 elephant gYLohMps12s_3 elephant gYLohMps12s_4 elephant gYLohMps12s_1 elephant gYLohMps12s_2 elephant gaKGYmLxJVU_3 bicycle gagJEV--3Pw_0 person gdAVi92ZfSc_0 horse gdx96NpU6BY_6 train gd4UfPes3YI_0 cow geEXytMwfq0_0 person gePAI8wYSdw_0 person gfTVuceAzNs_0 elephant gg8YzsSulrQ_0 truck ghciPMerSc0_0 truck giWDg00GIDw_1 skateboard gig9B4ecK3w_0 person giy_SOmkBY8_0 umbrella gjnyg97XwnA_0 person gk-cycr3xjo_0 person gmVDmxVI7n0_0 elephant gpV4Qlx6YrA_6 bus gqLSqmK3m74_0 motorcycle gqZYY0m_TuM_0 motorcycle gsrvWcnpNP4_1 motorcycle gsrvWcnpNP4_0 motorcycle gtVr7urU8c8_0 person guDQk0hVgU0_0 bird guFTeFvjr9Y_0 bird gu3DTnVjNQM_0 knife gwXwH2Cs3BY_0 knife gxHGnBrpPZs_1 airplane gxHGnBrpPZs_2 airplane gxKuLTUNhp4_0 horse gx7PFNpHd_A_0 person gyaP7qiRxfY_0 cow g1OZWFLSspQ_0 motorcycle g1rQZNA6yyo_6 cow g1rQZNA6yyo_0 cow g1rQZNA6yyo_1 cow g1rQZNA6yyo_2 cow g1rQZNA6yyo_3 cow g1rQZNA6yyo_4 cow g1rQZNA6yyo_5 cow g3HXJNMlAsM_0 airplane g3oqxu4AhBw_0 person g3swsx-acTI_1 dog g3swsx-acTI_0 dog 
g3vbaqnLXn8_0 cow g4bayrAEhIU_0 umbrella g5rUJOptHXQ_0 horse g5ty_7So5Dw_0 cow g51pzrSssl4_0 person g8M5d--ghFM_0 person g8vKB3IU1JY_0 horse g8wHQVpij-I_0 person g9eN0FHn4-E_0 dog g-EAZ6gVcic_0 motorcycle g-pVcRyPQG8_0 cow g-yHAyCA2KI_1 horse g_C47ek7TmI_1 knife g_C47ek7TmI_4 knife g_C47ek7TmI_5 knife g_QHWoQgmFQ_0 person g_QHWoQgmFQ_1 person g_Tk-SESaYI_0 person hBHt6mnfUeo_0 bus hBMZHx3_cTs_0 train hC69bGTvLBo_0 skateboard hD3Bn03GXNQ_1 dog hFNAxcRpGBM_0 skateboard hFSygfNIY_Y_0 skateboard hFex_TS-aUo_0 person hGnscWmehTI_0 car hG9efPyerw4_1 horse hHdBCtElIQg_0 boat hHlqyr11RiI_0 person hIWM6v4zcSM_0 elephant hKoGkl1wyCU_0 person hON0t9Dzay4_0 motorcycle hP1ViN_WadY_0 cow hR-utsUhYSg_0 person hSAUbt6-Yjc_0 knife hSAUbt6-Yjc_1 knife hSeHymINF98_1 bus hTaEY4YCVqM_0 airplane hUjzfhyM30Q_0 airplane hUjzfhyM30Q_4 airplane hUxguQsLvcs_4 knife hUxguQsLvcs_5 knife hUyAVmRxAzM_0 person hU_dAA1A0X0_0 person hU_9cs_qw1w_0 person hVjyHhYH6Ss_1 airplane hVjyHhYH6Ss_2 airplane hVowH5-Ss4I_0 train hV4tEsm-F5s_0 airplane hZdxBk4cjmg_0 bus haiW7jpl3wY_0 person hcJBaxNIvE4_1 person hcJBaxNIvE4_0 person hcV4RZPeRbo_0 airplane hcuLD1cn9GA_0 person hdUc4uUYh0E_0 boat hfWfYFG2O94_0 person hgagtwzScGQ_0 person hhFOwnYOLl0_0 giraffe hhLyE41H8nE_0 motorcycle hhNlg3Ws9Dc_0 person hhyVc2wsXVk_0 horse hhyVc2wsXVk_1 horse hh432zDMgPo_0 train hiKbm0rqEb4_3 skateboard hiN_kULL84o_5 umbrella hiN_kULL84o_4 umbrella hkEV_E85Jzw_0 car hkSv_YxmN7w_0 person hlZDJrpJzPU_0 person hljwk2WbXGY_0 person hmSeUlyLLak_0 train hnZvUHrA3CY_0 person ho6sg-47RD0_0 airplane hqNhKf3a69Q_2 truck hqYyvTeOvas_0 bear hqaNlwG0DNU_1 person hqrmbVw_EwQ_0 cat JIuyqZCU5zY_0 cow JKiG_pk4lSE_0 person JKmvEldBeEQ_0 cow JKsodtdUW-o_0 boat JMLFZcONQAs_2 skateboard JMLFZcONQAs_5 skateboard JMMci7hryUQ_0 motorcycle JMMci7hryUQ_1 motorcycle JMMci7hryUQ_2 motorcycle JNUhCGqPlFg_0 bicycle JPHPd13gaL8_0 car JQrDalAaP4w_0 person JQrDalAaP4w_1 person JQz6IarIr4E_1 person JRAVv2LgiGo_0 skateboard JRUvqZtBMrM_1 knife JR0QfXOOmaA_0 
person JSml3dguiUk_0 motorcycle JTFT_iJGFUE_0 person JUdUxjC2LRE_0 bus JWU6vdEt_OU_0 person JWgjcmMh62o_0 train JWgjcmMh62o_3 train JW0-hEA4v9A_0 person JXIh3fJ4Jv0_0 person JX8ODdMUi7g_0 bird JZC15tOV-eg_0 horse JZMOzYwcTA0_0 person JasH0KtinHY_0 airplane JasH0KtinHY_3 airplane Ja5jdE_8qio_0 person JbyTZ-esDXM_0 truck JbyTZ-esDXM_1 truck Jb93SMKg5-k_0 person JcVOyLTTvKA_0 person Jc18AfXzLZU_0 person Jc18AfXzLZU_1 person Jd7uOTcPvY8_1 car JeWRfjjRMQk_0 person JerVzlWZwac_0 bus Je-lnjK_8fk_0 person JfjkltN0lZc_2 horse JfobA6aKaas_0 dog JftQEHHdO5w_0 truck JgaE8KDwg7k_1 bird JgaE8KDwg7k_2 bird Jgc2PQ8Swbo_0 cow Jgkj9pj3-tc_1 horse JhdyYrqxn_g_0 motorcycle Jh7o2iR-lRg_0 person JijsSnHthXE_0 train Jio_xBodQxY_0 person JjQ8bdq_eXk_0 person JjtkwX4npyw_0 person JlG7Wzz4uU8_0 car JlG7Wzz4uU8_2 car JmkUuTj-Nks_0 umbrella JmtuhGXlqmY_1 airplane JnNJksYeB18_0 car JoKod4XDE6o_3 bird JoKod4XDE6o_0 bird JoKod4XDE6o_2 bird Jp6_g7oF2lQ_0 cow JqEprl56N4I_0 skateboard JrIoaRmcs6o_0 cow JrNq6Z5YSoc_0 person JrUHo8zVwpo_0 bus Jsjz8hiE_iU_0 person Jt7Ojtx0TMs_1 car Jt7Ojtx0TMs_3 car JwBYrXUHdZ8_1 horse JxTKws5Dx_8_0 cat JxjXZYfiem4_0 dog Jx9mLWFxpnc_0 dog JyYBZBogBvs_1 boat JyduNnkZOiY_0 person JyrP5u2MuSo_0 motorcycle Jzcc0pjgA5c_0 person JzjRC1xYwy8_0 dog J02u46SlewE_0 person J1GtEDNcsHQ_1 horse J2JOoOxaJdw_0 person J2bB5BgR-5Q_0 bus J2hdK_vuyyw_0 motorcycle J2ycUTr0lJQ_0 cat J4T_QA6J7kw_0 boat J4T_QA6J7kw_1 boat J4T_QA6J7kw_2 boat J40neYxbEYA_0 skateboard J5-Z9tNISPw_0 car J6klPNMhLKc_0 cow J7I-QXddTIk_0 person J7hnNI0jtws_0 person J8ITxacusCI_1 person J8ITxacusCI_0 person J9-8Qe3BWoI_0 bicycle KARqX_agLpU_0 knife KAgU6SrQTlQ_0 umbrella KAgU6SrQTlQ_1 umbrella KArVkjxSGpM_0 person KBCIbwknDew_1 bicycle KCeuwWEv3ZU_0 person KCi4f4Hp6oA_0 airplane KC5ECqMiTLU_0 skateboard KD84e88aqHU_0 person KD84e88aqHU_1 person KEpHRYH8r28_0 giraffe KGdIJzBVugY_0 truck KHqFOBeHCwU_0 boat KIOilXstQLY_0 person KIOilXstQLY_1 person KJ2kEj3C5HU_0 airplane KKWUDcCI6yU_0 cat KML2msVr5mE_2 elephant 
KMNAnjpGqv4_2 truck KNIVWRv3awA_0 truck KOmUta2sIgk_0 person KOsm1GUs46s_0 motorcycle KOza4PGcE0M_1 bear KPLDdfk8hIg_0 train KPLDdfk8hIg_1 train KP7RzxyTTAU_1 airplane KRKxqkfpetI_0 person KRNWPLnvZz4_0 person KR7Ah1hw5gA_0 person KS8S3STq2W4_0 bird KS8S3STq2W4_1 bird KTkhMglNlCE_0 person KTpwnsz498Q_4 horse KTpwnsz498Q_6 horse KWYD2iyUmgk_0 horse KXIJLUzQi5Q_0 person KXMlBQiVeEg_0 train KXPGShfFlU8_0 person KX9MjIikBU8_3 bicycle KYc-vKtN0DI_0 person KY4mXNDM8I0_6 elephant KZdOpoUJ3Nk_0 person Kcg7gY3WD7M_0 person Kcg7gY3WD7M_1 person KeJWqAV0EgA_4 umbrella KeJWqAV0EgA_6 umbrella KedkADy9tBc_2 knife KedkADy9tBc_4 knife KgDguip9mZM_1 horse KgDguip9mZM_2 horse Kg0XH4mez1A_0 cow Kho8jpdZzTs_0 skateboard Kjd7D98QULc_0 airplane KkdLE8EkzQ8_0 cat Kkw7ZPCEz5w_0 person Kk-2ajLfeh8_0 cat Kk_LtYOgQXA_0 boat KmLYFD7xykY_1 car Kmwqg1uRPRE_0 person KnQuff1ffzM_0 skateboard KoRqIzHBQks_0 train Koq5YYiN1tc_0 train KpHpGcL_jEc_4 bird KpHpGcL_jEc_3 bird KpfTioA2qKw_4 elephant KpfTioA2qKw_5 elephant KpfTioA2qKw_0 elephant KpfTioA2qKw_1 elephant KpfTioA2qKw_2 elephant KpfTioA2qKw_3 elephant KppX5i4QRZ0_0 umbrella KqsBJAhU_Dc_0 cat KrRVwTPG26w_3 dog KsE43Lli_3U_2 horse KsE43Lli_3U_3 horse KskL-dN784o_0 airplane KtfQRtfJQ8s_2 skateboard KxDh7a8_AmU_0 person Ky4ahEexJUc_0 airplane KzDLvBPcQew_2 knife KzMFSHS4xVs_0 bird KzOxVUsduDY_3 knife Kzt2eSUr1rY_0 dog K0IvSLIQbgQ_0 bird K0SktTNMXQU_0 motorcycle K2WsSTHs45g_1 elephant K2WsSTHs45g_3 elephant K2oIvJd-d-A_0 person K4IN8pNA--U_1 person K5C2Y3JvXCU_0 skateboard K7TOmJ6RB_8_0 skateboard K89ScUqJx5E_0 person K8_u8_NkoAk_1 train K9L-BYQcepo_0 bear K9pgB6KH-EY_0 cow K-laAofNBgs_0 horse K-xigT3f2VA_0 horse K-0pug6xNEI_3 train huFyV9NBOBY_0 person hua1XfGRDoc_0 horse hulGMGXPaBE_1 elephant hvXgMKsetW8_0 elephant hxBjbg6s174_0 person hyNwXcKelY0_1 train hyNwXcKelY0_0 train hzUpr73wZz0_0 airplane h0jkFTI3qmI_1 horse h1Hv9HnMe70_0 car h1zuISckIeI_0 bus h10iwpJO4pQ_0 train h2vHhQ7_MT4_0 skateboard h3Fo82UBMRY_0 dog h3IHNdoTXT0_0 person 
h3PBWibdVUc_0 train h3RgUc0oY-c_1 knife h3RgUc0oY-c_2 knife h3t75PNg778_0 person h3uSlke3koc_0 motorcycle h4qpt2FEbC0_1 elephant h5JnAInpuSo_0 motorcycle h5JnAInpuSo_1 motorcycle h7_4qHh7Vas_1 truck h8TnGCoSVeQ_0 airplane h8fKxUGKz8k_0 motorcycle h8fKxUGKz8k_1 motorcycle h-pm7wD31Ss_3 train h-pm7wD31Ss_0 train h-pm7wD31Ss_1 train h-pm7wD31Ss_2 train h_VG9OpleKc_0 motorcycle h_VG9OpleKc_1 motorcycle iAZV9nCf3RE_0 motorcycle iA7evYzMygE_2 knife iDBpYSvahjE_0 person iDHjOnhAKA8_1 skateboard iE75sptNwbs_1 truck iE75sptNwbs_2 truck iFVwtlc6IYE_0 horse iFdOAHM4xDg_0 person iFwPDZE4778_0 skateboard iG4PvtWoxG8_3 cow iH6Vlg0k330_3 dog iH6Vlg0k330_5 dog iH6Vlg0k330_6 dog iIWFuFa7Z4M_2 person iIWFuFa7Z4M_0 person iIWFuFa7Z4M_1 person iIzXR3qRt48_0 person iI08dGJAOMs_4 elephant iI08dGJAOMs_3 elephant iJcf4PhS_SQ_0 person iKzpo0D7b_8_0 cat iK-7fByPADo_0 person iMdJ5Xlz0hU_0 knife iMeNXU67sVg_1 skateboard iNiiX6P-kqA_1 dog iOxVi3Tq4ts_0 train iPlXCYJ6F7w_0 skateboard iQ-tckw9_uk_0 truck iRzm-CyyW-E_0 person iSNNmpWe3LA_0 person iS7wej_vrvM_0 person iVBDQ5wm-0w_4 airplane iVTAxc633DE_0 person iXrLhQgf8HM_0 elephant iXrLhQgf8HM_1 elephant iX4gVag7ShI_0 person iYL_l0MxgMY_0 bird iYlgi1z6nYI_0 truck iavLgJ3_05c_3 horse icVQnqL0xPI_2 boat idkGZQeYvJ0_0 skateboard igbftnGj4-o_0 bicycle igg-y1toBvA_0 truck ihkqhIpO_hw_0 person ijbDg16cIC8_1 bus ik4t0sIEmTI_0 person iltKgr5JKI0_0 person il5UMLzlQts_0 bus imDfH3So8XU_0 car imDfH3So8XU_1 car im4bCIqpJns_1 bicycle im4bCIqpJns_2 bicycle im4bCIqpJns_0 bicycle ip1Y5qjDYfQ_0 airplane ip_oGEZ6zMw_1 person irvGAW8bqAw_2 bus isbtQ06yVM8_0 truck itNqceL9dLM_0 cow iuii5XHcAYA_1 dog iulQVUJanzg_0 skateboard ivGBks6evlo_2 dog ivSQWqs_u1I_0 bear ivpPLs-cqxA_0 car iwHJDgGVuCA_0 airplane iwHJDgGVuCA_1 airplane iw7zrlRPMo4_2 horse ixgGTHdobNI_0 person iyDedQNhiYI_0 cat iyaI71EqLsg_0 person izHN9JUwtJ8_0 boat izQ74nq9zh4_0 cow i0QLe6YR7yo_0 person i1OlP2Sq0a0_2 truck i1xqjStfSsc_0 person i2SgjtgmsE0_0 person i5DfO7_n0q8_0 cow i5GkqX44npg_0 car 
i5JWZKdNOac_0 motorcycle i5JWZKdNOac_1 motorcycle i6mzD2HGWOA_0 airplane i6sR2IY4-Ck_0 cow i8JA178zd0s_0 cow i8Z9-KSMCTA_0 bicycle i8syjc7Erco_0 motorcycle i-EijejS9Oc_0 person i-eCNLw3hVU_0 bird i_l48nIXjxw_0 horse jBYa-gqwSeY_1 cow jCiTA9oIryk_0 elephant jCuDdMn9sYA_0 person jDGrgBt83DU_7 car jD33e45nuRw_0 bear jD5K1zGLtvc_0 skateboard jEE_ZlDJ4cc_0 cow jEzxW8ylxK8_5 airplane jEzxW8ylxK8_1 airplane jEz3EToUAg8_0 person jGCLsWhdTds_0 umbrella jHhJLxyr960_0 bicycle jIqTFAgBLpc_0 dog jJkZrKOehcQ_0 person jKD0oOyMl2g_0 person jLO5kFd36OY_0 bird jMLgjCQWQY0_0 person jMmH8xfY1kw_0 cow jMyxNu6YkEQ_4 boat jN5jdXmBv2Y_0 bird jN5jdXmBv2Y_1 bird jN5jdXmBv2Y_2 bird jN5jdXmBv2Y_4 bird jN5jdXmBv2Y_6 bird jPouarzO-e4_0 cat jQPz-9OfXRM_0 zebra jRQuCIsXz1c_0 airplane jRUeQo3V1bk_0 person jR366TYYsuo_0 person jSkwPkAAiFM_0 person jTNzSUl_zOQ_2 elephant jUzhGHE_jgE_0 person jVYzDs5YRM4_0 cat jVoxxEKEOFo_0 motorcycle jX_taNw8FFg_0 skateboard jY4Dh-UAAaY_8 skateboard jZBMDKFS5D0_0 person jbp8mHJfHGI_0 person jcYNP_FWkA0_0 person jcne18p2r2c_0 cat jdttJqwg_3o_0 motorcycle jfSY_UCtq-w_0 motorcycle jfTXT98Naic_0 cow jgQiUggCu7A_0 cow jjTgUBAd4D0_0 cow jjq2PAHcLiA_1 person jjq2PAHcLiA_0 person jlBGbg_CJz0_5 train jlBGbg_CJz0_6 train jlOOUqYlNNY_0 motorcycle jlgECDznb0g_0 bear jl7oYVm0X34_0 bird jnU2n55I_LU_0 dog jouq30Wmqxg_0 motorcycle jouq30Wmqxg_2 motorcycle jo6o9BwKsUQ_1 elephant jqPPsrUULY8_0 horse jtWUSSp-JiY_0 truck juS7DvjMPoo_0 person LCzQs5ybibU_0 horse LDwE_VIc9Zc_0 cow LEL3OcoqV8k_1 knife LEPsxGhXYxY_2 truck LEPsxGhXYxY_3 truck LEXpJRLTRak_1 bear LFWlRG2B-w0_0 bus LFWlRG2B-w0_2 bus LFWlRG2B-w0_3 bus LGvjU4PVcC0_0 boat LGvjU4PVcC0_1 boat LGvjU4PVcC0_2 boat LIC3D63R3HU_0 person LIhhU9j6MI4_1 cow LLD46pbwbiU_0 person LLiy-k-4-OM_0 train LLvpoIlozKU_0 horse LLvpoIlozKU_1 horse LO0IsJZeXhU_0 elephant LO0IsJZeXhU_1 elephant LPzXMvYB97A_0 person LTh-XAE8m3M_2 train LURSawdSS9k_0 dog LUsb9vk1q6U_0 knife LU539OYJ_z8_0 person LXHO99b-uAQ_0 horse LXHO99b-uAQ_5 horse LX0HL9qztic_1 
umbrella LYPeAbFVTQw_0 person LZCq31MG3yY_0 person LZEMKs6H53w_0 person LZNlxXE0_2s_1 skateboard LZNlxXE0_2s_2 skateboard LZNlxXE0_2s_3 skateboard LZ3S39QfkKA_3 bicycle LbHrVQR9f24_0 cow LcvMMvrPIug_1 cow LdNi4yjT3yE_0 person LdusiqJFR6I_0 person LesCJsHdAU0_0 cat Le2725PKYQk_0 dog LfUSKsg8JoQ_0 cat LfhPiqIDAcI_0 person LgbwFATbwhs_0 cat LhF7TJOwt8o_0 motorcycle LhOMGvkzP28_0 person LhOMGvkzP28_1 person LhkFN7f676g_0 airplane Lh1QrEwtBxU_0 skateboard LiS31CevvvA_0 person LiS31CevvvA_1 person LjRWmJThZrA_0 person LjyZ7Djyq1U_0 person LkP8lgpmCJc_0 airplane LkfML7bjGg8_0 person LmCzQ6WrePM_0 bus LnYz8cQsrWk_0 cow LpTBcxby8_U_0 cat LpT4VBLapqM_0 car LpjbdSyW__A_1 truck Lqm0JTDlIaU_0 truck LtIW9sP55N4_0 person LuC8ON_75l4_0 person LuRLF2TroVk_1 airplane LunFMJp3_Uc_0 cat Lup2fypzuD4_0 person LurlbycI8WQ_0 person Lvd7WBHnDpk_0 truck Lwxi57QRroE_0 person LyPkKroSsaU_0 bird LyPkKroSsaU_2 bird LyPkKroSsaU_7 bird Lz7uf7cmfAU_0 horse L0Y9j9DtU1o_0 dog L0mqjqU7pmw_0 person L1C1GJZuI6U_0 horse L1TihVYcfII_0 bear L1xr5gaSzeQ_0 bicycle L2lJenTKrLU_0 truck L2lJenTKrLU_3 truck L2lJenTKrLU_5 truck L22pyXEUjv8_0 bird L22pyXEUjv8_1 bird L5px8rMqxRY_0 motorcycle L8Q0lJgaUi4_0 zebra L-3-1978GvI_0 knife L-6R2vuKWhc_1 truck L--TMS61Zvw_1 boat L--TMS61Zvw_5 boat L_dOv3wd1ZM_0 person L_nI4_2RbTU_0 knife MAmHLoJdmc8_0 cow MENNFokPNbU_0 airplane MG8-IGrKVxc_0 truck MG8-IGrKVxc_2 truck MG8-IGrKVxc_3 truck MG8-IGrKVxc_5 truck MH1GdFqE_lo_0 horse MH1GdFqE_lo_2 horse MH1Kct5RCRg_6 airplane MH1Kct5RCRg_10 airplane MIkxezmilfY_0 person MI6x6FrXJqs_0 knife MI9BIgkOBjI_0 horse MJ9vJFTTV5c_0 person MKiCrBXtflw_0 cat MK8Jm3I4In4_0 dog MK8Jm3I4In4_4 dog MMiSt9MNne8_0 train MNve0XPgcGA_1 bird MN1A5E3jNSE_0 horse MPJu68gBGfI_0 person MPMudxdiIds_0 train MPfgu6-snaM_0 bird MQ1o_7gpp5E_0 person MQ1u8IEmFSA_0 person MQ3HhLmsCik_0 person MRNJmLLkjPc_1 motorcycle MRNJmLLkjPc_2 motorcycle MRqfEOhWW48_0 person MSItPvVCUN8_0 cow MSd5Ecl5-W0_0 person MSnEnQ0psW8_0 car MV6MGXhQwFQ_0 cat MWbnSN-7WG0_0 cow 
MWt4P6HWxMM_0 horse MXEcQSFwng0_0 cat MXTzea4MeHc_1 car MXoVDyewPBE_0 person MYFPnJIKK5k_0 person MYpdq9KvK8o_1 umbrella MasaNQLCMGE_0 person MbRvEKuvR04_0 skateboard Mb6r1es0AbU_0 cat Mcdl3s6oQrc_3 bear Mcdl3s6oQrc_1 bear Me-clc6PGkA_2 horse Me-clc6PGkA_3 horse MfYpMzLWST8_0 cat MgFhoihDD1U_0 person Mkmpoid1BvA_1 train MokOHR3wImM_0 cat MqBTk3ITQ8c_5 elephant MqBTk3ITQ8c_3 elephant MrWZEUtDBq8_0 dog MuyIuhdszH0_0 person MuyIuhdszH0_2 motorcycle MvKMtFVP5NU_0 person MvbZEiffy8s_0 person MvuGj1qR4Ic_0 motorcycle MvxUj_Du2IY_0 horse Mw6Cu1mPanU_1 cow MxtJwd0GBkA_0 airplane MzTsjMauBH8_0 truck Mzrv2OCC2GE_1 person M0TTCr9jjgc_0 horse M12KvkF1Nec_0 person M40gbbuNuL4_0 truck M5p7jyvEgPk_1 knife M52oDxJEXk4_2 horse M52oDxJEXk4_0 horse M7Kcv9fUrhA_0 cow M9CCnnc8m8k_0 giraffe NAInb4dMC_E_0 airplane NAInb4dMC_E_3 airplane NAsDBYDNhwY_0 cat NDxs_vxhhME_1 person ND-VrJY7mU0_0 person NEsCBcZFajg_2 airplane NEsCBcZFajg_5 airplane jvlyXCBSuCk_0 person jwYviTYbJYs_0 cow jxL3F-iB2S8_0 bus jxmsNv20V50_0 train jyrY4oyyA7M_0 person jzNOBsi5TtQ_0 cow jzeFDGEt_iQ_0 person j4UJ80q_s3c_4 skateboard j4UJ80q_s3c_5 skateboard j4t-Otp9ES8_0 person j6XmNyG8nYE_0 bear j8SM6uLadmU_0 motorcycle j8aX3NuEnxc_1 airplane j8aX3NuEnxc_0 airplane j93wwDC_a2I_0 skateboard j_tT90ISNnc_0 skateboard j_6ZWhyOOcA_0 person kBKG0SaNbdw_2 cow kBYFlPJJx-s_0 person kCHOoDF-pXo_0 cat kCQIRLEi88s_0 person kCefZaEK9M4_0 person kCt3G72NjyY_0 motorcycle kEx2sgiyKpY_0 dog kG5vclMyg7w_0 skateboard kHIZAi1E9gU_0 cow kH3Hwla_MUM_0 person kI7523l1Tu4_0 horse kI7523l1Tu4_1 horse kLwsGbEsMjs_5 elephant kLwsGbEsMjs_1 elephant kL52zPMgsXM_0 truck kMIRREOoSt0_0 elephant kOqKBgGRd_c_0 boat kQu7xcJmp6w_0 airplane kRLl2HLijWc_0 elephant kRqsESioKVM_0 person kSWUU8Ef-Rg_0 cow kSXkd4PYX9M_0 bear kSm9E8WwGYY_0 person kTT6onfYUug_0 bicycle kZcfsku1oJ4_1 bicycle karZg0Iifks_0 skateboard kavU8zKXrEY_0 elephant kbD6iXQ3P6M_0 cow kb4GuHpwuSw_1 cow kdPgKSrjVYQ_0 train kd9Tn_hyeb4_0 dog keka7aToy_E_0 person ke2Ap6Zvq64_0 cow ke2uXJrB9WQ_1 
bird kfL1KEY53AM_0 person kfMMMSNZWeM_0 giraffe kgcb2y-aw8s_1 truck khicinfB1nY_0 person khr1-lWZOOw_0 bicycle kixX1ga8yrw_0 person ki51QTz_6iw_0 bus kjhcR5ljaDU_0 car kksfStf04pc_0 person kk41Jjw-BpQ_0 horse klxQpVdft5E_1 bicycle kmIUPZSNl5A_0 airplane knFBzlhmDMk_2 skateboard knFBzlhmDMk_3 skateboard koomOoaIF0Q_0 motorcycle ko4el3e0QFI_0 bird kqE2rNzUnvU_0 cow kqJJ6_2vGtU_0 motorcycle kqiHy-EzdcQ_0 airplane kqiHy-EzdcQ_1 airplane kqiHy-EzdcQ_2 airplane krD5WtdljCc_0 bird krR-lFUTXHo_0 cow ksbdMzGs-gs_0 person ksbdMzGs-gs_1 person ktCRlGt6408_0 train ktcXRj-Vz6c_0 bus ktcXRj-Vz6c_1 bus ktvaX1ALzwE_0 motorcycle kwMNSTE0h8U_0 bus kwMNSTE0h8U_1 bus kwyn-eed9l4_1 bird kx2jH9V7vYM_0 train kz0gVW9uWkc_0 skateboard k1C25MTUso4_0 person k1Y6Y1yocF0_1 knife k1qT5GtPmQo_0 bear k2fCUP9H4cw_0 skateboard k24lvYKkK5g_0 boat k3hYFu55iGE_0 person k3hYFu55iGE_1 person k3pTU4KNdvE_0 train k4tqy4pdlNs_0 horse k5MmpG9afSM_2 bear k5UoGZZb_RY_0 cat k5oey7bw5kA_0 person k5-IPGgeCPc_0 person k5-IPGgeCPc_1 person k8OboASs470_0 skateboard k8OboASs470_1 skateboard k9COlD7u1tI_0 knife k-tdE0VAFkc_1 person k-tdE0VAFkc_0 person k_E-cIymiis_0 train lAZQZSK_9bk_0 cat lCc5-WmCZJk_3 dog lCc5-WmCZJk_5 dog lDWAsuKkv5Y_1 bird lFObiVRO-BQ_3 airplane lGAGodreVjQ_0 train lGJB2hhw5pI_0 cat lIbOGzXhSW8_2 horse lI-A6pFtkLQ_0 train lI_jxWxWivM_0 dog lJXfbIuwTIQ_1 cow lJccP5OJjZ8_0 train lKBO-dakd8w_0 train lLyfm0vbHrw_0 train lL_4QscWdx4_0 person lM0yKqnWblw_0 person lNJbOSFK9N4_1 skateboard lOFTlhNmKD8_0 bus lOQf3A_3lPI_0 horse lOWmL3mpSeA_0 train lOvB2zlHw8w_0 dog lO-XTKPQb5I_0 train lPapZHOAdzk_0 bicycle lP5lgBlsH0U_4 airplane lP5lgBlsH0U_1 airplane lP5lgBlsH0U_2 airplane lQDy9Mri-18_0 person lQsTpo0uOIw_1 boat lQuFC-E7VUM_0 person lQuzpkDKFQ8_0 person lRuif4Zc7CI_0 boat lSZa4pAHgV8_0 horse lS-5gEkB0_o_0 motorcycle lTTquh-jLwM_0 car lThBPb6HI1U_0 cat lVeIr8AFTjY_0 person lWT2t48q164_0 motorcycle lYSpeuL7-oo_0 umbrella lZOTAg9Fofw_3 bird lZVwQoLPjBU_0 giraffe lZVwQoLPjBU_1 giraffe lahDGDRe7X8_0 horse 
lcKDCt1eWqg_1 knife ldQGB8gzRjA_1 cow ldhdyBduVoU_1 cow lf_tYVzrap0_0 person lge9f_bgAOk_0 person lgzIpgcvPvU_0 person lhNv9zDa1ug_0 car lhadIxHkaVg_1 person lhadIxHkaVg_0 person lhnQuOIF-2c_1 person ljLO1myCfoA_1 knife ljayNZQpp-I_1 horse ljayNZQpp-I_5 horse ljeTwRM6DWE_0 person lkvdy3Hejpw_0 person ll6gTyUguMY_0 horse ll6m5MTpf4o_0 person lmpKSF0cXSc_0 train lnfEV2dRfm4_0 motorcycle ln0_FGR8B08_0 person loVlMj9Dhkk_0 truck lotZh71qMks_0 person lpcqEaZD_Xk_5 bicycle lpcqEaZD_Xk_0 bicycle lpcqEaZD_Xk_1 bicycle lpcqEaZD_Xk_2 bicycle lpcqEaZD_Xk_3 bicycle lpcqEaZD_Xk_4 bicycle lqu4tjd3Zg4_12 bear NE9AhZPTVFY_0 motorcycle NFF4UemeH8g_0 truck NFSj66emNbM_0 cat NGS9BrtLJ0I_1 boat NGvpnRrWSKc_1 bear NHLBjlX2jeg_0 person NHgh88y4e80_1 car NHpM-oBMIRk_0 dog NHrjnZsJWOw_0 person NID_0E0tn_g_0 cow NJQNZ36lsvw_2 truck NJm81cIGO98_0 skateboard NJ22Hynv9s4_0 umbrella NJ22Hynv9s4_1 umbrella NJ7MXR2AaoY_0 cow NKQfFcfr6Ko_0 person NL1iy1TKtRI_5 car NL1iy1TKtRI_1 car NL1iy1TKtRI_2 car NL1iy1TKtRI_3 car NL1iy1TKtRI_4 car NMCijcIa_XU_2 knife NMhR_Z4Rq7g_0 person NNbRF02KnGM_1 skateboard NQiMeD83sMw_0 truck NQiMeD83sMw_1 truck NQsnyZmQoPw_0 elephant NQsnyZmQoPw_2 elephant NQve9Yujb14_0 person NRaAEznVIxQ_0 person NTGqC7kOGAw_1 bird NTRX6gLV_04_0 bus NUSnWbhvmQs_0 cow NVzCor2-ZpI_1 zebra NV-p8Vp-bdA_0 horse NWAQ1is2w98_0 airplane NYIqB-l8eKk_0 train NZ5OIYTIoYQ_0 person NaCksn1bbv4_0 airplane NaCksn1bbv4_2 airplane NaEokN7Nh-U_2 knife NadzcUmXDTk_0 person NbJ2gM5KJTM_0 cat NbJ2gM5KJTM_1 cat NdXmkm9jcPA_1 airplane Nd6ceCmRYBI_0 bird NeXVfNsggZw_0 cow NfEzlo6-i_4_0 train NfEzlo6-i_4_2 train NfEzlo6-i_4_3 train Nhi9730yIzM_0 dog NhskHQ9bqlo_0 cat Nhvr0y1tqjk_0 person NiP4AEjiwxs_1 boat Nio43-cQPh0_0 train Ni_TSyCk1Ak_0 cat NjknyzAAQpM_0 person NlOjGoYPj9Y_0 truck NlTLvOcpoEA_0 elephant NlVEu_8kdoI_0 horse NlVEu_8kdoI_1 horse NljV4UjnFJc_0 motorcycle NnRWY12wxUk_0 person NnVFfTO9-q8_0 person No84NOV3Pwk_1 skateboard NpZj-n9_STU_1 bird NqwxEAASrCo_1 airplane Nr9t7GeBwQY_2 skateboard 
NsbG9FcyTFk_1 elephant NsbG9FcyTFk_4 elephant NsbG9FcyTFk_2 elephant NsbG9FcyTFk_3 elephant NuKyL_c3YcQ_0 cow NulXMVhoGhU_0 knife NuutxSJHULc_1 cow NvkF9R1HsJc_0 car NxTnPIBFKdE_0 airplane Nxjnp7dqCdc_0 cow NxqGplqsmNk_0 person NyKq-nq-KlQ_0 person NzAEnNO5-fo_0 bicycle NzAEnNO5-fo_3 bicycle NzAEnNO5-fo_4 bicycle NzAEnNO5-fo_5 bicycle N0LEywKxW9o_0 cat N0e8A9q9tyU_0 train N1OYtZSKdKQ_0 train N1OYtZSKdKQ_3 train N1pTdHcekjU_0 car N28sspen6dM_3 bird N28sspen6dM_1 bird N3ffRSq8s7M_2 cow N6nP6NLTaG0_0 motorcycle N7Bv6ZMyBrU_0 skateboard N9vkS7ish9k_0 cow N_5Xf4hpanE_1 dog N_5Xf4hpanE_0 dog OBQQMo8mWLE_0 person OCA5rhgrl48_0 person OCLVaKMFCZg_1 bicycle ODI8kcB_dSs_0 truck ODJSlRRM1Uo_0 cat OD4XsgCwIKk_0 person OD9vhbbeBAE_0 horse OEhrO1p2agU_0 person OGOf9vbNJB8_0 person OG8Nfns4uh0_0 cat OHEyq1pCfZ8_0 truck OIV8ASYsqZc_0 skateboard OIV8ASYsqZc_1 skateboard OImLl2ufWqI_0 cow OJktr2-sJmY_0 motorcycle OJktr2-sJmY_2 motorcycle OKbNtRotT5w_2 horse OKbNtRotT5w_5 horse OKbNtRotT5w_7 horse OK-2ALhNWts_0 bird OLpvIpNUgY4_0 person OLyGncmosSs_1 horse OL_lZw3lqE4_0 person OMm3ReCUyGA_0 person ONlvohUS-io_0 cow OOC45SMJl6M_0 bus OPIxLQwJLaM_1 cow OPbyoGG-M_E_0 horse OPm_iAWIO2o_1 knife OR4OEYlOndk_0 motorcycle OSRtFznjiro_0 motorcycle OSUOKZdfiXQ_0 person OS6SXRjK0rU_0 horse OUeSqgMRLUg_0 bird OUrVDMMYK-4_0 person OWBXMvAtmcA_0 cow OWqaj3O-u6E_0 train OWqaj3O-u6E_1 train OWqaj3O-u6E_3 train OWvRHFQJ-5g_1 train OXjc7JlWYwk_1 bird OXpPVrdEoko_0 elephant OXpPVrdEoko_1 elephant OYCDyQPt5rU_0 truck OYRmTydmqZo_0 cow OYugCmogPD8_0 bear OZver3igS6U_1 zebra OZy-0MSWC7o_0 person OZ5z2K-vIYg_0 motorcycle Ob4ur_FS9xM_0 dog OdLj2La07lM_0 boat OdnylLd12pU_0 skateboard OdsXUxBBISo_0 airplane OePFLxtDg7k_0 horse OflyVi689KA_0 skateboard Og9LiinXMtw_0 bus Ojx6OtSIA3k_0 person Omdbd0YsB2o_0 airplane Omdbd0YsB2o_1 airplane OnRL69PzM4I_0 bicycle Oo3Uhz6L-cs_0 person OpEMSVRTyxk_0 dog OpJl0GUiLQI_0 person OptQqflXY_g_9 elephant OptQqflXY_g_0 elephant OptQqflXY_g_4 elephant OptQqflXY_g_5 elephant 
OptQqflXY_g_8 elephant OqmbWcekMxo_0 person OrPfakDZX64_0 person Orwr1k0mKho_0 person Orwr1k0mKho_1 person OtHHLfag4xg_2 knife OumTAMPogf4_0 person OvQFDkMjctE_0 person OyDNx0iCGUM_0 truck OyKi2PGJERI_0 person OyKi2PGJERI_1 person OyhAS52bQMA_1 person OyhAS52bQMA_0 person OzORAIgrZOg_1 knife OzQFkM92we8_1 dog O0o_u_t5Y6w_0 bus O2TgLtQU7PI_0 knife O3GPSL92hYw_0 elephant O4UhXpMuxJI_0 person O5PlzlxQuPc_0 dog O5796OHwBy8_0 bear O6cWlrockUQ_2 horse O8s1bsDJrwc_0 person O9dxeSLiF9A_0 skateboard O9dxeSLiF9A_1 skateboard O90WVIgQwww_0 person O9_riOoIpKo_4 train O9_riOoIpKo_6 train O9_riOoIpKo_10 train O_hypcyZCFo_0 airplane lryNU4SKncc_0 cow lrzxlHguluE_0 bird lr7T4YcCWSU_0 elephant lr7T9GuNUMY_0 cat lskWmTPa9Gk_0 person ls34lS6fGzw_0 person lt7kXXW5D-c_0 bus lvdU2uEdpnA_0 boat lv6aYZguv6k_0 person lxXwMvanqo4_1 boat lznoTW8tuLI_0 bus lznoTW8tuLI_1 bus lznoTW8tuLI_2 bus l0J9Km2lk2I_0 person l0TirY4L7Es_1 horse l0TirY4L7Es_3 horse l3yFwpak_LA_1 horse l38pNVKwDeo_0 bird l4sdxYUAiJQ_0 person l4_P74HRriU_0 person l5GlzRyX39s_0 person l5GlzRyX39s_1 person l5WawiGWVxg_0 person l6cEGnOtFZg_0 airplane l682n6ZmpNk_0 person l7Mmo3ow8qo_0 person l7kq2yqxPQc_4 horse l7kq2yqxPQc_2 horse l8r-mOc3-3U_1 person l9QgZQGtQWI_0 motorcycle l-4jrxgMGTQ_0 skateboard mAEnlKe67pQ_0 bicycle mAhzB1TH8mU_0 truck mAj62XUNkIM_0 horse mBgSYaKydZY_0 person mC5X6MO2y9A_0 person mDf5zsFFweg_2 knife mDf5zsFFweg_1 knife mFbUnWMAreQ_0 person mGDfepYDRRE_0 person mHFxPudSk8c_0 motorcycle mIFnGYdf0po_0 person mJm2UYBiD8w_0 cat mJo7aqOfRww_0 airplane mJ6qCcS_-AQ_0 person mJ-DsFbUPUg_0 motorcycle mKBs2L-xwdU_0 person mLVHfKExUNU_0 boat mMdGNbPpLKQ_0 truck mMy70TxInmA_0 person mNpEoUW_OPI_0 knife mOFqvrGzJiE_1 elephant mOFqvrGzJiE_2 elephant mOkmKyBZoXI_0 person mP6-RR-Vuv0_3 truck mR1y0XlZhQ4_0 person mTeNKWTwFcs_0 person mU7E6pi9PFU_0 bear mU7E6pi9PFU_2 bear mWeNwTJwEmo_0 person mWhw719wEH4_0 person mXBKJjrxqmc_0 knife mXekeIascCc_0 person mX_4T1I2ux4_0 dog mYwEvpKN2-Q_0 train mZ0VxiELg9A_2 motorcycle 
mZ0VxiELg9A_0 motorcycle maiqraHgwgg_0 skateboard mbZZ48h5pnY_0 person mboIIChd8tY_0 bicycle mcR2Fi6wQj8_1 train mcR2Fi6wQj8_0 train mciQ3fR1QTE_0 truck meAfvCGeyyU_0 person me-WjezBU4U_0 motorcycle mflX-nwtpzs_0 skateboard mgSJL9uL49w_0 bus mgSJL9uL49w_1 bus mhDnVhRMCHc_5 cow mhDnVhRMCHc_0 cow mhDnVhRMCHc_1 cow mhDnVhRMCHc_2 cow mhDnVhRMCHc_3 cow mhDnVhRMCHc_4 cow mhIULm3ssFk_2 airplane miJ1b0bNn9M_0 person miLapj3u_5g_0 cat miR8Xeb7SM0_0 umbrella mi4j0PrR-Gs_0 truck mi4j0PrR-Gs_1 truck mjSUb46nTjs_0 horse mj2ClgQE_Q0_3 skateboard mj2ClgQE_Q0_2 skateboard mj_R3ENyiKM_0 person mnOoqy7I3L8_0 skateboard mns4vFzs4_8_1 skateboard mns4vFzs4_8_0 skateboard mnwyrMq92so_0 person moBNY2JjuEQ_0 cow moc2yPvW_JU_1 person mpA3PWbdVWc_1 bus mp-cHp44pXo_0 bird mp-cHp44pXo_1 bird mqI9CDpsCDE_0 cat mqYD18pFqm8_0 person mrnDERbyZcM_0 skateboard mtO9ioY8AHY_0 person muk5R25UV1A_0 person mungFWJMSsg_0 dog mwRNyFvem8g_3 truck myYMS85ltwo_0 skateboard myiCWmM3XN4_1 dog mziKTFuKVco_0 person mznC1uLm_j8_0 skateboard m0z25TJV2vU_0 person m1VAqMAJ-Lw_0 elephant m2DUDsR4tWA_1 bus m2Sr_Q8JpcI_0 horse m2Sr_Q8JpcI_2 horse m2Sr_Q8JpcI_3 horse m2-nK6oZ08E_0 horse m2-nK6oZ08E_1 horse m3u_pETGaMw_0 train m4Ozpr8E1EE_1 train m5mSFt43spE_4 motorcycle m7VhCUoV_Dw_0 person m77tPf0Ulb0_0 person m8THukZrE7w_0 person m86BSOvJvS8_0 person m9hdxJE9HQE_2 train m95nb4Vl_R0_0 elephant m-Ry10-IgWg_0 horse m-sLdoVujlI_1 bird m_25GAJYGHE_1 car nAO2Y4kF7b8_0 bicycle nBllCINiO-4_0 train nF_NlCSUpFo_0 cat nIO0ZNZi6n0_0 person nIiXsRSLxZI_0 person nIiXsRSLxZI_1 person nJO5eQXPS0M_1 horse nKfhxWUyc4I_0 elephant nKfhxWUyc4I_2 elephant nLUyCQwkCds_1 motorcycle nMW7WsVKd_E_0 truck nO14Z3ggnZs_0 truck nO16C5NBMQQ_0 person nO16C5NBMQQ_1 person nPJJOI4j3UQ_0 person nQAqVHkffhY_6 train nQAqVHkffhY_7 train nQAqVHkffhY_1 train nQAqVHkffhY_5 train nQrJJZvmF74_0 cat nRu8IVZXzCU_0 airplane nR1Ng3PnYoU_0 cow nSUBF0RYH1o_1 bicycle nTfgyYqyO_Y_0 person nTtqkLze7eY_0 horse nTtqkLze7eY_3 horse nTtqkLze7eY_4 horse nW4sAWZ6dHQ_0 bicycle 
nXYeq3IDOFo_0 truck nXgq-W7J6ho_0 person nYGQy8peDYk_0 person nYHjMb7HoK8_3 bird nYIUSRVmY30_0 person naMdRxX0924_0 train na6hNW8gSx8_0 bus nbojUStyLvY_1 person nbojUStyLvY_0 person ncZiTQHehfk_0 person nefS_k9oFMI_0 person ngE_mlmsaqY_0 person nh4AR9Mjwmo_0 bicycle niQ2DNNlBSM_0 person niUnVyYTszc_0 person njOQqZ1pBGM_2 boat njP6uuU-G6o_6 bear njcuqdNTGfM_0 person nj8ALe3wC9c_0 horse nki1SdWtdCI_0 cow nk6FezKWYSY_0 bird nmNSM48p094_0 knife nmRZQdp3xRk_0 person nn8WcALmZ7c_3 bear noTnh5A2OHo_4 boat noTnh5A2OHo_1 boat noWsAcioI8g_0 train noe-qNQfJBo_0 bird no-b9_3kXiQ_1 dog npAPemisdEI_3 boat npGL0Kl16f0_0 person npGL0Kl16f0_1 person nqZya6Vk3iY_0 cat PAdHnsQ5png_0 cat PAi_eJ_z59w_0 skateboard PBPViL9vBZQ_0 motorcycle PBS3-SzLV2A_1 horse PBwR_Jdod_g_0 knife PCJWOz32Js8_0 person PDmAbS9Afkc_0 truck PE8yxnkayr0_0 person PE8yxnkayr0_1 person PFKrDvQuKII_1 car PFb83m0smRg_0 person PHunbTKqKwk_0 train PH5VqmGrnXs_0 cat PIG9w10uliw_0 bus PIo5FlB1sf4_3 bear PIzyVPr2kvQ_0 person PI_spS2t57M_1 horse PI_spS2t57M_0 horse PJK-c0HQksg_0 bear PJUvXC0Eumw_0 airplane PJsCV-lA78A_0 elephant PJ0Y1xQ7ZJo_0 horse PJ2kZmkL25Y_0 person PKGRn71TQGQ_6 airplane PKGRn71TQGQ_1 airplane PKtLlpi00cM_1 skateboard PK_UdRSa36U_0 motorcycle PMDSUC0_Ytg_0 bus PNxobv7rkRU_0 person POWngj1oBhQ_1 train POpePYwyHWY_0 bus POu1oPwNd4g_0 umbrella PPeaYnqzi9g_0 person PPjAhD3i-v4_0 bus PPqkkhaUIdE_3 bus PPqkkhaUIdE_0 bus PPqkkhaUIdE_1 bus PRaq5kZmO2A_0 bus PRyc4Vp0s00_0 bird PSyuR_D5C2c_0 cat PTLtv0VJ0_s_0 person PTM6VrBcP80_0 dog PTewrgfas9o_1 train PT6u63wHOhs_0 dog PT_tMCTzlSc_0 person PV_FZhj_0hI_0 car PWZIO2hdNRU_0 person PWiyz8b24es_0 airplane PXxs6Hzx7Pk_1 zebra PZ3X20r0oVc_1 bird PZ3X20r0oVc_0 bird PdPZkfHUOq0_0 person Pd9bh2hiWAk_0 person PeA8729U1jg_0 boat PeJxY7YFBTA_0 knife PgFIqGCjnc0_0 horse PgNvdw3Zges_0 umbrella PgjeF-iHzLk_0 person PgyVMv-RRL8_0 truck PiI1e3aKeos_0 person Pkju9RRBRAU_0 person Pn01hUEOICo_0 bicycle PoI-RFl6jqU_0 bird PoI-RFl6jqU_2 bird PpX6lJOP6ng_0 person Pq1kVNudVJo_0 boat 
PsPMm45bDZA_0 bird PskTcGACgjw_0 person PsrCCNATJd0_1 elephant Ps9peKxde4U_0 dog PvCZZzw4FKw_0 person PvQVqhtqTVk_1 person PvQVqhtqTVk_0 person Pv3IqqHid-w_0 airplane Pv3IqqHid-w_1 airplane Pw7zlPV9yh4_0 motorcycle PytUHdEhipQ_0 airplane P0FylASL6h4_0 person P06NLpHGLb8_0 truck P06NLpHGLb8_1 truck P1FTUN2gJkY_0 person P3JAtlf2-VA_0 cat P3MhJa_p-dU_1 truck P5MpdcJgQrI_0 skateboard P5NEco_Rqas_0 motorcycle P5NEco_Rqas_1 motorcycle P5v3n_5s-F8_0 horse P7i0pgLo9kg_1 car P8E7gprJa1s_1 skateboard P8_7-uFl2Go_0 bicycle P9dDbodBY8s_2 motorcycle P9dDbodBY8s_0 motorcycle P9dDbodBY8s_1 motorcycle P91LJh-_E0Y_0 cow P-FrYGR7Bf0_0 person P-phCIDPeWw_0 horse P-27cmR3CZE_0 knife P-_MzAIxz2E_1 knife QBAxag8dq6Q_0 cow QBfotDmdDkk_1 skateboard QBrAST1Q2iE_0 person QCCt8ooY4qg_0 person QCjqG8908mY_0 cow QEDWauqnaSk_0 skateboard QEGY7Dq2x9s_0 horse QE0MjXjSFjU_0 boat QFS35qERdLE_0 person QFeMKKxurVg_2 horse QFxep-yih-s_0 truck QFxep-yih-s_1 truck QGN2-Iqa4QQ_0 person QHPYpnJSf2s_0 cat QHhkx3CSiWk_0 person QJ1W4Pajbv0_0 person QLmFsJCZy_o_3 knife QMRFisCEGQc_0 person QM9Kddu2XcQ_0 train QObG-uf4v68_0 motorcycle QOjAwmQ_7vA_0 person QPtMbvxzFuE_2 bear QQC7AIIJg2Y_0 elephant QQLrVBS8VSo_0 person QQLrVBS8VSo_1 person QSTf92HwJS0_1 dog QSTf92HwJS0_0 dog QTjiYkMuDGI_0 knife QTqvJZS8ZNo_0 elephant QUIxOZH8N8c_0 person QUUgu5YvS1c_0 person QU7X6RkjKPE_1 boat QVUI5ZkkDsA_0 person QVnam2Ma6mY_0 person QY1rz6k86s0_1 person QZS3V-7xnAA_0 person QZWqiN4OA_A_0 person QZk1HSA90KA_0 knife QaUHYb5os4U_0 person QahBgQXhNfo_0 cat QbOvfWFyPzg_0 dog QbOvfWFyPzg_1 dog QbPvdKEmnrI_0 person Qb4RNeQYfPc_0 boat QcLa-GP2ITc_0 person QcLa-GP2ITc_1 person QdeUvHCiXwc_1 horse Qd0chk9vUQ0_0 bear QeISQLJERxg_0 person QfJeJLieLew_0 cow QfJk-eDxmKE_0 person Qfkb-gc72qg_0 cow QgPao5AkXFU_0 skateboard QgiX6-1aN-4_0 bus QhGx_MwYnWs_0 person QhIp71nr7Vk_0 dog Qk_VhG5lt1Q_0 cat QmRFPW81gZc_1 truck QmfJmQuF1-I_0 bus QmuLT1MpdP8_0 person Qm2yaeiexlI_2 motorcycle Qrd-Q3XrT3A_0 train QszBg-eN7F8_0 cat QtBYK8AxWCw_1 person 
QtpKcTyf4n4_0 knife Qtq2m-MV2q4_0 cow QvY9ysq30EI_3 elephant QvY9ysq30EI_5 elephant QvY9ysq30EI_0 elephant QvY9ysq30EI_2 elephant QwJNOYFZ3W8_1 elephant QwTIODgGfOM_0 person QxLFtmn_Igw_2 bear QyyPl-aCFUs_0 cat QzETtzOBUaY_0 person Q0HpPvC0bKA_0 person Q0M_Fog02Yw_1 horse Q0UrlXLNioY_1 umbrella Q0tQtb1npx4_0 car Q0x55aCCNxA_0 person Q31q8b3CSN8_1 skateboard Q4rAM1058Z4_0 horse Q4rAM1058Z4_1 horse Q5G2n-3zXX8_1 person Q5G2n-3zXX8_0 person Q5X1kisU8Qo_0 person Q6hwtMw2jkU_4 skateboard Q6hwtMw2jkU_3 skateboard Q7SViqj0bEg_0 dog Q83xNK10WK0_0 bear Q-lTGQgTOEg_0 person Q_rsZh5VqdY_0 person RANBJV7BN3k_0 person RAmxGTzr25A_0 person RBccU2wq7Qs_0 knife RBclSX-7rYQ_0 person RDiehz1pFVA_0 knife RD7nVPZTGEw_0 skateboard REBfrgEC_3U_0 knife REh7f-__WqU_0 cat RE40E9-qdHE_0 horse RFO8tA6rfbo_0 truck RFbhEQ4qN-A_0 person RIfxXKT-_88_1 skateboard RJZgo3_JEPs_0 person RJi5ZRGQb-A_0 person RJxPTuKUKjk_0 horse RKFpQfRSYIc_2 motorcycle RKFpQfRSYIc_3 motorcycle RKFpQfRSYIc_4 motorcycle RKFpQfRSYIc_6 motorcycle RKFpQfRSYIc_7 motorcycle RKFpQfRSYIc_8 motorcycle RKFpQfRSYIc_9 motorcycle RKFpQfRSYIc_10 motorcycle RKFpQfRSYIc_11 motorcycle RKFpQfRSYIc_0 motorcycle RKFpQfRSYIc_1 motorcycle RLcZcFP03fA_0 person RN6TzMbUlyg_0 airplane ROdg8e5a0Fk_1 cow RPwZjkygYo4_1 elephant RR-fksDmQTU_0 dog RSLwmLbf3No_0 horse RSO2IDZGDus_0 person RSQ7pHT5sU4_1 cow RSWyviTCTqk_0 cat RTAQO62dbRo_0 horse RTONY5PqRUo_0 skateboard RT0mh9U0YDc_0 person RUAbb66fW18_0 bicycle RUW8xYh84q4_0 dog RU0u42rf0Hw_2 truck RU0u42rf0Hw_3 truck RU_8ryQNxC0_1 bird RWJfJx1nXNQ_0 bicycle RWo2zaceWcc_0 bird RahqzUIhIkc_0 cow RawtpxzAbmM_0 person RdZGVs8pH40_2 skateboard RdZGVs8pH40_1 skateboard Rdge7lmfdc8_0 person RfVv6ECZ78Y_3 bear Rfa2If7RJTY_0 knife Rfa2If7RJTY_1 knife RfvNPPjs-bw_0 boat Ri3O4rz5S2o_0 boat RoMemRfbKkc_0 person RoNJ0fP0VUU_0 person RqAANAYxYz0_0 person RqqaUsDM-aI_0 person RrnixlsQyn8_0 person Rr6AsTlUNKQ_0 person RspILw0UAM8_0 person RsyjwcMkRrY_1 knife RsyjwcMkRrY_2 knife Rt1reRy5GVY_0 person RuFIanBmYzM_0 
bicycle Ru9ksAvNYc0_2 cow RwVTAYsyWMo_0 person RxWOvD9i9Ig_0 car RxtS3kGOYoc_0 bicycle RxtS3kGOYoc_2 bicycle RxtS3kGOYoc_4 bicycle RxtS3kGOYoc_6 bicycle RxtS3kGOYoc_9 bicycle RxtS3kGOYoc_12 bicycle Rx9YjtdgOEI_0 person RyVdNK-PCyg_0 person RylJTxUTfF0_0 skateboard RzdsXt87bVE_0 dog R0biK134LTQ_0 person R0n9cqLQE4E_0 skateboard R3rDAaPE_s4_3 truck R45uCINxuVY_0 person R7IE_IohaIk_1 airplane R7IE_IohaIk_6 airplane R7IE_IohaIk_0 airplane R8Zg4uo1QpM_0 person R9d1vlii7cs_8 truck R9hRCG8pAHM_0 horse R9hRCG8pAHM_1 horse R_xLhXpHgp0_4 skateboard SAeiSpeFynU_1 bus SBmb0VU07rs_0 boat SCLi5OFtzQk_0 skateboard SCaWHsWzxqY_0 person SC18zgZ9Diw_0 bus SDCTiDVOdW0_0 bear SFA4mVjImxk_0 person SFoil_6CvbI_0 bird SGsRwH8YxQg_1 airplane SGsRwH8YxQg_11 airplane SHSsDGmwywY_0 cow SIZ3AYCr7PQ_0 person SIv3Hcq1ge8_0 elephant SIv3Hcq1ge8_1 elephant SJkZwyPxUTg_0 cow SJqduSR9h4g_0 elephant SJwgIeOkfTM_1 horse SKoDZimqLV0_4 bus SMF8aDGwELI_0 giraffe SNbBUZtngzM_0 person SNnofRkUk8w_2 boat SNqtno2pOzc_1 dog SNqtno2pOzc_2 dog SQn8ueHVBWc_4 elephant SQn8ueHVBWc_6 elephant SQn8ueHVBWc_1 elephant SQn8ueHVBWc_3 elephant SQ4tDbbdzr8_0 train SQ4tDbbdzr8_2 train SSjgAjilS8g_0 person SSwA_nC9rr0_0 person SThjw6JeBnQ_0 person STuEo8vap08_0 person SUHEgX-8bo0_0 person SUwLfCebumU_1 bear SUwLfCebumU_2 bear SVUAFI7bHqQ_0 person SWedQv5UnQo_0 person SXWo-zKZICs_0 person SYT4odK3Dwo_1 bird Sc_CAareVEI_1 elephant Sc_CAareVEI_6 elephant Sc_CAareVEI_7 elephant Sc_CAareVEI_2 elephant Sc_CAareVEI_3 elephant SdzIWTR-rkc_0 person SeBOeRzwqrQ_0 skateboard SeU_71ydaeA_0 elephant SehCD9wP-Pk_0 person Sf9OdV3i3I4_0 person SgglaVke5lo_3 boat SgySshdgJrQ_0 motorcycle Shves64RCp4_0 cat SiotcXGUwAs_0 person Sj56u4dFe4k_2 person SlR9qCk_m9k_0 motorcycle SlR9qCk_m9k_1 motorcycle SlZZmtOGyeE_0 airplane SlZZmtOGyeE_1 airplane SndDcPzB8Hc_0 cat Sn2SGmheI-Q_0 person Sn9gOBw9bf4_0 person SoiA6jtejG4_0 dog SpbyBYH0OjI_0 person Sph2g6B-X2M_0 cat SpjssmEyc_o_0 airplane SqHtdCP5Oao_1 horse SqHtdCP5Oao_2 horse SqLiHZHzp9w_0 person 
SqoR7vKYzCY_0 horse Sq-Xok-ea7U_0 person SreiPFJ6vBw_1 boat SsMS0eIy2Ws_0 person Sse7vXMMO6E_0 person Suaush4Da4s_0 person SvPL8gOREaU_0 knife SwaILKCtBVA_0 truck Sw4B_VFic3M_0 skateboard Sw7L3wImbSA_0 person SyldRIQbAGU_0 person SzVyFmQ28Xo_0 car SzkobSwGTMk_1 bird Sz2bTIe9kTo_0 airplane nrEv-Plh45s_0 bear nt_BXwq_xhA_0 giraffe nuCdww9iIOs_0 horse nuMeNIi1MPY_0 person nuMeNIi1MPY_1 person nui8beXjUlU_0 elephant nui8beXjUlU_1 elephant nvMXQKwroRY_0 person nvaO13WFhos_0 person nxBkP48NgKY_0 motorcycle nxclZ6iCf7o_0 cow nyogtZp3kIk_1 airplane nzf12QyuD4E_0 truck n0tx4V2rF3I_1 giraffe n09NxJcTEYQ_0 person n12ITkwyzvM_0 cow n15n46culQU_0 person n19nqH4078Y_0 bear n2F8uNrgh1U_1 elephant n2daSQR_dTI_0 motorcycle n3Eb6Cf77Vg_0 airplane n3aHtfCo_aw_0 person n3fhSGUvtH8_2 knife n5alwWwFPb0_0 motorcycle n5osSY0_BSo_0 person n5-RrJI-Lxw_0 person n6I0k52pV18_0 bear n8xNf-PRHnc_0 truck n9AUV2KuhLo_0 cow n9zSAZMj2Mk_0 knife n-I-WnLfnqE_0 horse n-QBM6yD7RI_0 bird n-eDiuWYJUc_0 person n-1FhryZboM_0 person n_Cv1LzGol0_0 person oBixVhXVcmY_0 person oBjIRWu_BWA_0 truck oCCV0-mP2R4_0 bus oDlSzIkDJGM_1 car oDnobYn8maE_0 person oDrYXyIN9xs_2 dog oEcyeE0kNFc_0 horse oElAgrukyOk_0 person oE0bjG0z-nk_0 person oGDp2b_LvDA_0 bicycle oHu9fCIhAjs_0 person oIQuiXJzEUI_0 person oIYCDBqfT6I_1 elephant oIZHf-r5C3w_2 bird oI3ETWYxCi8_2 person oI3ETWYxCi8_1 person oJAivZwYxDE_0 person oLTHGMleOxk_0 car oLTHGMleOxk_1 car oMZczwLgR1Q_0 boat oMZczwLgR1Q_3 boat oMZczwLgR1Q_1 boat oMZczwLgR1Q_2 boat oNMf32fzYvo_0 person oOi9E4se4ww_0 person oOp7fTxc8qY_0 person oOp7fTxc8qY_1 person oQcVQukPVdA_0 horse oRacxmfNaSM_0 cat oSwwku39aC0_0 skateboard oXHr2yBfL3Y_0 cat oXfOERZ2kMs_0 cow oXlK1t1qisA_0 person oYw8UE0VSFk_10 elephant oYw8UE0VSFk_1 elephant oYw8UE0VSFk_5 elephant oYw8UE0VSFk_8 elephant oY5CyHk-QEo_0 person oaHCd7KI_Fc_0 airplane oaK_EfFOb7o_2 skateboard oaK_EfFOb7o_0 skateboard oa5NT5mX--c_0 person oa838tg7QCk_2 elephant oa838tg7QCk_3 elephant ocJUmpBIBOo_0 person oc7XeYj7dOE_0 skateboard 
odjK5W70JaE_0 person oeYHzAMgoQ4_0 skateboard ofynEJHRTz4_1 person of1ISNDelz4_0 cat ogJGxnVqTWY_0 cow ogNqc-uHzQ4_0 umbrella ohkrDDXUwjY_0 person ohrYGLaImow_0 cow ohxeFH800SE_0 skateboard oiftoNj28hs_0 elephant oiwU7UpO9S4_0 person oi4GfdQBxyc_0 person ojiIyU5ibT0_0 person okPcGR4BRQM_0 person omsmPSC4u3A_0 airplane onH8ELLteHg_0 motorcycle oo3eTJKpErU_1 elephant oo3eTJKpErU_2 elephant opWm4bW5B9k_2 truck opYiNVXmySg_0 skateboard opkxXg1s8ZQ_0 horse opkxXg1s8ZQ_2 horse opkxXg1s8ZQ_3 horse opkxXg1s8ZQ_4 horse osYXdQYkiPQ_0 person otKNUa-KgUg_0 car otKNUa-KgUg_1 car otOxAXKskbI_0 boat otU4Zd1n65g_0 bear otqOLpbz4LQ_0 airplane ouK26Crplso_1 car ouSUKHZs1Dc_0 person ousG5WHZq8I_0 elephant ouwAzKpUG7k_0 train ovQiwCBG8Eg_4 elephant ovZ4In0kLUg_7 bear ovZ4In0kLUg_2 bear ovZ4In0kLUg_6 bear owW-da7Tdls_0 person owtKQFT_gNk_0 person ox0mlEooWI0_0 skateboard oyuMudJ9EM8_0 person ozRJI9h3tks_0 horse ozRJI9h3tks_1 horse ozvxKPrfdo8_0 dog oz11xvTIbvM_0 person o0QRA7gPhBI_0 giraffe o02m7tfad28_0 person o02m7tfad28_1 person o09Ks_UmmkY_1 train o3eHOnTMxnU_0 airplane o4PVsZPaxOM_0 train o4PVsZPaxOM_1 train o4VOx1SeRKY_0 bicycle o4VOx1SeRKY_2 bicycle o4VOx1SeRKY_4 bicycle o4VOx1SeRKY_5 bicycle o4VOx1SeRKY_1 bicycle o4VOx1SeRKY_3 bicycle o7wb_t8x0D8_0 person o8KS5SYj0GE_6 bird o8YfQD0GA00_0 person o9gD7-MVkJ4_1 bus o-IwJTgdr_A_4 bird o_sONKO9OMk_0 person o_7RumsdAcE_0 motorcycle pAVwx70oxIc_0 person pAthLZfnXaM_0 person pAthLZfnXaM_1 person pBWgDW8f6II_0 person pB5-haagdS8_2 bird pEUCkpfCcaw_0 boat pEtOW-iQZCA_0 person pE-OFVB2lzo_0 train pHsAHiqdb-c_0 bird pIHbW9IMV2E_1 airplane pIHbW9IMV2E_0 airplane pINK56mkS-E_0 cat pJBMnX2HBFo_0 train pJ6wkaE8-iY_3 elephant pKFd8IXz4K4_0 boat pKnRcv--qEI_0 cat pLvGIJc0ETk_1 cat pMKMeBQzCC8_0 dog pMKMeBQzCC8_1 dog pMgX9KscZSg_1 train pNG0qeNr-Vo_0 person pNWXXO380uQ_4 dog pNWXXO380uQ_10 dog pNWXXO380uQ_1 dog pNWXXO380uQ_2 dog pNWXXO380uQ_6 dog pP84ZurhiFY_0 umbrella pQAJTPvkPj4_0 bird pRArAdUzaKg_0 person pRVlgxVhtuA_0 cat pRy6kU2p41E_0 cat 
pS5AzmSvRPY_0 horse pU9s744_T6o_0 truck pVR9b-qG1Ig_0 giraffe pVR9b-qG1Ig_6 giraffe pVR9b-qG1Ig_7 giraffe pVR9b-qG1Ig_1 giraffe pXLbIBluyAQ_0 bus pXfO7xO-99w_0 cat pYXDml6lcAY_0 motorcycle pZCCPMu42GA_0 person pbFuk0oX6a8_0 bicycle pbFuk0oX6a8_1 bicycle pbFuk0oX6a8_2 bicycle pb3p83fw9bg_0 person pcUV4ja1VRc_0 truck pceUU6aj_ao_0 cat pdyhFh6-rCo_0 bear peBxgn7gXlw_1 motorcycle peHZd4qdOMI_3 boat pe00hbvqjDI_0 person pe_73GR1-NI_1 airplane pfED6WafVwQ_0 bear pfpKoO-GjGI_3 truck pfpKoO-GjGI_1 truck phXjZ1yxWD0_0 bus phec6_yC2HY_0 person phjJhuKxT5Y_0 train piGT-hRYHHQ_0 horse piN1RiueJhY_0 horse pjLei6UAHsE_0 airplane pjLei6UAHsE_1 airplane pjZqJuEX1ow_0 airplane S2FTgueR-80_0 person S2FTgueR-80_1 person S3U383sqlRs_0 bicycle S4UDIyyqmlY_2 motorcycle S6h6E0IKO6Y_0 dog S73sRU7b2dk_0 person S9QmlxGGxGM_4 knife S9goDsKFXAg_0 person S-qgaqzenIE_0 person TBpnes8Z-3s_0 person TCtRzPGrwls_0 horse TCycfRWpg0s_0 elephant TDKDtLliMhg_0 person TDlLgW8Fjes_0 person TFcak4kNd2c_0 person TGFSBSitWNw_0 cow TISjnLr1r-k_4 giraffe TISjnLr1r-k_5 giraffe TISjnLr1r-k_3 giraffe TJsLSuQcb7E_0 horse TKadOIk-uPI_5 truck TK61mJMHqTE_0 train TK61mJMHqTE_1 train TLxcXucOpWw_0 skateboard TMaLrtjFU34_3 cow TNNXwm3Bt5I_0 bicycle TOLyNcTSGPA_0 person TPglVxQN85I_0 dog TRH4PZkAkiE_0 person TSl3wSreplo_2 bird TSl3wSreplo_0 bird TVuX76wWzwY_0 person TW9LBSqxNWo_0 bicycle TW9LBSqxNWo_2 bicycle TW9LBSqxNWo_6 bicycle TXD-idarfhU_0 person TYsJu2G5WVY_2 knife TZdDUMDyozA_0 dog TZfFEYUY5_0_0 boat TZsigdW7Qfs_0 airplane TaL6ssJD8z4_0 airplane TalhQQ9B7vc_0 zebra Ta-JBO0InZk_0 horse Ta-JBO0InZk_1 horse Ta-JBO0InZk_2 horse Tbm_BFLOPic_0 train TcRl6wotFw4_0 horse TcR9fR_SWLg_0 bicycle TeiC-tObc4o_0 bicycle TgRRY3Mn0Ro_0 person Ti411VXWtAc_0 dog TjCiDUNoDi0_0 skateboard TkktEeCiSAo_4 knife TkktEeCiSAo_5 knife TlXSJmmN3dc_0 motorcycle TnB8G7eZm24_0 person TnY1qP0YQQ8_0 person Tnc7CCuk78Y_0 person Tn4trDBJAqE_0 person To8VzjtX70s_1 person To-lnvpzIKY_0 person TqKcS4Cx7wc_0 bird TqvuyyM_x4E_0 bird TqvuyyM_x4E_1 bird 
TsM45PkaTj0_1 bird Ts4iqmKVRy4_0 knife TtI1W2xFQ5k_0 person TtI1W2xFQ5k_1 person TtnuIzV01ek_2 train TtyfhN-jWcc_0 person TuEArk4EFWg_0 person TuEwZSEUe5A_0 person TuOnAlE6TRs_0 airplane TubHgt_FxYo_0 person TufSi0uSU8M_0 person TvUmQi32j08_0 person TvUmQi32j08_1 person TvuhORVyaL4_0 person TvuhORVyaL4_1 person TwH6hv5zVIU_0 airplane TwSnlq5Kma0_0 skateboard TxV4qpdgJ3Y_0 airplane TxV4qpdgJ3Y_1 airplane TyIzjLHGvjo_0 person TzUMxAOWWcc_0 bicycle TzVawH7veiM_0 bicycle T0WCoXgklkw_0 person T0r5yfzMs4g_1 bicycle T24d3EHv2GE_0 bird T406qi8vIlk_5 airplane T406qi8vIlk_2 airplane T6XxSbeAl6Q_0 motorcycle T8e9Qi4dcNY_1 bear T95G52MuPFU_0 horse T-PL14w9TV4_0 cat T-cOBQACeAw_1 bird T_2A3L49ah4_0 dog T_2A3L49ah4_2 dog T_2A3L49ah4_3 dog T_2A3L49ah4_5 dog UANkhHNWM-M_0 person UAnl6TGZhxs_0 cow UA5VCImEZ2Y_0 dog UBdNIuCPaZ4_0 car UBdNIuCPaZ4_2 car UBsG3-ocU64_1 boat UE40h6VhUaU_1 bicycle UF8l_MU2rj8_0 person UGCPxfU7FKM_0 person UG5FFY29OV0_0 cat UHO129a_p0U_0 airplane UHYwdGF9W-0_1 horse UHYwdGF9W-0_0 horse UIvJPTYu6Hc_0 train UI4IvmmFIPQ_0 person UKExOybWiRM_0 motorcycle UKExOybWiRM_1 motorcycle UKkr05PKrb0_0 bicycle UKlB9mDIXss_0 person ULdZGJs5ta8_0 motorcycle UMsR07JXCYs_0 cow UM446G0Lud4_0 knife UOUaveJ_TWA_0 person UO_zNFtEt3Q_0 person UPkEE2dnlkU_0 elephant UPkEE2dnlkU_1 elephant UQAJPD_gH7g_0 cat UQDXdgIlpDg_0 knife UQibn_ZNp9Y_0 skateboard UQibn_ZNp9Y_1 skateboard USAjeRaDlJ0_0 person UTqlz0i9KIo_0 person UVTPHohbCV0_0 person UX4dpwv6qWE_0 dog UYAtAlnvVy4_0 skateboard UYc0lVVxayQ_0 dog UcCtmXy5F4g_0 dog UcbWaG8GwRs_3 airplane UcbWaG8GwRs_2 airplane UceYFW8-zZM_0 train Ucse975FqUA_0 elephant Uc5PAhXhIzk_0 umbrella UgsSu7wC28w_0 bird Uhj0HRMHPXY_0 person Uhsh3JUb_aI_0 bicycle UisVwousE8g_0 cat Ui8yPflhqHs_0 person UjMTd3LCxyQ_0 person UjMTd3LCxyQ_1 person UlFA0xDQcS4_0 skateboard UlhZSONgFCI_1 cow UlhZSONgFCI_2 cow Umvp1XgX6Qc_0 person Um-FzEOyncc_0 person UnUlhJaHWlA_0 bear UnyyMjT0BCc_0 horse UsCJdEa7tq4_0 dog UsCJdEa7tq4_1 dog Usrv7_ONvi0_0 horse Us6dL_WD7xg_0 truck 
UtyaA_QRIrQ_0 truck Uu9k1VohpvA_0 horse UvptsJcl_ms_0 person UwtHiozuyRs_0 person UxPh-hnwal4_0 truck U2LvNquzuZ0_0 bicycle U2LvNquzuZ0_2 bicycle U4LhReaGH70_0 person U64eMon0R9w_1 person U74o2HGsFeI_0 dog U853uMV0qAY_0 person U86p5VtUC6c_0 knife U9YbGyTBb5k_0 person U99ENpOmVGI_0 airplane pmrTy1xQ5kI_0 person prJIAYsv8bQ_0 truck pramqy_Y1gA_0 boat prlcpxzCoyc_0 bus ps-nNC6Equg_0 cat ptF2Hqj7DGk_1 motorcycle ptF2Hqj7DGk_0 motorcycle ptPi712LDq0_3 bear ptU4EDudgg8_1 bus pt6v3JZFi4c_0 bird puifEp7W50E_0 motorcycle puifEp7W50E_1 motorcycle pu0G99aVryc_2 car pu0G99aVryc_0 car pwFqv42foTM_0 person pye4y8sPr9I_0 person py0U90-ZTkI_0 cat py2dhJjpOaI_0 bear p19EU6tw9oM_0 person p2DntTqvGT4_3 car p2DntTqvGT4_1 car p2QsmFuYxdI_0 train p2TTKNDiGv0_1 bicycle p4pf9W4qt8s_0 person p40Oqh_akS4_3 bird p43GludvR_g_0 bicycle p5F9hHDkbKc_0 train p7UAl7_bv4s_0 bus p8KQvF1DyLg_0 person p8YhfWsz1JY_1 person p8YhfWsz1JY_0 person p8gE3VpTAR4_0 person p84Z-poVaAw_0 motorcycle p9ixpjYEEag_0 motorcycle p-J_LbVq7CU_0 person p-SJ_Ym5pTA_0 cow p-XasPaki0k_0 cow p-cJamorAiY_0 person p-2rgSte1DI_1 bus p-2rgSte1DI_2 bus p-6u3d8YV70_0 person p_YVPahadQ4_0 elephant p_YVPahadQ4_1 elephant qDP6_m4bDRA_0 horse qD8NS4r2Gd8_1 train qEjyhyeCIR8_0 cow qEjyhyeCIR8_3 cow qEjyhyeCIR8_1 cow qEjyhyeCIR8_2 cow qGiLjP8-EVQ_0 person qHYuGyp8_HU_0 bear qHZsnSLmqEY_0 person qIJo1R3rHmQ_0 person qJI7mnjOp0A_1 umbrella qJOaXM8s-Yo_0 knife qJOaXM8s-Yo_1 knife qJugj62heF8_0 airplane qKqEqxMZHVg_0 person qM566R4U4Ug_0 bird qQbEwbtvdRg_0 person qSR2E4eqjqI_0 skateboard qSiMwC5e5_I_0 person qUGXSXCXUbw_1 person qVCH1ozivyk_0 person qV9Ll-N_rpc_0 dog qWpIdTdBIQU_0 boat qWpIdTdBIQU_2 boat qWpIdTdBIQU_3 boat qXaS7daelL4_0 person qXfnmaLtO-M_0 airplane qXwXdnrUo5w_0 train qXx4Vj-HwkU_2 bus qYf_XBAUa_o_2 elephant qZFwurCX4DM_0 train qZH-IY7bBzg_0 person qZQcY5PTh10_0 cat qZVUho1xBlo_1 truck qZVUho1xBlo_2 truck qZVUho1xBlo_0 truck qbYjOWN6n70_0 horse qceiUxIt1VE_0 car qcjVVDAbHUI_0 person qcmbCgcy3co_0 person qdNXPwWD9_Q_1 
person qdzu1EFDYUE_0 cow qel4U0nmQOI_1 person qfp7BvAtQa8_0 person qgKnno5T6f0_0 motorcycle qguyMwcAj4M_0 person qhb1bts1fSM_0 bear qheo-lRVpfk_4 knife qheo-lRVpfk_0 knife qheo-lRVpfk_1 knife qheo-lRVpfk_2 knife qheo-lRVpfk_3 knife qhmscyJC8dM_0 elephant qh8xnvGfllE_1 bird qh8xnvGfllE_2 bird qipZi2kaQyA_3 person qi3hoxEao_g_1 person qi3hoxEao_g_2 person qptB3_MZagA_1 horse qp5tJGAi9h0_0 airplane qqL9gnwx87g_0 cow qqL9gnwx87g_1 cow qq4_m1S3AOI_0 person qt6FFVa8DGM_0 person quoX4193twY_0 dog qvMRVm660LM_0 person qvZGFb3CbxA_0 bird qvcNxorHqCc_0 person qx647iZCsoE_5 umbrella qyQFBM_7mBw_0 bird qywYqT8IzaQ_0 skateboard qz4S2Tn1Jkk_0 person q2qEXqY43ws_0 cow q2v3AmGBH-M_4 train q2v3AmGBH-M_1 train q2v3AmGBH-M_5 train q2v3AmGBH-M_6 train q3TB2Rnymkg_1 truck q3pYgC4-lrs_0 elephant q35X7FnaiGw_2 bear q5BC4AVKV4c_0 person q6nXZqEmQGQ_0 person q9MXoyUF-BU_0 person q9d2hPrip6k_0 dog q_dqx0-AtKk_0 person rA595TIyUgY_0 bird rBko9NgVOX4_0 person rB2323YW1iA_0 cow rDQ2hcIWoBY_1 train rEXtAqxJj8c_0 person rGVf1BsLfng_0 cow rHvp_Dghuho_0 person rH33U6qgd9M_1 umbrella rIqhuv94Zuc_0 person rKN5E25jozk_1 person rKN5E25jozk_0 person rLbBCTSGdzc_0 person rOoxhMEKcgc_0 bear rPEIT9eAAMY_2 bicycle rPEIT9eAAMY_3 bicycle rPUzTjaLdkk_0 cat rPuPm0ctC3s_11 train rQHtu5_Piv4_1 cat rQKV6GBQuag_0 airplane rRH0VLQDJZQ_0 person rSF1UQ01lZc_0 person rSSbdX8817Q_3 dog rSu82skaMJQ_2 skateboard rSu82skaMJQ_5 skateboard rTIN784f0CM_0 train rTIN784f0CM_1 train rTIN784f0CM_3 train rTV3ev-xyuk_0 train rTYmEM2Lhew_0 bus rT4P9ZJeBG8_0 train rT4P9ZJeBG8_1 train rT4crgFLycE_5 bicycle rUJ7zeax1zY_0 person rV1Baq6-C6Q_0 elephant rWyf2iqpfng_0 horse rXf2T3VO-kI_1 cow rYkLuW5NLic_0 train rZi9k9F8S1w_1 person rZi9k9F8S1w_0 person rbIYpEELMQc_3 horse rbIYpEELMQc_2 horse rbMVAO2mJiY_0 person rbn7_DeuItc_0 elephant rcF4-O7o_Qk_0 person rcF4-O7o_Qk_1 person rc96rbja6VI_5 skateboard rc-e_NDrZDM_0 person rdBSfuG2KBA_2 boat rdBSfuG2KBA_0 boat rdQvGZDUDJA_1 person rdhiEKvYF0w_0 car rdnDsUHCZSY_1 cat rePM3_x9tqw_7 person 
rePM3_x9tqw_4 person rePM3_x9tqw_5 person rfL51BZGldc_6 truck VCkpd_d1z4U_0 airplane VE-3PfVw5-Y_1 airplane VG2QbeXEwec_0 elephant VIQGgTWrg00_0 person VIr_rdbfvQQ_0 horse VJVWk9wyMjI_0 cow VJmgPBopcB4_0 horse VJ0by87MRoI_4 bicycle VJ0by87MRoI_7 bicycle VLSeTnShp54_0 motorcycle VLSeTnShp54_1 motorcycle VLSol2tA9WY_0 elephant VLcSoFR7qBw_0 car VMDBBz7G-Pg_0 motorcycle VMmtrv5OtMQ_0 boat VMxS4op_OBg_0 person VNCLtdahLmI_0 bear VNCLtdahLmI_3 bear VNHGw5Sj0Qc_0 person VN8_N7Ceofk_0 cow VP0WD1miM00_0 horse VP20LIiI9S4_3 horse VP20LIiI9S4_7 horse VP20LIiI9S4_1 horse VP20LIiI9S4_2 horse VP20LIiI9S4_5 horse VQWxUc9QOjU_4 bear VRtl4gAWELM_0 skateboard VRt9s3OQPzo_0 person VSLdNogDia0_0 bird VSrmwgo-veI_1 boat VTqoizpYNeI_0 car VTqoizpYNeI_1 car VTqoizpYNeI_2 car VTqoizpYNeI_3 car VT11p8szxZY_0 cow VUVAbtGJbuE_0 person VUh5jCDWj08_0 cat VUl6vkX7PRU_0 airplane VVn3XeSqijk_2 motorcycle VWTes_MfrOc_0 knife VXNEqQb5C4Y_0 motorcycle VXT0TH9jfZo_0 elephant VXZscyYzxqw_1 person VYYS45KWEgo_1 dog VYr49ml0uaE_0 person VZj4RHsnOWU_0 person VZqdzb_qI2g_0 person Va81siK4zeI_0 umbrella VdLqI43E7eY_0 cow Vd5pCJuOoDM_0 car VfBrelUfLFg_0 cow Vgpm6fwLIns_0 motorcycle Vhc7DKkRHOo_0 dog ViQIgBdCkh8_0 car VlBlBgxUa-U_0 horse Vlq4fYmrr6g_0 car VmVN4E_qtfM_0 person Vm9-f0pXycc_2 bicycle VngapMBo560_0 cow Vou-Sfzlpu8_2 train VqdeO4pa_rc_0 elephant Vqj-Qv5bVyE_0 person Vr1Wqz5_UA0_1 cow Vr1Wqz5_UA0_2 cow Vr1Wqz5_UA0_0 cow VsAo8VBzDTM_0 person VsOw_U6hYRY_0 motorcycle VsOw_U6hYRY_1 motorcycle Vsyd7-_CUA0_0 person Vs2JphYinjk_0 giraffe VtdrYDJFw-Y_0 person VtkV11WZWEc_0 cow VuDA6sPAa9U_0 person VuLf3ZTqniM_0 dog VuW2wDK-uZI_0 motorcycle Vv-z9_l8_ms_0 bird VwdZHZPjlT0_0 cat Vwkf0U9PZvI_0 airplane VwppYMiCI1g_0 umbrella VwvER7iR2YI_0 person VxG5gvk1mfo_2 elephant VxH52JoUd0I_0 person Vxyq13mC_uk_0 person Vxyq13mC_uk_1 person Vyf_VJEQ1jE_0 airplane V0CjVa5_1P0_0 horse V0sliERbCxI_0 person V0sliERbCxI_1 motorcycle V0w_hBBqe-g_0 person V1ufPW4ictQ_0 skateboard V25H8smvzbM_0 dog V56RVnEPG54_0 motorcycle 
V6rg5et7Q14_0 cat V6rg5et7Q14_1 cat V6_XA2w3sTs_0 boat V7CVQjk9-Xc_0 skateboard V8Pv-I4ovPs_0 person V9m1dMbXxug_0 truck V9qvycn1a3E_0 train V-ZKLxW5cuM_5 horse V-ZKLxW5cuM_2 horse V-ZKLxW5cuM_4 horse V-iFCgvAuCg_0 person WBcYTIQ65Ow_0 person WB6uQ708AxE_0 bird WCNpGdfG8nk_0 person WCZ4ZQ5ohf4_0 motorcycle WGw94BtHxYE_0 bird WGw94BtHxYE_1 bird WG1DuTb70bQ_0 cat WItuBm7azO0_0 cat WKpjUNNgKG0_1 person WLZkZ-4Y9fY_0 cow WN5u1Y1yGkA_0 airplane WP5JXCVRe9g_0 person WP5JXCVRe9g_1 person WQ603pEp_1k_5 airplane WTEO_Ywn9AI_0 umbrella WTw46mBWjOw_1 airplane WUvTKLEimNw_2 truck WWcVr4lbq3E_0 person WXETP4eMyD0_0 cow WZWh1M3qGAc_0 truck WbXmf511q4E_0 horse Wb9i7jssQsY_0 motorcycle WcUFxXISmb0_1 motorcycle WcUFxXISmb0_2 motorcycle WcgQXl6I-Ks_0 car Wc6RwJ_8yts_0 person Wc_-Q9ba0zs_0 airplane Wdh2SMcRQ2M_0 horse Wdh2SMcRQ2M_1 horse WfZR-VRmSB0_3 boat Wfl0LOShC_I_0 bus Wh9avYClECA_0 person WixZlWbnBdM_0 person WkvpcaxQTSg_0 dog WlFD1z5akJc_0 person WlK6sU21od0_1 dog WlP5_pcua1U_1 truck Wl1vbjfAxeA_0 dog Wl1vbjfAxeA_1 dog WmNKtcf5iLM_0 person WpxEmYBfqSU_0 elephant Wqb84sv1P68_0 cat WrClMyPxaDk_0 person WrSS3nc07hE_0 cat WsFZj4Bgtwc_0 bicycle WvGCvwHutAc_1 airplane WvUiJ8ZRRfc_0 bird WvUziN47FfY_4 horse Wwx2Vce-1oM_0 car Wx0zNFqSUZo_0 horse Wx1qid26zsw_0 dog WzCI6AqY7cg_0 bus WzrI82-Ak4I_1 motorcycle W1juH0nZ8v0_0 airplane W1yEDHYLG1Y_0 truck W14Nt0_EGQg_0 person W17CFtB5Oy4_0 truck W1-9iBLd1lg_0 person W23FACVBLgI_0 person W3Bv11o03TQ_0 cat W4cKlmHvXZ4_0 knife W4gR7_z77A0_0 person W4iSCn6ILJs_0 motorcycle W7xlWK7cuEI_1 skateboard W8U3FkkaVbc_0 person W8d2hNOMHpQ_1 horse W8yL4Qnuo4k_0 elephant W86rN6nrllQ_0 person W9lLrNUFQ9M_0 person W975mcNRX7c_0 boat W-sCMBY47ck_0 horse W_QxijO2VBw_0 zebra rftE7M9tNqI_0 person rftE7M9tNqI_1 person rhWLgPl3lt8_0 person rhjcRHB4crY_1 bicycle riNqBOlFCuw_3 dog riVZCbT4LDE_2 person rih7ECmHfRs_1 cat rkIzABhjHkA_0 person rk1ByqQSwtI_1 elephant rlWlgyP-3-s_1 umbrella rlWlgyP-3-s_2 umbrella rlWlgyP-3-s_4 umbrella rlqtE0bF9nk_0 bicycle rmVxFro55IQ_0 
skateboard rmxx9X1ytcA_0 airplane rm4XeENehOU_0 skateboard rn9-fIMYEkA_2 motorcycle rn9-fIMYEkA_0 motorcycle roUwF9YU21U_0 person rsne3z-CaDw_1 train rtjlk_iOmdE_2 train rtjlk_iOmdE_0 train rt4Qm6HPVTY_1 boat rvBm-SnbjVI_0 cow rwQl_jKPcyM_0 person rww5DvtCsG4_0 horse rwzjQSTLmhk_0 person ryUMZWWwJUk_0 person r0P-2rp1Hpk_1 bus r0vIwhp5RLo_0 knife r03Za0dP0d8_0 person r09YKBrwa8M_0 horse r3PUq_cy6Mc_0 truck r3cOrAN6BI8_0 train r3cOrAN6BI8_1 train r7WW1Fl-s6s_5 bus r7WW1Fl-s6s_4 bus r7WW1Fl-s6s_6 bus r7WW1Fl-s6s_7 bus r7WW1Fl-s6s_1 bus r7xw4qHLKIY_2 horse r7xw4qHLKIY_1 horse r7yOsosLuHI_0 cow r8NwODfEuhI_0 dog r8NwODfEuhI_2 dog r9LAMeOEcsI_0 person r9jyOtbfWs8_0 person r9osF8drSbo_0 person r-Dva6GT-a0_1 dog r-tFy30HVCw_0 person r-0UD9KQhvY_0 car r_sRdP_5WaM_0 skateboard sByCUshWhWs_0 dog sB613NHl89g_0 elephant sB8zpg-GrRo_0 person sD_9McrL3UQ_0 skateboard sD_9McrL3UQ_1 skateboard sEzZ3JnSzaM_0 bird sFxTS449nUg_0 person sG0q9rphsoY_0 cat sIIFHk89TT0_0 person sI17jkxX6tE_3 skateboard sJyknuUaIOg_0 skateboard sKCW1p03okE_0 person sKD6TBNqy6s_0 person sKD6TBNqy6s_1 person sKJ0JtWZeWw_1 cow sKJ0JtWZeWw_3 cow sLZh8XaxoYw_0 person sLfyo1VrX3g_3 knife sLfyo1VrX3g_2 knife sLnYAS4LAY8_1 person sLnYAS4LAY8_2 person sMVMaH9aWHw_0 horse sNV29dtSqYs_1 umbrella sOfNz788QiQ_2 horse sP4jeoUjHZM_1 motorcycle sRb7OHsI6s4_0 bird sV9L8gpGDmA_0 motorcycle sWbk2Sw9Rew_0 person sWfMpwviOCA_0 car sXSjs2EV61Y_2 knife sXw73oA1Tq0_0 horse sX5GCwZG8d8_1 bus sbkHA-DWPSI_0 person scyRfbyCzJU_0 cat sc15m4_lcvw_0 person sdAAObJErSA_0 motorcycle sezamC2zGqg_0 bird sf76JIFYKB0_1 cat sgHdQYSWPXg_0 car sgU4wTZ6k5s_1 person shXeONsfVmU_0 person shiIdcOonRs_0 person siFucH6jjIs_0 boat siFucH6jjIs_1 boat sj7NOYq8KBA_0 person skEWWsL6k9g_0 horse skl1lsZUG4k_0 person sm346w9J4zA_0 knife snZjH03fjVk_1 person soNDR07vxhQ_1 person soNDR07vxhQ_0 person sofKbpbuX84_0 person sofKbpbuX84_1 person spVw0PNXErs_0 dog sqLiQtbkEO4_0 cow sqv-uPhtxwk_0 airplane sq-wqsIw5hw_0 train ssspgc75B08_0 giraffe steKGH-8MZw_0 horse 
steKGH-8MZw_2 horse sts2vAv4BQo_0 person suERIXWx_z0_1 person svCBYM2zl80_0 horse swuFjNkTmQY_0 dog syZTh043BkQ_0 horse s0YqBVjRDyU_0 person s1Pd7evRn0U_2 dog s2PyqAoOqrY_0 cow s2x8llFphNY_0 elephant s3WiR_wFUBE_0 cat s3ijyNmvxpE_0 person s4rr5OrSI4k_0 skateboard s5I219neN7c_0 person s5jmkD6lkbU_0 dog s5n7L55KpWE_1 skateboard s7or9ZhEyXE_0 person s74eu-v6aqA_0 person s8W4NK7dWe0_0 person s83wzR7ySyM_0 skateboard s9G4llLAJiU_0 skateboard s9OmvmQH9hA_0 elephant s94ng_sG6Dg_0 boat s-Jnbfjkmak_0 skateboard s-Jnbfjkmak_1 skateboard s-guJTrtfSU_0 skateboard s-yjgHx_YWg_0 train tAGvlfgdOsI_0 skateboard tAGvlfgdOsI_2 skateboard tBlPdyu-syw_0 bird tBlPdyu-syw_2 bird tBryhvKADFQ_0 dog tGyP_SbWsVA_0 person tHA_VdGe90Y_0 airplane tHA_VdGe90Y_1 airplane tHcqw8Cejs8_0 person tHfOMcj62SY_0 zebra tI2i9_rBdwo_1 bird tI2i9_rBdwo_3 bird tKpbcnqu6bY_0 bird tK0pl2_wbWU_2 elephant tLJpuELQgxY_0 person tLa4F5ekKW0_0 cat tLzUBeOwhyM_1 bicycle tMojfxB-9zA_0 person tMp5Y1zucfI_1 train tMp5Y1zucfI_0 train tM3FYC5IVPo_0 motorcycle tNiu2o7-KPY_1 car tOK5TnF8eHQ_2 bird tOL0kPV03Uw_0 train tOlXErF8Z4o_0 horse tPCRXfE_aGo_0 bus tQj85vHtmeE_0 bus tQnUccPTkck_1 truck tQ_Vy-9pvoQ_0 skateboard tSlXTInFXss_0 person tTSVU8IU10c_0 motorcycle tUdWqmNDeY8_0 person tUm_oehvEpM_1 person tVOS6wht6oQ_1 horse tV17SBx-oqE_0 person tXBDRj1c-Uc_0 person tXf9xVs5ZGk_0 train tYKrjpIMYb0_1 skateboard tYciFvRQuec_1 truck tYciFvRQuec_0 truck tY-4fAv_YRU_0 horse tY-4fAv_YRU_1 horse XA65Kh83GmE_0 cow XA65Kh83GmE_1 cow XBNPaOqVqds_0 bird XBUvxtvKWM0_0 cat XByg_hQRQDM_2 bird XDNVcbDkafM_2 airplane XDNVcbDkafM_3 airplane XDNVcbDkafM_4 airplane XD0ydIAwgGM_0 cow XD_iMe4m2vQ_1 person XGX6SRd3ZkE_0 bird XHu9PxuBpXg_0 airplane XIzQLXQTsRo_0 cow XI3_0lXrnfY_0 cow XJq9qp3jhq0_0 motorcycle XJq9qp3jhq0_2 motorcycle XJq9qp3jhq0_1 motorcycle XLgI0VgtzEw_0 cow XL50qkg4qdA_2 elephant XL50qkg4qdA_0 elephant XMIsf8xuMh4_0 train XPi83QmsR90_0 cat XQliC40rP9M_0 person XRKZRwdqhNo_0 bird XSMGAlakHWY_0 person XS5wfvz6XZI_0 bird 
XTWeBFPqdh0_0 person XT0t6ims_FI_2 skateboard XVabRVMuX4Q_0 motorcycle XVabRVMuX4Q_1 motorcycle XVabRVMuX4Q_2 motorcycle XVabRVMuX4Q_3 motorcycle XVabRVMuX4Q_4 motorcycle XYA6HKrVVQQ_0 cow XZBFfRl6DkA_0 person XaVZr4HPh2M_0 cat XalkAzccT5I_0 person Xa6tjMVGH2I_0 motorcycle Xa6tjMVGH2I_2 motorcycle Xd9tLIFo_7E_0 cow XeIssB-JkcU_1 bicycle XeIssB-JkcU_2 bicycle Xevq2dskQWo_0 truck XfUIrHPVj-s_0 cat Xf09qM8SYBc_0 truck XgDJ16iRhxs_0 elephant XgDJ16iRhxs_1 elephant XgDJ16iRhxs_2 elephant XgFaXb7Vb58_0 elephant XgxYznR79R0_0 dog XhOx4rgdI-8_0 bird XhTWW9CwFzM_0 motorcycle XiSjHcHG5IU_1 bird XjXFktrwSOk_0 bear XkpxlUwx4oc_5 truck XkpxlUwx4oc_1 truck XkpxlUwx4oc_2 truck Xkr3OHSz_CA_1 person Xkr3OHSz_CA_0 person XlIxLJTiphI_1 airplane XlSvIczm3JA_0 person XlcJsAWbsyA_0 dog Xmwv-NZZat8_0 person Xm_CKSNQE3E_0 bird XnfAvhHnH6M_0 train XnfAvhHnH6M_1 train XoWHAeOAXg0_0 motorcycle XoXMpm6Yxfs_0 person Xoa_dCJDiTE_0 motorcycle XocaP_gyqJU_0 person XopbyM2SJbc_0 bicycle XopbyM2SJbc_1 bicycle Xr_3UPISgT0_0 skateboard XsK5KxttYBA_0 person XtTLGRBrm3I_0 skateboard XtVTdegdzvI_0 motorcycle Xu6xzBcJySk_0 person Xu6xzBcJySk_1 person XvvA9Zc1TMA_0 person XvwOXlVdehA_1 person Xwqm_wzZDQI_0 cow XxkkXeLqqu8_2 airplane XxkkXeLqqu8_0 airplane XxmNQjB1D_Y_0 cat XyldpxZmUN8_0 dog X0CZDjRqcKg_0 horse X02e7Fj9BLM_0 umbrella X0-n3maCrZU_1 dog X2uXOY9J_UU_0 person X3HCAEcRaW8_0 bicycle X3qbUW_qT7k_2 airplane X4SbOXRpo0A_1 dog X7xm2nZL7jc_0 bear X79vSvy6SOQ_0 skateboard X9L-jwA6Ozg_1 train X9L-jwA6Ozg_0 train X9a5wEDFXc8_0 boat X_TnIuY27eM_8 bird YA4-rm-dcsw_0 person YA-N841dD-0_0 person YB1trUAUzhg_0 person YB2wzBLh7MU_0 zebra YCU3daBCWsU_0 umbrella YCXHNoYaQRc_3 skateboard YCXHNoYaQRc_4 skateboard YDd_skWNTMs_0 skateboard YDyc1Yv9j_s_0 person YEPfw3k3vEw_0 person YEvBzZ5KBYY_1 horse YEz7v7toUwM_0 truck YFQlAc3qTBQ_0 motorcycle YIHcQxH9e1o_0 train YIzqB2G1UvY_0 person YI4lmC3imb4_0 horse YJiqdRcs_gU_1 person YKlWROFtcxc_1 skateboard YKlWROFtcxc_0 skateboard YKoT-GgRSw0_0 elephant YKoT-GgRSw0_1 
elephant YKrdwZe1vq8_0 dog YL97h6yps6w_1 knife YMbqULxZJpg_1 horse YMbqULxZJpg_2 horse YMkOJNatD88_0 person YNEDPsAWm5I_0 person YQXwRsP0zvE_1 person YQgUV8TrYcw_0 person YRWC7Tdc5oI_0 person YTD8j8z44qQ_0 person YTd8Rxtpt1E_0 train YTd8Rxtpt1E_3 train YTd8Rxtpt1E_4 train YTd8Rxtpt1E_6 train YTd8Rxtpt1E_7 train YTd8Rxtpt1E_8 train YTd8Rxtpt1E_9 train YTzuVYGpDhA_0 motorcycle YUhgrCNuMGQ_3 bear YVDCTyDcjjA_1 cow YWRbi_v93Mo_0 person YWhwljQ3efA_3 train YWhwljQ3efA_4 train YXeaiwTZ3ZE_0 cow YXz7CDJ11jY_0 bird YYUo7EkkJeg_0 bicycle YYUo7EkkJeg_1 bicycle YZmhYkqgBi0_0 skateboard YZmhYkqgBi0_1 skateboard YZmhYkqgBi0_2 skateboard YZ3kcrHk4N8_1 horse YZ3kcrHk4N8_0 bicycle Yax1xdgRbt4_0 person Ya2zfpe-_ro_0 bus YcjMrWCSRSA_0 person YdooYDhKq00_0 person YeTYMiaLkWY_1 cow YfvvO_T8j8k_0 skateboard Yf9jBSXQTLo_0 car Yf9jBSXQTLo_1 car Yf9jBSXQTLo_2 car Yf9jBSXQTLo_6 car Yf-okdUBk9g_1 bird YgM058nmMnQ_0 person YjZoPTjqDGw_0 skateboard Yj6XWsgomO0_0 cat YluDona_474_2 bus YmlQVVQx4SA_0 person Ym3lE2u4vxE_3 skateboard Ym3lE2u4vxE_1 skateboard Ym37vW7b0U0_0 cow YnZU-Qa6yeI_2 bus Ynyd8SBB5Wg_0 knife YoFfsRgrNeY_0 person Yof6XFKNuNY_2 horse YorREGtes1I_0 person Yo9XVrgl_GM_0 cat YpDsXa1kNZU_0 truck Ypb0U6Ga5pk_3 train Ypb0U6Ga5pk_1 train Ypb0U6Ga5pk_2 train Yp1kl6xU-Og_0 person YqvGb_tDI38_1 bird YrhvCSxifRc_0 car YtrNZ4mlMw4_0 elephant YvAlZo3quqE_0 person YvwW9T4Qpek_0 motorcycle Yv3YH0nImQI_3 truck YxRG0JQrpwI_0 person Yxia21K4O6I_3 truck Yy0lIDbLxQ8_0 elephant Yy0lIDbLxQ8_3 elephant Yy0lIDbLxQ8_1 elephant Yy0lIDbLxQ8_2 elephant YzTl0Nf0Kpw_0 cow YzT_UsE8Mhs_0 airplane Y0Hz5Hw1AiM_0 person Y1lKSppJhdI_0 cow Y16c_yGYw1M_0 elephant Y16c_yGYw1M_1 elephant Y2jXJzRVhMI_0 person Y2x6ow80IkQ_0 person Y3TtBVfW6gs_0 person Y3ZDfyDvFi4_0 elephant Y3c_6Zv0dxg_1 knife Y3mx4jYyagQ_0 train Y5Atu2VWemQ_0 train Y5BEvakwvuM_0 dog Y64ky0LNHko_2 elephant Y-YU80ccuXg_0 elephant ZBJsNXYIQ4o_0 person taPyucc_cOU_0 person taPyucc_cOU_1 person tafdN9GXP0g_2 skateboard tbLnjlX1xF8_2 bird tbuu2U3o02Y_0 
person tcOx8KjmHPo_0 person tc98WTYT-VI_0 elephant tdIWlg4_01E_1 bird tgRYkhC-gJU_0 person thZqLw7IxVw_0 knife tj2-fSeuMRI_0 bird tmch--OGZhY_0 giraffe tmsInTqqzHI_0 zebra tof4QiBHPQQ_0 person towJyxwm3wE_0 bird to8OyPMfkaI_0 person tpQv6Sn5z3o_0 motorcycle tpcuQY4eNaI_1 bus tpeBIe69wr0_1 bus tpeBIe69wr0_3 bus tpwUnqxQYjo_0 train tqy3XprB11s_1 horse tqy3XprB11s_2 horse tq9WP-2U1QM_0 person tsMTiOeM52E_0 cat tsg-S4Hk2go_0 person ttzJbLLAR34_0 cat tvSJKUR21UM_0 train twewRZpG7Fs_0 cow twxvNeK9FZo_1 bear txDhTthoXSk_0 motorcycle tx0mtmimu0k_1 person tx2PSvwf7FU_1 cow tyem40ZMKGE_0 person tygG1C5DURU_0 person ty3iURJku9k_0 person tzH_tvBDeJA_0 skateboard tzPForR9Ejs_1 train tzvKjCoHBMI_0 bird t0TW8zZxCWQ_0 person t1N1ijCr5NE_0 bicycle t1N1ijCr5NE_1 bicycle t4FZmjCINtw_0 bus t4naVz1a0sg_0 train t4zuUZQozs8_0 horse t5B7vIbyRNQ_0 person t5kzdnId2sI_0 horse t5s4Fs07WLM_0 dog t50QLEhcZCE_0 person t6C6ukC_zEA_1 bird t6C6ukC_zEA_2 bird t6C6ukC_zEA_0 bird t7YFOxuWxtg_0 umbrella t7YFOxuWxtg_3 umbrella t7s424DNznk_0 cat t8MqK7LWqs8_0 airplane t8mVwobdP40_0 boat t_qvtoXbLRI_0 person uAWXGcWWgSU_0 person uAZF38u6SOo_0 umbrella uAzws057QjE_0 skateboard uA1sb8QyXuU_0 skateboard uCZi19CC7rk_1 train uCZi19CC7rk_2 train uCZi19CC7rk_3 train uE5rIJoAafE_0 bird uE5rIJoAafE_1 bird uH0jKXHq7Lw_0 horse uH35b2DEXFw_1 skateboard uH9vcwYxL2s_1 person uIu2jQswp94_0 person uJcu-YlAtbc_0 bird uKJqU3gtIWM_0 umbrella uLPuf056wH4_0 horse uMAkaCYTDuc_0 truck uMYGWhLdrlc_0 boat uMiNpG3NcEw_0 person uMpufBdwRn8_0 giraffe uNpHGE63PdQ_2 truck uNpHGE63PdQ_8 truck uOmCLzEMPGc_0 train uRFXE4UfdTE_0 cow uR8MqB3VgSI_0 truck uS1QmKXc0uY_0 person uTsfiR5FPdM_0 person uT9uk3mtt98_0 bird uUU-VpxxSiM_0 cow uVrW8Mm2xGY_0 person uWyTGtedEqU_1 person uWyTGtedEqU_0 person uarSTtaV_Ps_4 boat ua6Xyj9aWT4_0 bear ua6Xyj9aWT4_1 bear ua6Xyj9aWT4_2 bear ubHgpaAseuo_1 elephant ubijaVodfKg_0 person ubijaVodfKg_1 person ubsr27_dQOk_0 elephant ubsr27_dQOk_2 elephant ubsr27_dQOk_3 elephant ubsr27_dQOk_1 elephant ucUearjcPHk_1 
airplane ucfXE6fw3go_0 cow udlyGSCujUU_0 truck ufB4EORClps_1 knife ufMXT_CmtK4_0 airplane uhm0JnSA-kQ_0 person uiLBqX72k4k_7 boat uiM-lDuYaeY_0 person ujoJwRvjEdI_0 person ujz4u55Tp1U_0 cat ul47aFS8dQE_1 motorcycle ul47aFS8dQE_2 motorcycle ul47aFS8dQE_3 motorcycle umkNI2_0Lqc_0 person umxZfostBlE_0 train um22CD4bkqo_0 cow un6QDPagbfo_1 cow un6QDPagbfo_0 cow up6VT6l38-A_1 skateboard uqn85v1WM7A_0 motorcycle urAYVS5Lz7k_0 person usAsP-m-qs4_0 dog uuhWeHmlvt4_0 person uu3KluYuhc0_0 person uu3pH95cmtk_0 person uwXhzSsAIJw_0 person uw9TxuXeiP0_0 train uxgUbys1eD8_1 bus uzMFzDPfsws_0 knife uzsdMqrgiL8_0 person u14Sp3wCQew_0 car u2BHvsjQGjw_0 person u25Jazd2yJM_0 person u4KPFsw5W5c_0 motorcycle u4oma0FVycA_8 knife u69KRu61wXM_0 person u7xTeWelI-U_3 knife u8mmwwrdNb0_4 airplane u8mmwwrdNb0_5 airplane u8mmwwrdNb0_9 airplane u80Y4lA5xT0_0 dog u85tUrDgmOQ_0 bus u9HkSfjYpnA_0 motorcycle u9rfXD33UIM_0 person u9_P9HFh_NY_0 dog u-_A36Ha04o_0 cow u_D1eyd8AOM_0 car vAUSfFO5UI4_1 dog vFMzMNDlnBs_0 person vGIYDcnNTvA_0 knife vHQkxg7kPUk_0 dog vH0ZiiuSQzU_2 person vH7sKynwjD4_0 person vJypzwSdyN4_0 train vMt5AD41SKM_0 person vMt5AD41SKM_1 person vOY2IRNsjYg_1 person vOY2IRNsjYg_0 person vQ6eOB8rxUE_0 person vRjErSbQNNY_0 person vTa2zdbIyUw_0 person vT2JpCnT6rg_0 boat vWqexY1OdWg_1 skateboard vXbTARLug3M_0 person vYN_Gy6fUbI_0 bus vYhPihwivZs_0 person vaaqJVWoSf0_0 person vadASNfLl9I_0 dog vas3iNRcsK8_0 elephant vas3iNRcsK8_1 elephant vbLhfzHqEKc_2 horse vbSnjtc3vIs_0 cat vcALsxetYU4_0 airplane vc-_aAQAXs0_0 knife vdXD-HTzyFM_0 cat vfeKOPKE6l8_0 person vf7NtV1T5Jc_0 train vf7NtV1T5Jc_1 train vjb_l1_hEXk_0 person vjojFy4rPeo_3 car vjojFy4rPeo_1 car vj_BAwFKqtQ_0 umbrella vklwqjQis8Y_1 cat vlPgSny76H8_0 person vlflI5iuszQ_0 person vnD3gELVAq8_0 person vnyBVn70QLY_0 cat vnzsKpfAS_M_1 horse vpBxBDjiJxw_1 dog vvamB_-Z0so_0 horse vv3gfxFz2zw_0 person vwe8ZaV-4z8_0 bicycle vwtokH03eW0_0 skateboard vwxzh1lJ7iw_5 motorcycle vxmdsyEpU6A_2 bus vx0oKJcOQb0_0 train vx0oKJcOQb0_3 train vx0oKJcOQb0_4 
train vyLqolkoVIM_0 person vzBbUEwED60_0 person vzBbUEwED60_1 person vzU0GH4cZM4_0 cow v0tUEeE4RGc_1 truck v0xTNbrYZY0_0 giraffe v01IvIxWXTo_0 person v1iIhTWRjg8_0 boat v1-PGfS1YCY_0 boat v3LIQHdveBA_0 person v4H5VwQyKEU_0 train v4H5VwQyKEU_1 train v4QYOX-FHhY_1 motorcycle v40pc8KBg0I_2 horse v5YzVj25_hs_0 truck v5lUHsxx0mc_1 skateboard v50Qa_KMCzQ_0 truck v51CdpETaug_0 bird v6UDfM50GIM_1 truck v7XVyg16ens_0 cat v8Kp0jhKsKk_0 person v8ceKkKdqrE_1 knife v8hOOgLXRjg_0 person v8kyeMoFLqk_0 horse v8rj3jIndSE_0 dog v8tktR3aE38_0 airplane v_yEG5_Qm8Y_0 person wCu6xsT18qo_0 person wDHRro9mXuM_0 horse wDcnUJFHguE_0 horse wE8LYkzcq0o_1 horse wE8LYkzcq0o_0 horse wGPW8I8nGmc_0 train wGWIrs5ja0Y_0 bicycle wGyJeWBe8VA_0 umbrella wIapUcRvgTM_0 bear wIapUcRvgTM_5 bear wI0a0fzgy3w_0 horse wJdfgWlSY5M_0 person wJdfgWlSY5M_1 person wK7yIg1qfZ4_0 person wLA244rmq6g_0 cat wLHLSvMwmjM_0 skateboard wL0z6-jkCcc_0 dog wL0z6-jkCcc_3 dog wL0z6-jkCcc_1 dog wL9iOnWhckI_1 skateboard wL9iOnWhckI_3 skateboard wMShicf3N_E_0 person wMyAEfVE_u4_1 elephant wNKWZ43SioQ_0 airplane wNKWZ43SioQ_2 airplane wNWW59wDinQ_1 train wNcjU9-ck10_0 person wODzPBxcT0A_0 motorcycle wODzPBxcT0A_2 motorcycle wOLrGAo0vFo_0 horse wOSL7OPRBXM_1 dog wPRCf3v0EfI_0 motorcycle wQtHgysmmFg_1 boat wQvPlByUvB0_1 knife wSSTL6uuM9Y_0 train wSmVgAahSUw_0 skateboard wSmVgAahSUw_1 skateboard wSmVgAahSUw_2 skateboard wTMj2Gp8wz4_1 bird wTMj2Gp8wz4_0 bird wTtXB0Z2eMk_0 car wV1VMLQfTYo_0 skateboard wWpNKbsF6q8_0 bear wa1KdARQXXg_0 truck wa3jVRzsWGo_2 truck wbmT4LB3lVQ_2 knife wb9x3QDpcYA_0 person wb9x3QDpcYA_1 person wcOuc6Y3Gek_0 train wcjnFIBHoc8_0 bear wdb2-oX7HqU_0 boat wdhqMpQcsjc_0 dog wdhqMpQcsjc_2 dog weH4PvRo2GU_1 bear wgZbNzu2Mdw_0 person wguspvl5Ioo_0 person wg1ZFP15W8U_0 horse wg6XS3q4Vg8_0 train wifl75i2zGw_0 person wiiV9QdYsYM_3 bus wjfHYr4lXU0_0 cow wmfJAE6gu7w_0 person wmjfHsCs1CE_0 person wmn4YG9rirU_1 bird wmn4YG9rirU_0 bird wmx0UeWsPyU_0 person woEUh2mzEkE_0 horse wqD1WkfidVw_1 bear wr5b8Op3LUM_2 bear wuAwZ_wX7jk_0 
knife wuFVuJjgpLk_0 airplane wvadJ-1Ls80_0 person wymDvXB08SM_0 person wzBmon2jJxI_2 bird wzlA0qMLDV8_1 cow wzlA0qMLDV8_2 cow wzlA0qMLDV8_3 cow wzuQhwWLllk_2 bird w0JzCkELpj8_0 cat w0bfVrI7CPQ_0 bear w1j-YVcZpfc_0 person w2WW3bYmA7s_0 truck w247rqoLoGg_0 bear w3F_8A8kY7o_3 elephant w3F_8A8kY7o_5 elephant w3F_8A8kY7o_6 elephant w3adXMIxupk_0 cat w35-xR0Vn_0_0 zebra w5Pb_ORVLKI_0 airplane w6A2W9VQeZk_0 car w6JEUZI5Vh8_2 skateboard w6JEUZI5Vh8_0 skateboard w6JEUZI5Vh8_3 skateboard w7IKxGLuaQA_0 horse w7g5pDCGteg_0 person w8zrFmMpPmc_0 motorcycle w8-ovxjadNo_0 train w93q7lv9In8_0 person w-eAEp0TUi0_0 horse w-eAEp0TUi0_1 horse w_euwPW5ukA_0 bicycle xAUupk4sGI0_0 person xAedjC0r5KY_0 person xAfxJQL2_aY_0 zebra xDgoaE-g50s_2 bear xFnFWM8KXcE_0 person xFzsK94M68U_1 person xGbFeCuGypE_0 person xHOcerZTZxM_0 person xIUJ8zlr0TU_0 bear xIizuktSVrM_0 truck xJ_xdRV9lzo_0 cat xKd8dHsveKg_0 person xMiQuC8eKGU_0 person xMp4dCjzI08_0 cat xMuQzm__4bo_1 person xMuQzm__4bo_0 person xNBT-PZEMH0_0 bicycle xOLvPvBg-8U_1 horse xOtxf0cmHyA_2 horse xPDDIKF9T3A_0 person xRJNEyms-F8_0 train xSIjCyHBypw_0 umbrella xSIjCyHBypw_1 umbrella xSL4NZUmhW4_0 person xUB3mR57tLE_0 bicycle xUtGzUu5Ryc_0 umbrella xU_2MZdWfxM_0 cow xVuNCF2vbXs_0 person xWWnn5OWp4I_0 airplane xYVriT4YV0M_0 person xZLHtt1yjYk_0 truck xZZ_W6fRi8E_0 knife xbL4hiu8qh0_0 horse xbQZucd8eu0_0 bicycle xbQZucd8eu0_3 bicycle xbQZucd8eu0_2 bicycle xcY11ewiUMM_1 horse xd_raY9PCHM_0 bus xd_raY9PCHM_1 bus xeAkz6Kg108_0 bird xeBhbPbmS8w_0 person xfzxTuJ85A4_0 airplane xfzxTuJ85A4_1 airplane xitZyv8gMgQ_1 horse xjdEiJ_z4T8_0 motorcycle xj3FKNXP-cw_0 bird xkKoATbAX0w_0 dog xkeTuOlBIMM_0 cat xlT93OXr3uc_0 person xlT93OXr3uc_1 person xlfOatU3OyY_0 boat xljqBqpwIHo_0 person xl110TqE0kQ_0 cat xmWAmSXnWCY_0 car xo54E-kQcoA_1 boat xpGDfRYqtSE_0 cow xpcNJG8acpU_0 dog xp_ShmZCoDw_2 airplane xqNQIYHzAGk_0 person xrGm-1D2Zqk_1 train xsrHSco3Zcs_0 person xsrNtKa0oZg_1 person xs1kBHxDpxU_0 train xs1kBHxDpxU_1 train xs1kBHxDpxU_2 train xtHE1-GIP_w_0 
person xtXt8Vm3Qps_2 dog xuAm_BWnXRc_1 motorcycle xuAm_BWnXRc_0 motorcycle xucBFquWbi8_1 bear xv4fy9zyuNE_0 person xv6NQvvvIhk_1 bicycle xxEtEzi7YiY_0 bus xxcJJA7hCQY_0 person xxdOVyEU-c4_0 person xyg1xFLohGI_0 cow xyyz5QJ7wi8_0 dog xzC5_r9raeY_0 person xzFcPnglQf4_0 person x0RxwpR4wIc_0 bird x0RxwpR4wIc_1 bird x0nlchdJVJw_0 bear x0nlchdJVJw_1 bear x0q0JMiiw1A_0 cat x0xsHmQGaB8_0 dog x1RBYEheBRQ_0 person x2MJ_zDJY3k_0 person x2Tfa1fMOyE_0 person x29EcPsdK1Q_0 dog x29EcPsdK1Q_1 dog x4h9pGwdSMU_0 horse x4r2tx9_9wQ_1 person x4r2tx9_9wQ_0 person x4uX_33GiJk_1 truck x48Ogx7C31g_0 person x4-I_EckNls_0 bus x4-I_EckNls_1 bus x4-I_EckNls_2 bus x4-I_EckNls_3 bus x5nImw1YH94_0 person x6sZc4EoI8o_0 person x6298plJ-7M_0 cow x7jo9uCmWA0_0 bear x8VC2CXIDBI_0 person x96LXIEQ3SM_1 cow x96LXIEQ3SM_0 cow x-2AUxPCkVM_0 person x-26Z1zy1-E_1 person x-26Z1zy1-E_2 person x-26Z1zy1-E_3 person x-26Z1zy1-E_0 person x_CImXdwsg4_0 truck x_XV2Y3pwDA_1 bicycle x_XV2Y3pwDA_0 bicycle yCYtcDx1zzE_0 umbrella yCaJQKIGAjg_0 motorcycle yCz3VdCGZMA_0 person yDw-9GLrYj0_0 person yF0X9hui-Go_0 person yGD_BY9mQlM_0 boat yIkwS9Vkq-k_0 elephant yJOGbyQ8qs8_0 person yJZU3h3_06M_1 cat yLFd8GdaqBg_0 person yLL5Dv2F1rs_1 elephant yLL5Dv2F1rs_5 elephant yLL5Dv2F1rs_0 elephant yLNuhB7I5iI_1 knife yLNuhB7I5iI_2 knife yLkMk9nMaos_0 train yLkMk9nMaos_7 train yLkMk9nMaos_1 train yLkMk9nMaos_2 train yM9_GnJpXsM_0 airplane yNnOUMUIIno_0 bicycle yOrqtKYEfNs_0 train yOrqtKYEfNs_1 train yOrqtKYEfNs_2 train yPscRV8ebRg_0 person yQLGypU_WiY_0 knife yTZekxz2awI_4 airplane yTZekxz2awI_1 airplane yT-tBu_wqEo_0 cat yVO-nlNYxrU_0 person yV1EsNcE3kY_0 airplane yYIY-K1Hk-0_0 cat yYUnGStTnHE_0 train yYUnGStTnHE_1 train yYr5tuCEb3w_0 cat yY6S-xTKWGc_1 person yaNT5d8H3ho_0 person yahVo8Nqxks_0 person ybCbkJl7tog_0 person ybt9EtMfrdI_0 person ydxMYuiOJAI_0 person ygK39Pz1tKw_1 motorcycle yhp30idsPKU_0 boat yiCMaealOnQ_0 cow yiujj_fUOg8_0 person yjOTRS1-3Is_0 cow yjUDTPRe-tg_1 person yjnR7dP-hxE_1 bird ykQnvD35jxs_0 bus ymoggco-rpw_1 
elephant ynHMWKjfsNk_0 car ynYz6f5FCOk_0 motorcycle yoTs9WxR0mI_0 person yo3wwD8VMLA_0 person yo9gwC7gpEk_0 boat ypC9L5um-ic_0 person yp9kACFk9KU_0 car yqWKo_T-YsM_0 person ysb6LLJ0t-c_0 person yssYMx-tQs4_0 horse yu2v206waMs_0 person yvDdzmW5jGs_0 cat yxURDHgvWrs_0 train yxURDHgvWrs_7 train yyMtxTJNnUM_0 skateboard yzE2GgYffew_0 person y0HZlHGSvHk_0 horse y0ptIotKNVU_1 horse y0qGszhFtUc_0 bird y2BOVk7bg7k_0 cow y2BOVk7bg7k_1 cow y2xzls--cC4_0 person y2_iaWWx-C0_1 zebra y3VNGZBlDb0_0 cat y3hSeUaVwAY_0 bus y34cSfArQnM_0 cat y6nBJ0OUtDs_0 person y6nBJ0OUtDs_2 person y67A9YHKh1U_0 person y8ib31rVZA0_0 bicycle y8ib31rVZA0_1 bicycle y8r2SJltJ1M_0 dog y9hu6CyRi5s_0 airplane y_O1AiuRLGA_0 umbrella y_5uacneFuc_0 horse zAvoyJ0_PSA_0 cow zBtuA6r8o0M_0 cat zCG95maa310_0 person zCnZg9VP1xw_0 truck zDs4lXFLJuM_1 horse zD59UHvdpmY_0 person zESRFobSQMU_0 truck zESRFobSQMU_1 truck zHRsZ9HlcBk_0 person zIDehNZ1yiM_0 person zIvzY3cVVbM_0 person zI5cBWlyAMo_0 dog zI5cBWlyAMo_1 dog zJdOWFEL_CQ_0 person zLflV_7noSM_1 airplane zMhr8GZ1QeY_1 airplane zMjW-G29IRA_3 bear zMjW-G29IRA_1 bear zMjW-G29IRA_2 bear zMjW-G29IRA_4 bear zNFb--FJ2A4_0 person zNF5YxfaNTk_0 cat zNfVxQPGrvM_1 elephant zN8rF-AchY0_1 motorcycle zN9Tz6jp7AY_0 person zOLTybhsJ5s_0 cat zORNq_7nmVQ_1 giraffe zORNq_7nmVQ_0 giraffe zOoxYmqzDyc_1 dog zPvrRc94j6s_0 person zP2DkEcgJFo_0 person zP8Recx-KgA_0 boat zQbeiOf9ljM_0 person zU0g6JCyxAs_2 elephant zVVQ63dPpe4_2 bicycle zWQQBElMPYI_0 person zX9OX5I2574_0 person zYvjN5ShZDI_0 person zYzASiLjHgY_0 person zZ8f7oFIg_c_0 person zbtsVe8RQqI_0 person zb8-yrB5SlI_1 bird zcgArp_fmjc_5 skateboard zcsREBhC1Rc_0 dog zdWtCunlv1c_0 cow zdqJTtHvwk4_0 person zd3rNWQ-OUQ_0 person zgJHKszSf2o_0 person zgJHKszSf2o_1 person zgRxry9FvEk_1 horse zgSx8Y5FaPI_1 knife zhDC_SqN7lQ_0 bear zhNNahIXxC8_0 bear zjQG5PadkFQ_0 person zj4cs0_VpTk_0 truck zkSIG3AE7tY_0 elephant zmDkkM7Buuo_0 cow zmEU5n2Dy8Y_0 dog zmdKmfMPuvA_0 bird znTYxWfU2XM_0 truck zpEtPFxxD5M_0 horse zqE3Jnn6_gw_0 person zqYLN7vCqcw_0 
train zqq508NRpOY_0 person ztMFfJj7jb0_0 knife zt3ojCKnIYM_0 cat zwSnaqQ-5UU_0 person zxiZnbMo3io_0 motorcycle zxiZnbMo3io_1 motorcycle zxzApvuo8Lg_0 person zx0RzA6ts8U_0 cow zyXxWBoTuww_0 person zyXxWBoTuww_1 person zyftQz018g0_0 bus zy0lNSoVB0A_0 cat zzRnX2EiOYU_0 cat z0Tl2FDG69g_0 elephant z1kOi92oBDI_0 truck z1kOi92oBDI_1 truck z1qQ7Ma5C5U_1 truck z1qQ7Ma5C5U_0 truck z18s4h6yW2A_0 bird z2M6XJGE1QM_0 dog z2RqakqNnIM_1 skateboard z29ijVd-dvc_0 airplane z3rcLKwHCxM_1 truck z5-nsuFvaR8_0 motorcycle z7FTg1R3Hik_0 horse z7mLqljZMP8_0 person z709zOu3tM8_0 car z9HO__A5ryw_0 dog z9wpJN1R63w_0 person z-iM0zVi7a4_0 bus z_CQX_gwU_o_0 person z_w1gsSfZhQ_0 person 0AroA_SBRtQ_0 person 0BUPQDR99KY_0 bear 0DDYOUzExSY_0 person 0DGPzzGhUgI_0 person 0DHLS1VDcnA_1 bear 0EeBXB53BQE_0 airplane 0EnI7ZqJvqI_1 car 0EnI7ZqJvqI_2 car 0GzrKbW6Reo_0 person 0G0mSrzOZ2M_8 bus 0G0mSrzOZ2M_9 bus 0G0mSrzOZ2M_10 bus 0IHYTCKh8HM_0 person 0KWfi9m1uZg_0 horse 0KWfi9m1uZg_2 horse 0KWfi9m1uZg_1 horse 0L0JFDbAEZg_0 knife 0Neg9vT08to_0 cow 0NtpuqPU3YI_0 airplane 0N7yCdf7DPs_0 truck 0ORpOxJZo-Y_1 bear 0OqnKMwSULM_0 skateboard 0OqnKMwSULM_1 skateboard 0Pk8OLmmqrM_0 motorcycle 0Pu-_5lNYZM_0 bird 0QKe3M6GiT4_0 person 0Tu3KWEm4SE_0 cow 0Tu3KWEm4SE_1 cow 0TwpPpqiVQ8_0 cow 0U6SmZC1j40_0 person 0VKozmEWjZ4_0 person 0VaX_g70BaY_0 motorcycle 0ZGdpgF-bGI_0 bus 0ZQ_-4ia7z0_0 person 0c-Cwr5rI_A_0 elephant 0c-Cwr5rI_A_1 elephant 0fyRjxenSfY_0 bear 0fyRjxenSfY_1 bear 0f4alYlvEQw_0 person 0gelRcDsNio_0 airplane 0ghRNQFgHow_0 bicycle 0gl1mPRzCqo_0 person 0h9x35zsnyo_0 bird 0iLR3BtDujk_0 train 0iYm4g4D2wY_0 person 0iv0Xw_u-sc_0 bicycle 0i-Nv28lRT0_0 bicycle 0kZSWqFOr0c_0 person 0kidYsWSVvc_0 person 0mbZJnNhckg_0 person 0omh-B4giqI_0 umbrella 0owf_YERias_0 skateboard 0pAMIiK_RDo_0 person 0pm7YRiUKTc_0 horse 0qVc1Whb3GA_0 person 0qwRoiWnwmQ_0 person 0rQzfr4WVKc_0 cat 0sA23Q_HQr8_2 zebra 0sA23Q_HQr8_1 giraffe 0sA23Q_HQr8_0 giraffe 0sfu67JuBFg_0 person 0ss0_Sgy72g_1 skateboard 0tNuUAe5sNE_1 person 0tNuUAe5sNE_0 person 
0txAuEdZYTI_0 motorcycle 0uJKDzuaiys_0 train 0urYbdFc55k_0 train 0utGbb5enqA_2 dog 0utGbb5enqA_1 dog 0vQFT9tfq40_0 person 0viKlMZRKdk_0 person 0v7GMl2k-Sk_3 train 0yCCEL3tl24_0 elephant 0zmzEkQWyps_0 boat 0zraBBQY8ew_0 umbrella 0zyhohOeIM4_0 train 00xcm8_ZTBc_0 person 01CYScp2Yc0_1 horse 01mkUffAvo8_0 person 02zor_ScZfo_1 person 02zor_ScZfo_0 person 03p9Ao9JvpY_0 train 03p9Ao9JvpY_2 train 03u5BWTYiRg_0 train 04Sh9tJvOAc_0 airplane 04UO1jSx2p4_0 person 04gNIg-kFI8_0 person 057f0LfDVoA_1 train 08Nunz5Qngc_0 bus 09jyC-o18uU_3 elephant 09kq3b7cMwc_0 cat 1AcsNm2kiok_0 horse 1BfbSv9ZCu4_0 knife 1BfbSv9ZCu4_3 knife 1BiqFD2BD7Y_0 horse 1C3_qaiKlwo_0 truck 1DHXDdSkk0s_0 bicycle 1DeIbpIRrAc_0 knife 1Dfkbv8bi9k_0 person 1Dz4x50F-RQ_0 dog 1EYL4Mm3dfA_0 bear 1EiH3PTqhLE_0 person 1ExRnJBXYP4_0 knife 1FVN3QOPlR0_0 person 1FVN3QOPlR0_1 person 1GJ0iwyNHIc_0 airplane 1JWHb6FAbmI_0 person 1Knz9s55vjc_0 car 1Knz9s55vjc_1 car 1Knz9s55vjc_2 car 1Knz9s55vjc_3 car 1LmCkh8Dd-o_0 dog 1MmlnQKtd6g_0 umbrella 1M6GhIT94zE_0 cow 1M6GhIT94zE_2 cow 1NThnoBEkmc_0 person 1ONRbj8GKJ4_1 bear 1ONRbj8GKJ4_2 bear 1ONRbj8GKJ4_8 bear 1ONRbj8GKJ4_10 bear 1ONptqLyHxQ_0 dog 1OSa1ptYmzE_0 train 1OSa1ptYmzE_1 train 1Ob23hwFaDg_0 motorcycle 1PSIOY62FBg_1 bear 1Pe9JpKgjGY_0 car 1P8yUGru9R4_0 knife 1RCZCLIZzc4_0 boat 1RGxleB_Ezk_0 person 1RKOWfpa5Dc_0 knife 1RuPxpqNjBI_0 horse 1Tpmsev8onw_0 cat 1TsLUvJiluI_1 person 1TsLUvJiluI_0 person 1UhZKsDTuQs_2 boat 1V-7ErZ83ZY_0 bus 1ZN9xVmQojU_0 umbrella 1ZbSl9tPtbA_0 bird 1Z7CVnRjVT0_0 person 1as5iG4PPas_0 bus 1bFvYEA0U3U_1 elephant 1bveGPhOKuU_0 cow 1cKjzUG0YCQ_0 bicycle 1ceprZO-VEU_2 train 1ecpkwMLabI_0 person 1fOM-kkuRsw_0 car 1ggOn5NDRco_0 cat 1hUe5E9cjiU_0 motorcycle 1iQKKup2m3I_0 truck 1iQKKup2m3I_1 truck 1iSjb4IlqfU_0 person 1i7lugA55RU_0 bicycle 1i7lugA55RU_1 bicycle 1kZMlCvKoe8_0 skateboard 1kZMlCvKoe8_1 skateboard 1kZMlCvKoe8_2 skateboard 1ksBabVqkMY_0 car 1ltK_3kkqfg_4 elephant 1l7LOpfDmXY_0 person 1ohoCoKJLDU_0 motorcycle 1oyjAtaWDZA_0 truck 1sQ3EL13Vqo_0 
person 1tK31PAVNJM_5 elephant 1tK31PAVNJM_0 elephant 1tK31PAVNJM_2 elephant 1tK31PAVNJM_3 elephant 1v2enBiUcqA_0 bus 1wIGd0H1CUo_0 person 1xSI36nguW0_0 bear 1xs-ibIaMMU_0 person 1xyKgJUu0lM_0 skateboard 1zVWBQWZxV0_0 person 1zVWBQWZxV0_1 person 1zqpqKWhr1Y_0 person 10la9pvd-pk_0 knife 11kfBYxzlFA_0 person 12f1R5wMVPs_0 person 12_S_8HkAvA_0 person 1462k8mwVB0_0 elephant 15Lx-nGngUo_0 skateboard 18WxVaz5Ue4_1 skateboard 19A2XM5NIWs_0 person 19UmUpkjRbs_0 person 19oZ30mOTkU_0 boat 1-p8vd0PFQ4_0 dog 1_6ymF7z_iM_0 truck 2ASHEEgYHcU_0 cat 2CF0oQ38cBQ_0 motorcycle 2DM1oM4HFjI_0 motorcycle 2FXE_xO8Mb4_0 bus 2FvnQne8he8_0 train 2GTexq12sBY_0 person 2GTtMvLQqio_4 truck 2GZphW1DkS4_0 person 2HvVFwq85n0_0 person 2Hwu-YpHKw0_0 elephant 2H8AZ00ONQE_0 elephant 2IJ4H46ZxEE_0 person 2INYBScuPM8_0 car 2IqEaQ0oyQg_0 airplane 2JN_uMTDa9I_0 skateboard 2KWlj_ZAw94_0 horse 2KWlj_ZAw94_1 horse 2KWlj_ZAw94_2 horse 2K2gLrhP9AU_1 airplane 2K2gLrhP9AU_2 airplane 2K6iDBPdcHk_0 motorcycle 2LBHZoJ5skk_0 person 2L3uwdhZtV0_0 car 2MJHsLxKUBg_0 person 2MiqTBWBlEc_0 umbrella 2NjC1r6v4IQ_0 person 2O-2zfQxbnA_0 person 2PaTs4s2Ybw_1 bear 2PaTs4s2Ybw_7 bear 2PaTs4s2Ybw_4 bear 2Pa1anwpeKE_0 person 2Q3_TaV8vcg_0 dog 2Rc-oAwMJBs_0 horse 2Tp0YJi7JwQ_0 giraffe 2UpHhiQWzD4_0 truck 2VZlkg5HjME_0 cow 2WTwzNufol8_0 dog 2WTwzNufol8_1 dog 2WtNxQ0RBfc_0 person 2ZXlS-GRWAw_0 knife 2Z6wSOr0jLI_1 person 2a5TUccpQ08_0 dog 2a_-AyOXTXg_0 skateboard 2cFRz-musVA_0 airplane 2cFRz-musVA_1 airplane 2cFRz-musVA_2 airplane 2cFRz-musVA_3 airplane 2dZFWL9XGmw_0 cow 2fCH7TpvtlM_0 train 2fCH7TpvtlM_1 train 2fJ1hPXpiQc_3 knife 2fJ1hPXpiQc_0 knife 2gGuKs-4t94_0 boat 2i45n6p8AT8_0 person 2i_wjgk6DiA_0 horse 2lK0mmHTvB8_3 train 2lK0mmHTvB8_1 train 2lqlNq6aII0_0 skateboard 2lxPwFW5YQo_0 umbrella 2l2gnrYWuWQ_0 truck 2l7MPXzF64M_0 cat 2l7TuAfDgO8_0 truck 2mO7-ybapaQ_1 umbrella 2nqGkC9ebf8_0 boat 2oA7J6HSmt8_6 bicycle 2oA7J6HSmt8_9 bicycle 2tSpb14o7SA_0 person 2vF8Va9DGSM_5 bicycle 2vF8Va9DGSM_4 bicycle 2vF8Va9DGSM_14 bicycle 
2vF8Va9DGSM_15 bicycle 2vF8Va9DGSM_2 bicycle 2vrbssf2sDM_0 truck 2v808Hn8_do_0 person 2v808Hn8_do_1 person 2yEUVUqYMPc_0 giraffe 2ya3SN5pLyU_0 car 2065vf90oIM_0 person 2065vf90oIM_1 person 21GQbN_4k9M_0 cow 21Hp5g5RrOc_1 person 21Hp5g5RrOc_0 person 22iFltXYCcQ_0 cow 22ztStWwd8g_0 train 22ztStWwd8g_2 train 22ztStWwd8g_3 train 23qU2q5u0OE_6 bird 24Zxq5TuxzI_0 cow 26kWe8Ikgxk_0 bird 28AecePdVok_0 truck 281z-ZLrI3g_7 bicycle 281z-ZLrI3g_4 bicycle 29bWSLuiEl0_1 person 2_R2wz82ugQ_0 umbrella 3A4oCDgMkHw_0 cow 3A-dEIjnmyE_1 skateboard 3Bag9o-z-Ks_4 bear 3DN2iQJzM-k_0 train 3DaASBRARLQ_0 cow 3D8wwibqkYo_0 cow 3EtIKWgGaKY_0 person 3FJ4ZWRq_S0_0 person 3GLXlSuXWcs_1 cow 3GQxmRKhMMY_1 airplane 3GQxmRKhMMY_2 airplane 3GQxmRKhMMY_3 airplane 3GQxmRKhMMY_4 airplane 3GULyU-IOhA_0 person 3HFqP9a97kA_0 bird 3IgOwKkKALw_0 cat 3LruhG4SULI_1 truck 3LruhG4SULI_2 truck 3LruhG4SULI_7 truck 3LxUuC1C4y8_0 bird 3L7LWpMShiw_0 skateboard 3L759GhRx6M_0 person 3MiM8HSul5A_0 cow 3MiM8HSul5A_2 cow 3MiM8HSul5A_4 cow 3M9T5RFr_9s_0 person 3OmdALGspY8_0 person 3O4ynxtRIDk_5 train 3O4ynxtRIDk_2 train 3RLrjX-XB98_0 person 3RhgYReCxjo_0 bus 3S-lQgiUWVU_1 horse 3S-lQgiUWVU_0 horse 3UDEQElT2yQ_0 train 3WhmVhG1ZwU_0 boat 3WrB7zPpcHU_0 cow 3XDvXaNmGpM_0 dog 3XDvXaNmGpM_1 dog 3X29L9uQCqc_0 train 3X29L9uQCqc_1 train 3Y7-acGE4Wc_0 person 3ZBYYBUfT6E_0 train 3Zwa4XoeZcA_0 person 3bSWlbx1o3I_2 bear 3cOMDXFxcOQ_0 cat 3dvUlr2yxz4_0 train 3g4c88ocJ38_0 skateboard 3hMszgfh_qA_0 bicycle 3hR78-EVNEE_0 truck 3jdK8UPhpO8_1 skateboard 3jdK8UPhpO8_0 skateboard 3kdpeeQ1Jnc_0 car 3kd_QEZRUWc_1 truck 3kd_QEZRUWc_5 truck 3lHqsoi5cgo_0 person 3liK-2EflUk_0 car 3mIRDwcY1Lg_1 person 3m5eMVv4z6w_1 bear 3nD6nhJtxIU_1 skateboard 3nbim5nlANI_1 horse 3q6LFZBelUs_0 person 3rSUjqH5Wlw_0 truck 3sEpU7UoQP8_0 person 3sg9txiHCp0_0 bear 3szPqA1S6P0_0 person 3tv_dUR84cE_1 airplane 3tv_dUR84cE_0 airplane 3uG4S1gvMxs_0 bird 3uVS_DAYfvY_3 car 3vuykX663QA_0 person 3wI_ureHDBY_0 train 3xLvnY9w5y0_0 person 3xy8Fz8Nsgk_0 bear 3zV0wmpiS78_0 person 
3zccg30U6vs_0 person 30AwDyYIr7o_0 skateboard 325FEWXtOYw_0 person 3293hM-lzx8_0 person 32_1y90B5eQ_0 person 34L4iiCFTXM_0 airplane 34Pma_R21A8_2 person 34jFMRay1zg_0 person 35-MplWeZYQ_0 motorcycle 36zopo-HS48_0 person 38fx_nvlYDE_0 truck 39yxd86tGLU_1 boat 3-ugxoEDuFY_0 person 3_DeqcBRuwE_1 elephant 3_DeqcBRuwE_3 elephant 3_w3NNPGotM_0 person 4ARhlapmEmI_0 dog 4Ac5edN3qIA_0 elephant 4Ac5edN3qIA_1 elephant 4BItGVIP3_w_0 cow 4BItGVIP3_w_1 cow 4BO3P7E3NDE_0 truck 4BO3P7E3NDE_1 truck 4BO3P7E3NDE_2 truck 4Bw4gKDBQCM_1 dog 4C8rmAORSg8_0 person 4Dcg1W7RRmQ_1 train 4ENxW7OPynQ_1 car 4ExA1FWRfMM_0 dog 4FVfzA07rVs_0 person 4FVfzA07rVs_1 person 4GgzQqhrTmA_0 train 4GrMZIyjUdo_0 person 4IUjw1DfTd4_0 cow 4ItJTYAUV3Q_0 cat 4IxmhmTsSRM_0 person 4I72WJJrc1o_0 person 4I72WJJrc1o_1 person 4KFEzxXCjmw_0 car 4KYtNfb0-64_0 person 4KqP6ylUZpI_0 umbrella 4LHOLAPnjV8_0 boat 4LXlXP1epJE_0 person 4MFPOb36tfo_2 bear 4MFPOb36tfo_1 bear 4MZrjdSF01s_1 boat 4Me3lyNuZ7k_0 person 4M9sKAzevzo_0 train 4NI5ycFo2TA_0 airplane 4NI5ycFo2TA_1 airplane 4NKnUR1OMGo_0 horse 4NKnUR1OMGo_1 horse 4Ng6OxFQ9RY_3 bear 4Nx45ho9gSg_0 person 4PNJ3ZV4f8E_0 airplane 4PNJ3ZV4f8E_1 airplane 4PNvdZPZIdM_0 train 4PhakAK74GE_1 motorcycle 4PxLGSy75rk_2 knife 4QOhfEMrhzU_0 airplane 4Q0M6mWNDiU_0 horse 4RhaYtFsnGY_0 person 4SrP2aSHoRk_0 person 4TyWpb19rk4_0 umbrella 4U9sm_eqKTM_1 car 4U9sm_eqKTM_2 car 4Xd_k2REw4I_3 bear 4YRd-9lHLko_0 truck 4ZIgGDQB_R0_0 airplane 4ZYWcd-Fdzg_0 person 4Zxsg6aJ9tA_0 person 4aOWHpM7rOM_0 skateboard 4avaoLry8L0_2 skateboard 4bHGieqZfUk_1 knife 4duFrAfYG8k_0 person 4d6P5umc9j0_0 bird 4fIznTWAFRw_0 horse 4fIznTWAFRw_1 horse 4fIznTWAFRw_2 horse 4f_X4WbQu4M_0 elephant 4hCLCX2lLGk_0 person 4iBMfS5mIt8_0 bird 4ibKNzoA1tQ_0 truck 4igLFns238c_0 motorcycle 4kGNxHIXcUA_0 person 4kLhVZ9UGDE_0 skateboard 4lC7BU1eHxc_0 bus 4l683stlRno_0 knife 4mv1Nx0j3k4_0 person 4nz8CN4XlBE_0 dog 4oWXZIsPnEg_4 elephant 4ofuHARhFlQ_0 person 4pYH5Cm7Vkg_1 boat 4p3JGxvfiNE_4 bicycle 4p3JGxvfiNE_8 bicycle 4p3JGxvfiNE_10 bicycle 
4qBYTh0AcfM_0 train 4qIx-9Qs3Zs_0 airplane 4qIx-9Qs3Zs_2 airplane 4qRkIra0ARM_0 person 4rhkfDV0QC8_1 truck 4ry_MJjFDUA_0 cat 4skAfQd8nX8_0 person 4t79zNxVi0Y_0 elephant 4t79zNxVi0Y_1 elephant 4uFHcf-qpkU_0 horse 4uwly-P5oxg_0 person 4uwly-P5oxg_1 person 4u7pm-h8fiE_0 person 4wox28JkSKY_1 person 4w3ykGq-Q_E_0 bicycle 4w3ykGq-Q_E_2 bicycle 4w5q5RdJ5g4_0 horse 4w5q5RdJ5g4_2 horse 4w5q5RdJ5g4_4 horse 4x80RbpjCPM_0 bear 4x80RbpjCPM_4 bear 4yFIyyevEVY_1 airplane 4ycylGSteiU_0 truck 4yjvwunpMKI_0 car 4yjvwunpMKI_1 car 4yjvwunpMKI_2 car 4yw2hFyx47Q_0 person 4y3qJAq5ap0_0 car 40QgDL4dxrc_0 airplane 40deMboVqPI_1 bird 44FNsfkuWOI_0 elephant 44hlNbUHL2c_0 person 44672wUoOwM_0 person 46NXMVbpzZw_1 boat 468w3XkLHwc_1 boat 47Nn3ywWOlU_1 person 47cBD-Sq9mw_1 person 48ujtCaCdX0_0 person 49CwzbRIUpI_1 bird 49a6EgDu-ZU_0 truck 4-GpBan9Z8s_0 horse 4_A8f6NAa3w_0 person 5BHekdOG9JA_0 elephant 5Bw22C4nsb4_0 train 5CPZUe4hn0M_0 airplane 5DS23LkFit8_0 cow 5DVU9wTDzN8_0 skateboard 5DjSsYt5N4Q_0 skateboard 5FAbvaslTQE_0 motorcycle 5FXOzzaKrcw_0 airplane 5Fro7Bo628Y_0 boat 5FxLl3jd7I0_0 skateboard 5F5fgLUXow8_3 car 5F5fgLUXow8_7 car 5F5fgLUXow8_8 car 5F5fgLUXow8_0 car 5F5fgLUXow8_1 car 5F5fgLUXow8_2 car 5F5fgLUXow8_4 car 5GMISyAZA9o_0 horse 5GpziDmwRTc_0 cow 5JPqrGj3CgM_0 giraffe 5Ko6ZHOz4IY_0 person 5Lbguv7FGLM_1 bird 5M7Wx_HJ_XQ_0 person 5Nz4g-YykuI_0 person 5O41yfenxMM_1 cow 5PeDI6XI7is_3 horse 5Qd986abGHo_0 person 5Tza7UHp3xE_0 train 5WTw98UVUCo_1 horse 5WpjuP9uJrI_2 bird 5W8Hg8uhxgQ_0 car 5W8Hg8uhxgQ_1 car 5XEAIdyb_ng_0 person 5XcopMzRch4_0 skateboard 5YbA5Uw-5xQ_0 person 5YbA5Uw-5xQ_1 person 5bIO0Gl25u0_1 boat 5bIO0Gl25u0_0 boat 5dGbxAkTDPM_1 cow 5dRnssv_jug_0 cow 5eRQh3Rv1Lk_0 horse 5eak0nLYZC0_0 airplane 5enKNMe1Dpg_0 person 5eq6WBGMyME_0 giraffe 5eum6r7kxbw_1 giraffe 5eum6r7kxbw_4 giraffe 5e84K5OEIj4_0 person 5fXoyIBk_gI_0 person 5gNgZQ0nDW8_4 knife 5gNgZQ0nDW8_5 knife 5gNhZJMFmis_0 bear 5gNhZJMFmis_1 bear 5gbLo2hItTs_0 person 5geZjQ9qAJU_0 motorcycle 5iDhgUX1kdc_0 person 
5iwoWJK4GGo_0 car 5ll8fjNhIzg_0 person 5lv2GCs3_E0_0 person 5l9rlcuS7pE_0 bus 5mocfP3c3JE_0 bear 5mqvNWXtMCU_0 cat 5nAuDbKmWLY_0 elephant 5nC2ZXfE-sg_0 train 5nkh3PK6lBs_0 cow 5of5t38DQL4_0 cow 5okxoIw3cJI_0 skateboard 5ovlgihl130_0 knife 5phhj08_8hI_0 dog 5psIBlFu-yQ_0 person 5rh7nf5z_O0_1 cow 5rkM4mLsQoU_0 knife 5sIj93XnVc0_1 motorcycle 5sjUnvABkko_0 airplane 5s4kqURLLo4_0 person 5toRpAYrY_4_0 person 5uYObEyAbCQ_0 horse 5ukcjpXOopg_0 person 5vPXxAEGTrw_0 airplane 5vUtusnPXXs_0 bird 5vaBUAh4HkU_0 airplane 5yMeqHPiJgY_1 horse 5yMeqHPiJgY_2 horse 5yMeqHPiJgY_3 horse 5yeSANffSRk_0 person 5yeSANffSRk_1 person 5zJuhMtO1F8_0 bird 5zKtWxffw-0_0 boat 51rDJW0FO8w_0 horse 51yQTVmaMXw_1 motorcycle 52UjkVxSSHg_0 person 52VFNDCXUHg_0 person 52pNzl4wrxs_0 person 52wdqvYrGv4_0 person 522wkm19sH0_0 bus 54icMYqqx_w_1 bus 55H1IVgQj3E_0 boat 56BI7lH0z1g_0 person 56bgv0J-cXw_1 knife 56bgv0J-cXw_4 knife 56r2wDCnuQQ_0 horse 57BY7QjcYbQ_0 person 574FA_5qp-s_0 bus 58K_ZPS7U8M_0 person 58gdyHWU6do_1 truck 5802XdQdAkU_0 cow 59JJGcB2jRE_0 horse 59JJGcB2jRE_4 horse 59JJGcB2jRE_2 horse 59cXOQc39JI_1 zebra 5928Zhy26yI_1 giraffe 5-Oeo8tmauc_0 bus 5-Oeo8tmauc_1 bus 5-Oeo8tmauc_2 bus 5-O2xma48Tw_0 bird 5-y_Rrr8shw_2 person 5_njhyGAXdE_0 truck 5_njhyGAXdE_1 truck 5_njhyGAXdE_2 truck 5_2sGSrZblY_0 person 6AD9GHHEVkE_1 boat 6AYkCla5Oak_0 car 6A2LC4_gts4_0 person 6A2LC4_gts4_1 person 6BB65BA-pS0_1 knife 6CKS3WJRpHI_0 person 6C1C-L7L6CE_0 person 6DQ-H73b62Y_0 person 6EHcwJiML3g_2 person 6GlBa-DUEqc_0 person 6HlTwF1ZDkc_0 person 6HrWOx9GfzI_0 person 6JrhpITR8po_1 cow 6JrhpITR8po_0 cow 6KpKxtwB1Ww_0 person 6LiW0KF3fME_0 person 6Meaw8zK8sU_0 person 6M3wDWZDZJ8_0 car 6M4oJG9NsRM_0 person 6Nc1z3BVzlI_0 bear 6OlxDr5vZuI_2 horse 6Ona04rOyZk_0 cat 6PBKPTCkWOo_0 person 6PH-mFChsi0_0 airplane 6PwE6q6pebc_1 person 6QFs4uNsSt4_0 person 6RIFox7kLqY_0 cat 6SBj14dkVPM_0 cow 6SdX0oE9Qm8_0 cat 6SizSdOT9_k_0 horse 6TEQ098RfzE_0 cow 6TQ8X9G4BAY_0 dog 6UQbOOWv_ws_0 cow 6UQbOOWv_ws_2 cow 6XUe2u2YWkQ_2 umbrella 
6bJPo4tzJvQ_0 person 6bco275PcUs_0 truck 6bco275PcUs_1 truck 6gwBOlfJ34I_1 skateboard 6gww5ltOLQY_0 bird 6gww5ltOLQY_1 bird 6hAG7632JjA_0 cat 6htKDjHsXPQ_0 cow 6id5A0aiJbE_0 train 6jwTUZocHXY_0 horse 6j07-PcNv70_0 truck 6kjb3q8EygI_0 elephant 6lAxaY4AYB8_0 person 6lPPfWdeBvU_0 cat 6l3SpVgqJY0_0 person 6mYi-vXre4Q_0 truck 6med3JZ2k40_0 person 6miVJWDTBCY_1 train 6n6fVeWD_m0_0 knife 6o61j0KZ9cA_0 person 6pPjKIlVlfY_0 bicycle 6pnenPlFGIc_0 motorcycle 6pnenPlFGIc_1 motorcycle 6pny8Td3Lvs_0 horse 6qRIuIHqJco_0 train 6qSDUh2ES7Q_0 person 6qVpY1VC2hU_1 cat 6qhp1FiVbBQ_0 knife 6rlBtCRp25g_0 cat 6r0rYZCL4Qc_0 person 6r0rYZCL4Qc_1 person 6uMmknjq0mg_0 bicycle 6uSZqFsKMGI_0 cow 6um2PoiKfT4_0 motorcycle 6vAGEaKFuyY_1 bus 6vAGEaKFuyY_2 bus 6vafM_LKdhA_0 umbrella 6vc8u4MPWkY_0 bird 6v_NKAM10sA_5 bicycle 6v_NKAM10sA_9 bicycle 6v_NKAM10sA_10 bicycle 6v_NKAM10sA_11 bicycle 6v_NKAM10sA_12 bicycle 6v_NKAM10sA_0 bicycle 6v_NKAM10sA_1 bicycle 6w-nwNFVYm8_0 motorcycle 6y78kiGuIAk_0 person 6zPET0HFVaM_3 train 6zPgsocp4bY_1 bicycle 6zPgsocp4bY_2 bicycle 6zPgsocp4bY_3 bicycle 6zPgsocp4bY_7 bicycle 6zPgsocp4bY_9 bicycle 6zW1omjPFRs_0 elephant 6zW1omjPFRs_1 elephant 62MEsd3U1aQ_0 person 62PpG0cOcbU_0 person 63vKOQ-SCBw_0 airplane 63_kFJCm2pQ_0 person 64yGcACuF0g_0 cat 64yZxDGH92I_0 person 64-njkqyF7k_0 bus 65u4BXZ10RY_0 dog 65u4BXZ10RY_1 dog 654ylXfWndU_0 boat 66HPgc7Up3o_6 horse 66HPgc7Up3o_3 horse 66HPgc7Up3o_4 horse 66HPgc7Up3o_7 horse 66N_Ju8hg2U_0 knife 665JKK-JrTc_0 person 67kix34dj7A_0 truck 67wgEifQYpg_0 person 68KnEa1hVf8_0 bicycle 6-Z9S0qy8ys_1 dog 6-7x1BQGuQE_0 person 6_nq4o_21CY_0 elephant 7BBHz6wfABM_0 person 7CYm8WQftfw_0 bus 7DIXCjEBWLw_0 airplane 7D-ypPzaTDI_0 person 7GvsFRhnxWc_1 bird 7G2sXxpbA-0_0 motorcycle 7HXox1j1X2A_0 person 7Hthj7LhsoI_1 elephant 7H1AhHiyip0_0 person 7JXhfaNTsUQ_2 bird 7K61aiu3UsM_0 person 7K61aiu3UsM_1 person 7LKG4ReUlZA_0 person 7LTKFUY3Xo8_0 bird 7MQZWaHzUOo_0 cow 7Mb_dcvNENM_7 bicycle 7Mb_dcvNENM_3 bicycle 7Mb_dcvNENM_4 bicycle 7Mb_dcvNENM_5 
bicycle 7Mb_dcvNENM_6 bicycle 7NDhXBp57BY_0 person 7NFMDZwqdw4_0 person 7Ng49Wed4Y4_0 cow 7Ng49Wed4Y4_2 cow 7NxvW5DSQrI_0 cat 7O8grUKQopY_0 person 7PeZgsBNi5g_0 car 7QauV6mvt98_0 car 7RxzfGFIxSg_0 cat 7Strg7qJtW0_0 elephant 7Strg7qJtW0_7 elephant 7Strg7qJtW0_1 elephant 7Strg7qJtW0_2 elephant 7Strg7qJtW0_3 elephant 7VQ8QZRnxD8_0 cow 7Vcfkjk--Fc_1 dog 7V5Q7Te4KNI_0 bus 7WZRhdW3Ysw_0 elephant 7XQ-ufhX7gc_0 cow 7XQ-ufhX7gc_1 cow 7YCox5adS-U_0 person 7YQM-nFSHW4_0 knife 7Ya_jh9VO9U_0 person 7aTla4KAK_U_1 knife 7bqlApH5GwI_1 bicycle 7dFEYp-1Hgo_0 person 7e8WNmzDHUQ_0 person 7fF7heSCMTw_0 motorcycle 7fRxyCT-Wao_0 giraffe 7fRxyCT-Wao_2 giraffe 7fSMUG5W8vk_2 bicycle 7g8SI9aAn70_1 umbrella 7hIJP5KExbE_1 elephant 7hjOcuaQm7I_0 elephant 7kPsaqRQBCk_0 knife 7kl1hNW3aVs_0 motorcycle 7k7H9RKhOF8_1 skateboard 7k7H9RKhOF8_3 skateboard 7ledBa3nuVs_0 train 7ledBa3nuVs_2 train 7m98zjjFHbU_0 person 7ntsSm-LFZA_0 person 7ntsSm-LFZA_1 person 7nzY38tPTM0_0 person 7nzY38tPTM0_1 person 7n8C_td0Th8_0 horse 7p4RxRFB_Eg_0 horse 7rE5dIroJwQ_0 person 7rifGM-TuPA_0 horse 7trl2U6nLPc_0 horse 7vyHv7_GxbQ_0 person 7wte1pPBwQ0_1 bear 7w616uMnI_8_0 elephant 7w616uMnI_8_1 elephant 7x8K4JervhE_0 bus 7y0joj813H0_3 bus 7zRaB-2B7B0_0 train 72RzEHZFYtM_2 airplane 72RzEHZFYtM_1 airplane 73Wonc3xnLI_0 person 73Z4KnnAMlU_0 person 74gRlu6vJLY_0 person 747bRdBUPSw_0 person 76LU6w1a7UA_1 airplane 76PIBEC3WVo_0 skateboard 77GychcVDRI_0 person 77dvi_3OU4M_0 person 79MY0qku9uc_1 horse 8AgZqrCi9no_0 horse 8BK44tI3ACo_0 person 8BQJVHpHFsU_1 dog 8BQJVHpHFsU_2 dog 8B3bbakza_Q_0 person 8CJRCoA1Rps_0 person 8ClOgfNAjXs_0 giraffe 8DlXcc1IXlw_0 car 8EwDzFi34nA_0 cow 8FEp5ORJ27g_0 truck 8FyuS809d24_0 dog 8FyuS809d24_1 dog 8GGi0BXLCaM_0 person 8G_vBzM-Ws4_1 umbrella 8HcyzPUv5ag_0 person 8JIpa6tfWzo_0 airplane 8JKJnuN_UTI_0 cow 8JhHIO_7m-0_0 cow 8LGnOH6nDbc_0 dog 8LGnOH6nDbc_1 dog 8Lx004yCltY_6 elephant 8Lx004yCltY_12 elephant 8Lx004yCltY_18 elephant 8MO_kng7L-s_0 person 8MO_kng7L-s_1 person 8NlznvdsNJQ_2 boat 8N8hB2Au4JE_0 
person 8Pbd3dd3v5E_0 person 8Pz3xq3KFo0_6 elephant 8Pz3xq3KFo0_4 elephant 8Qr-5_567tI_1 truck 8Q8g9z-DNF8_0 motorcycle 8RZsKbffdqI_0 cat 8Sbz2MGzhp4_0 person 8UcqXCLmq-M_1 elephant 8UcqXCLmq-M_3 elephant 8UcqXCLmq-M_6 elephant 8UcqXCLmq-M_7 elephant 8Ul_lS0g_RU_0 skateboard 8UmKRVMR08g_2 bird 8U7BmrkcgcU_2 truck 8VkbfdMQrR8_0 person 8VzjERSpeS4_1 elephant 8VzjERSpeS4_0 elephant 8WcBoYh-IMg_0 bird 8X27eyH-tx0_0 car 8Zi2bsTpMeY_0 person 8ZmfZDMaVhg_0 cat 8Z1GvAHPEnU_0 cat 8a1bD-UgfKE_0 truck 8bD-aqWPxwM_0 motorcycle 8bE_FhrjBuM_2 skateboard 8bE_FhrjBuM_0 skateboard 8bE_FhrjBuM_1 skateboard 8bypIjdKgEI_0 person 8b5fedIr-WQ_0 person 8cNzCe26dSM_0 person 8cSOpd9gaPE_0 cow 8c8TJ_Jzngk_0 horse 8d6950aGpD8_0 dog 8eK3ktD9j5o_0 horse 8eK3ktD9j5o_1 horse 8ewNcrMhg-w_0 person 8gsiG2Wu3YM_0 giraffe 8hFEJz0GvfU_0 elephant 8hwa44VMdLs_0 person 8h8Cpkugo-Y_0 elephant 8h_eY7zEIqk_3 truck 8iBiHoA_OJk_0 person 8jRFQ8RKZ0s_1 car 8kTREwiI1-8_0 cow 8kn6PJbtsyA_0 bicycle 8kn6PJbtsyA_1 bicycle 8kn6PJbtsyA_2 bicycle 8kn6PJbtsyA_3 bicycle 8kn6PJbtsyA_4 bicycle 8lKXEr2W3yM_0 knife 8lMRKCKyBwk_0 person 8lonNtE99PI_1 person 8l7UmXXnAJs_0 truck 8mlHevSC8cc_0 car 8m-GtOBjbzY_1 bicycle 8nWSGwlJyPQ_0 cat 8nsl-r_i0AI_0 person 8n3A8io4GNU_0 person 8okfUuO0Pvc_1 bird 8poWB-6q4xk_1 bicycle 8p2saqn2kiQ_0 person 8qFJg_AoKeY_0 cow 8qulLm8MYrM_0 bus 8rBxRMDJEFY_0 person 8sOWPIfWpCM_0 horse 8tKto2zQWUg_0 elephant 8uoYlmdJlAo_1 knife 8wdvLn40CTk_5 bus 8wdvLn40CTk_0 bus 8wdvLn40CTk_1 bus 8wv3WJBJmog_1 dog 8yFZUTSjpos_0 motorcycle 8zBx-nHUqBY_0 person 8zUAF30Hu6c_1 train 8zUAF30Hu6c_2 train 8zftjn0I9TQ_0 truck 8zftjn0I9TQ_2 truck 8zjgYuK3nVY_0 person 8z-YLOzAxb4_2 bicycle 8z-YLOzAxb4_4 bicycle 8z-sTr28AWk_0 skateboard 80CcMFD-Rcw_1 person 80CcMFD-Rcw_0 person 81cNVk8boEM_0 person 82lK9rB-e08_1 motorcycle 84P6L_HrN48_0 bird 88N5__h7Zdo_0 bicycle 89a461_gh2o_0 bicycle 89mGhzBokZ8_1 bear 89qfsC77BYk_0 person 8_oUj2cuPdo_0 dog 9A-VO1zCZJ4_1 motorcycle 9BVgbNz-bi8_0 person 9BVgbNz-bi8_1 person 9BpvtvUGG5g_0 person 
9DGpFjuUVBk_0 person 9DY0dTRH5xI_0 bird 9D5ORdC7BuQ_6 bus 9ELQq5BMR1U_0 person 9E8VBIYmTGY_1 cow 9E8VBIYmTGY_0 cow 9FAB9BrcQls_0 person 9FTOvdcnzDQ_0 airplane 9GdhKEBm0pA_6 bicycle 9GdhKEBm0pA_1 bicycle 9GdhKEBm0pA_3 bicycle 9HqapwdLVzk_4 knife 9KfdTsjy53o_0 truck 9LHbQA-pT0U_2 horse 9LJRUmW_AII_0 boat 9LOpNoTFWKg_0 truck 9LOpNoTFWKg_4 truck 9LOpNoTFWKg_1 truck 9LOpNoTFWKg_2 truck 9LqExSHe9y8_0 knife 9Ls7gSZQt1w_2 bear 9NsmnTdRiik_0 airplane 9PsezNNV0Jc_1 airplane 9PsezNNV0Jc_2 airplane 9PsezNNV0Jc_0 airplane 9Q3srzApSJU_0 person 9RGlWjTKvE0_0 bus 9RZCK24Shec_0 cat 9ScZtgWAJZA_1 person 9SgrA5Q1d94_0 person 9ShZpsmuvc4_2 skateboard 9ShZpsmuvc4_1 skateboard 9UU2h6M8DJk_2 truck 9UwLiWKOIGY_0 person 9U-tccGetsk_0 knife 9VwSYjCCRYk_1 truck 9VwSYjCCRYk_2 truck 9WDPvYpnrfU_1 truck 9WDt0JjOFIA_0 person 9YVkZ7QxD5E_0 person 9Y6XZFO31JU_0 cow 9ZpZZoTtySo_1 bear 9Z0Jz1tesQ4_4 cow 9Z0Jz1tesQ4_1 cow 9Z0Jz1tesQ4_2 cow 9Z0Jz1tesQ4_3 cow 9aQOAnspXGo_1 bird 9bYPYgMQVjU_0 person 9bzmQFGK8m8_0 person 9dOPPvgyMqk_0 person 9eI_0DoOE08_0 person 9eI_0DoOE08_1 person 9g8o260G10k_0 bird 9hAU80xKWy0_0 truck 9jS5MThAtmo_0 person 9kGuuCx39JA_0 motorcycle 9lsXenPJ-X8_1 bird 9ltdzlYXfp8_0 cow 9ltdzlYXfp8_3 cow 9muklrcigJY_0 dog 9nqU8e9IUPU_0 skateboard 9pEB8cjvPSQ_1 horse 9qamzN9bwxw_0 person 9rvVWyyuud0_0 person 9r1FvK19XV8_0 person 9uhZRDsQKnc_0 person 9yt1if13PHk_0 elephant 9y5txKR57mc_0 bird 9zBCjCtH3Eg_0 horse 9zqk5w8Qx1Q_1 bicycle 9zroWMwZHGI_1 person 907A5I4-LpA_0 motorcycle 91SWvU-5TcI_0 person 92MaWPuO8PI_0 boat 92560YiwSP0_0 person 93gyPa_dPGU_0 truck 946wiAK4Seg_1 person 95CV_olHtcI_0 person 96WWGXa4QrI_0 car 96akJFw5SPU_0 truck 96iqXHgOXKY_0 person 98XiF-Z__aI_0 cat 99Tb7HSFn3I_0 person 9_bFE0FUq_c_1 knife -A-tBuMjU8s_0 cat -B4YQQLrOfI_2 skateboard -C0rYHhL_x4_0 motorcycle -DYf49hlRSE_0 person -Ebcfmg0-eE_0 person -E05a-eQSwY_0 umbrella -FMaVn21dYU_1 horse -Fu9coX9J-A_0 person -Fu9coX9J-A_1 person -Gk4iMiEMCc_0 person -LVtIbelA3M_0 horse -LXr7LdXtrk_0 boat -LjAFTF5WP4_0 bicycle 
-LjAFTF5WP4_1 bicycle -LjAFTF5WP4_3 bicycle -MpLPuviQ00_0 person -M_jT3EYgcc_0 person -NWvB2g952Q_2 bird -OZt785bbpY_0 airplane -P37Y1G6oHk_2 airplane -P37Y1G6oHk_3 airplane -P37Y1G6oHk_0 airplane -QBeUV_OkJg_1 dog -QQCINzsXpw_0 person -Q6g2xZ0PxY_1 airplane -RjxMfaV-Vo_1 knife -RjxMfaV-Vo_2 knife -SPHavKGd3M_0 skateboard -S8L2HACCPE_12 elephant -S8L2HACCPE_1 elephant -S8L2HACCPE_10 elephant -TKKOo1FfAI_0 bird -VgWHKeRRjs_0 airplane -VgWHKeRRjs_1 airplane -WyEyKxdZOQ_0 person -XWeGpACKwc_0 skateboard -Xj6MiGVWt0_0 person -XwZnoNm0FU_0 dog -ZDO95E0pl8_0 person -anX-ad_gHQ_0 person -avz2OsPIq4_2 bicycle -bJkl4q5f-A_0 bird -c1b7nHzGn4_0 airplane -dQnNlBQp3o_0 person -db_SToBhkg_2 motorcycle -eZUdm8ERQQ_0 person -e42Pb0YeOY_0 cat -fnhznKC3CU_0 person -f0JLwuyuTM_0 person -jL0HOXwYls_0 person -kLIF2a7yeU_0 person -k1TxEpOgnA_0 person -l9NS6DuRPI_0 person -mgNwLW3ODc_0 person -mwDgqLpu-k_0 skateboard -nOfuA8B7As_1 bicycle -nzXunuZac4_0 cat -oG6YVPhC_I_0 horse -o28rb1UnYA_0 car -sJOJNjOCBI_0 motorcycle -sJOJNjOCBI_1 motorcycle -sWch1rnO10_0 person -th9NS9hl6s_0 cow -uP01llwXFY_5 boat -uP01llwXFY_1 boat -u5MNR-9ClU_0 person -vkMKVuweFA_0 person -v7FXEhgwtE_0 person -y652b4w3Ss_0 bird -zqHD6Jthqg_0 person -0U1vm6LIi8_0 person -1je1K1ihbk_2 skateboard -2iw3MzUP2Y_0 motorcycle -3OvKcu5P2U_0 car -3fzr21Ov5w_0 person -6vJDV8XnWE_0 boat -7Im8MyvaXU_0 cat --8shIp3t0I_0 knife -_iBuJTwjw8_1 horse -_xag4X_Do0_0 bird _ATEx5gbBEQ_0 knife _ATEx5gbBEQ_1 knife _AcvI8VF5ig_0 cow _Ae4vmwt8uA_0 person _Auvs-o5Pck_0 truck _A8nA25Tq8c_1 person _C_yvxdjVGA_2 horse _C_yvxdjVGA_0 horse _DXAxnPIiBU_0 cow _D-9w3aSX50_0 person _GyE3cPQ6U8_0 car _HN1_MjnjWo_2 elephant _HYaLoOKE84_1 cow _IhkqtAQHBw_0 train _InrHPE8Umw_0 motorcycle _IpUnYit3Pg_0 dog _JNG6qK6INs_3 bear _KzDIvt0cCk_0 person _K6jYgDC1JU_0 airplane _NZ4o-omJLE_0 umbrella _NtOMcyVAp4_1 dog _OmnjH4t-IY_0 person _QF0A9B-xB8_0 person _QRy9nd4kcg_0 airplane _Q9M8QAjSMk_0 person _Rd-wEO2r10_0 person _R6nlDzh6Tc_0 person _R6nlDzh6Tc_2 person 
_T0O1BlYjaU_1 bear _VegkTdhrQE_0 motorcycle _WKJaPPBz8Q_0 umbrella _WcqTpLKkww_1 truck _Y6_E1l4blQ_1 knife _ZDU4qi4lcI_2 cow _ZDU4qi4lcI_0 cow _ZDU4qi4lcI_1 cow _ZHmkH59bCQ_0 person _ZXqLyRe4n0_0 elephant _ZsogS9uPJQ_0 person _akq_DieEWE_0 person _akq_DieEWE_1 person _bO2sdIelLY_0 person _dC_upYbxWI_0 knife _eCb7mFYyIg_0 motorcycle _egWujmdZtw_0 person _epdfuB0qRM_0 car _e5Vvy9DJ9E_4 bear _e5Vvy9DJ9E_0 bear _foK5Dvj1As_0 bird _hryEVGKNuw_0 horse _iY4AnGfq0Y_0 train _jBzwdg0QRA_1 bus _jci9tIBIB4_5 truck _kdhlRke8uI_0 person _kfdh_5bI-Q_0 person _lmD-useijU_0 person _mJBwuCegJ0_12 truck _mJBwuCegJ0_1 truck _mJBwuCegJ0_2 truck _mJBwuCegJ0_8 truck _mJBwuCegJ0_9 truck _oRtPVRmtwo_0 dog _pEHwWe2seA_5 elephant _sV1Jd1uiYg_0 person _tZU1XTOML4_0 boat _usyDpllGBo_0 horse _vBAv8cBoqE_0 skateboard _vV0wdWq0cU_0 person _xMVx44FbT4_0 horse _xQn3TupjYs_0 cat _xy58m6yCko_0 motorcycle _yQQjARqD1s_0 boat _yfoe4GCA0Q_4 airplane _yfoe4GCA0Q_2 airplane _yv5Cwbm9EA_0 person _zIDofZkgS4_1 truck _zQt1CSSKyA_1 bicycle _0eR2vQAEqE_0 elephant _0eR2vQAEqE_1 elephant _17u-cPTYt0_0 car _17u-cPTYt0_1 car _2mIWIhbDPY_0 bus _37U5Elgnck_0 person _5fE6dP48FM_0 cow _5sIT4l5izM_0 knife _6qUuUUYvUQ_0 person _7zbbqEa3nw_1 train _7zbbqEa3nw_4 train _8VTthFkvS0_0 bird _8iyumFI4sQ_1 elephant _8iyumFI4sQ_2 elephant _8iyumFI4sQ_3 elephant _81FImml2gk_0 dog _9bypka_Q4c_0 bus _-CvwC7H730_0 person _-XcxnQLKPM_0 dog __Q5A7gExpI_0 person ================================================ FILE: lib/train/dataset/COCO_tool.py ================================================ __author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. 
The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into a Python dictionary. # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepares data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # annToMask - Convert segmentation in an annotation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each function can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>annToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import copy import itertools from pycocotools import mask as maskUtils import os from collections import defaultdict import sys PYTHON_VERSION = sys.version_info[0] if PYTHON_VERSION == 2: from urllib import urlretrieve elif PYTHON_VERSION == 3: from urllib.request import urlretrieve def _isArrayLike(obj): return hasattr(obj, '__iter__') and hasattr(obj, '__len__') class COCO: def __init__(self, dataset): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset)) self.dataset = dataset self.createIndex() def createIndex(self): # create index print('creating index...') anns, cats, imgs = {}, {}, {} imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print('index created!') # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. 
        :return:
        """
        for key, value in self.dataset['info'].items():
            print('{}: {}'.format(key, value))

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)   : get anns for given imgs
               catIds  (int array)   : get anns for given cats
               areaRng (float array) : get anns for given area range (e.g. [0 inf])
               iscrowd (boolean)     : get anns for given crowd label (False or True)
        :return: ids (int array)     : integer array of ann ids
        """
        # scalar arguments are wrapped into one-element lists
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            # no filter given: all annotations
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                # unknown image ids are silently skipped
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            # the category and area filters are applied on top of the image filter;
            # the area test is strict on both bounds
            anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if not iscrowd == None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array) : get cats for given cat names
        :param supNms (str array) : get cats for given supercategory names
        :param catIds (int array) : get cats for given cat ids
        :return: ids (int array)  : integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            # filters are conjunctive: name AND supercategory AND id
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
        ids = [cat['id'] for cat in cats]
        return ids

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            # start from the requested image ids (if any) and intersect with the
            # image sets of each requested category; an image must contain ALL
            # given categories to be kept
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array) : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        # NOTE: returns None when ids is neither array-like nor an int
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array) : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def loadImgs(self, ids=[]):
        """
        Load imgs with the specified ids.
        :param ids (int array) : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def showAnns(self, anns, draw_bbox=False):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        # NOTE: returns 0 (not None) on an empty annotation list
        if len(anns) == 0:
            return 0
        # decide the annotation flavor from the first record
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                # one random (brightened) color per annotation
                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape((int(len(seg)/2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            # uncompressed RLE -> compressed RLE
                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = maskUtils.decode(rle)
                        img = np.ones( (m.shape[0], m.shape[1], 3) )
                        # crowd regions get a fixed green-ish color, others random
                        if ann['iscrowd'] == 1:
                            color_mask = np.array([2.0,166.0,101.0])/255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:,:,i] = color_mask[i]
                        # overlay the mask at 0.5 alpha
                        ax.imshow(np.dstack( (img, m*0.5) ))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
                    kp = np.array(ann['keypoints'])
                    # keypoints are stored as flat (x, y, visibility) triples
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) if draw_bbox: [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox'] poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]] np_poly = np.array(poly).reshape((4,2)) polygons.append(Polygon(np_poly)) color.append(c) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print(ann['caption']) def loadRes(self, resFile): """ Load result file and return a result api object. :param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print('Loading and preparing results...') tic = time.time() if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode): with open(resFile) as f: anns = json.load(f) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], 
bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = maskUtils.area(ann['segmentation']) if not 'bbox' in ann: ann['bbox'] = maskUtils.toBbox(ann['segmentation']) ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print('DONE (t={:0.2f}s)'.format(time.time()- tic)) res.dataset['annotations'] = anns res.createIndex() return res def download(self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. 
        :param tarDir (str): COCO results directory name
               imgIds (list): images to be downloaded
        :return:
        '''
        # NOTE: returns -1 (error-code style) instead of raising when no
        # target directory is given
        if tarDir is None:
            print('Please specify target directory')
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            # already-downloaded files are skipped
            if not os.path.exists(fname):
                urlretrieve(img['coco_url'], fname)
            print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))

    def loadNumpyAnnotations(self, data):
        """
        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
        :param  data (numpy.ndarray)
        :return: annotations (python nested list)
        """
        print('Converting ndarray to lists...')
        assert(type(data) == np.ndarray)
        print(data.shape)
        # exactly 7 columns expected: image id, box (x, y, w, h), score, class
        assert(data.shape[1] == 7)
        N = data.shape[0]
        ann = []
        for i in range(N):
            if i % 1000000 == 0:
                # progress report for very large result arrays
                print('{}/{}'.format(i,N))
            ann += [{
                'image_id'  : int(data[i, 0]),
                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
                'score' : data[i, 5],
                'category_id': int(data[i, 6]),
                }]
        return ann

    def annToRLE(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: binary mask (numpy 2D array)
        """
        # image size is needed to rasterize polygons / expand uncompressed RLE
        t = self.imgs[ann['image_id']]
        h, w = t['height'], t['width']
        segm = ann['segmentation']
        if type(segm) == list:
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, h, w)
            rle = maskUtils.merge(rles)
        elif type(segm['counts']) == list:
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, h, w)
        else:
            # rle
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        # decode via the RLE intermediate form produced by annToRLE
        rle = self.annToRLE(ann)
        m = maskUtils.decode(rle)
        return m


================================================
FILE: lib/train/dataset/__init__.py
================================================
from .lasot import Lasot
from .got10k import Got10k
from .tracking_net import TrackingNet
from .imagenetvid import ImagenetVID
from .coco import MSCOCO
from .coco_seq import MSCOCOSeq
from .got10k_lmdb import Got10k_lmdb
from .lasot_lmdb import Lasot_lmdb
from .imagenetvid_lmdb import ImagenetVID_lmdb
from .coco_seq_lmdb import MSCOCOSeq_lmdb
from .tracking_net_lmdb import TrackingNet_lmdb


================================================
FILE: lib/train/dataset/base_image_dataset.py
================================================
import torch.utils.data
from lib.train.data.image_loader import jpeg4py_loader


class BaseImageDataset(torch.utils.data.Dataset):
    """ Base class for image datasets.

    Subclasses are expected to implement the NotImplementedError methods below;
    frames are fetched through get_image(), not through __getitem__. """

    def __init__(self, name, root, image_loader=jpeg4py_loader):
        """
        args:
            root - The root path to the dataset
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
        """
        self.name = name
        self.root = root
        self.image_loader = image_loader

        self.image_list = []     # Contains the list of sequences.
        self.class_list = []     # Populated by subclasses that have class info.

    def __len__(self):
        """ Returns size of the dataset
        returns:
            int - number of samples in the dataset
        """
        return self.get_num_images()

    def __getitem__(self, index):
        """ Not to be used! Check get_frames() instead.
        """
        return None

    def get_name(self):
        """ Name of the dataset

        returns:
            string - Name of the dataset
        """
        raise NotImplementedError

    def get_num_images(self):
        """ Number of sequences in a dataset

        returns:
            int - number of sequences in the dataset."""
        return len(self.image_list)

    def has_class_info(self):
        # overridden by subclasses that expose per-image class labels
        return False

    def get_class_name(self, image_id):
        return None

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def get_images_in_class(self, class_name):
        raise NotImplementedError

    def has_segmentation_info(self):
        return False

    def get_image_info(self, seq_id):
        """ Returns information about a particular image,

        args:
            seq_id - index of the image

        returns:
            Dict
        """
        raise NotImplementedError

    def get_image(self, image_id, anno=None):
        """ Get an image

        args:
            image_id - index of image
            anno(None) - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.

        returns:
            image -
            anno - dict - A dict containing meta information about the sequence, e.g. class of the target object.

        """
        raise NotImplementedError


================================================
FILE: lib/train/dataset/base_video_dataset.py
================================================
import torch.utils.data
# 2021.1.5 use jpeg4py_loader_w_failsafe as default
from lib.train.data.image_loader import jpeg4py_loader_w_failsafe


class BaseVideoDataset(torch.utils.data.Dataset):
    """ Base class for video datasets.

    Subclasses implement the NotImplementedError methods below; frames are
    fetched through get_frames(), not through __getitem__. """

    def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe):
        """
        args:
            root - The root path to the dataset
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
        """
        self.name = name
        self.root = root
        self.image_loader = image_loader

        self.sequence_list = []     # Contains the list of sequences.
        self.class_list = []        # Populated by subclasses that have class info.

    def __len__(self):
        """ Returns size of the dataset
        returns:
            int - number of samples in the dataset
        """
        return self.get_num_sequences()

    def __getitem__(self, index):
        """ Not to be used! Check get_frames() instead.
        """
        return None

    def is_video_sequence(self):
        """ Returns whether the dataset is a video dataset or an image dataset

        returns:
            bool - True if a video dataset
        """
        return True

    def is_synthetic_video_dataset(self):
        """ Returns whether the dataset contains real videos or synthetic

        returns:
            bool - True if a video dataset
        """
        return False

    def get_name(self):
        """ Name of the dataset

        returns:
            string - Name of the dataset
        """
        raise NotImplementedError

    def get_num_sequences(self):
        """ Number of sequences in a dataset

        returns:
            int - number of sequences in the dataset."""
        return len(self.sequence_list)

    def has_class_info(self):
        return False

    def has_occlusion_info(self):
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def get_sequences_in_class(self, class_name):
        raise NotImplementedError

    def has_segmentation_info(self):
        return False

    def get_sequence_info(self, seq_id):
        """ Returns information about a particular sequence,

        args:
            seq_id - index of the sequence

        returns:
            Dict
        """
        raise NotImplementedError

    def get_frames(self, seq_id, frame_ids, anno=None):
        """ Get a set of frames from a particular sequence

        args:
            seq_id      - index of sequence
            frame_ids   - a list of frame numbers
            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.

        returns:
            list - List of frames corresponding to frame_ids
            list - List of dicts for each frame
            dict - A dict containing meta information about the sequence, e.g. class of the target object.
        """
        raise NotImplementedError


================================================
FILE: lib/train/dataset/coco.py
================================================
import os
from .base_image_dataset import BaseImageDataset
import torch
import random
from collections import OrderedDict
from lib.train.data import jpeg4py_loader
from lib.train.admin import env_settings
from pycocotools.coco import COCO


class MSCOCO(BaseImageDataset):
    """ The COCO object detection dataset.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,
        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,
                 split="train", version="2014"):
        """
        args:
            root - path to coco root folder
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
            min_area - Objects with area less than min_area are filtered out. Default is 0.0
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO', root, image_loader)

        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))

        # Load the COCO set.
        self.coco_set = COCO(self.anno_path)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()    # the parent class thing would happen in the sampler

        # One "image" entry per annotation id (crowd annotations excluded).
        self.image_list = self._get_image_list(min_area=min_area)

        if data_fraction is not None:
            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))
        self.im_per_class = self._build_im_per_class()

    def _get_image_list(self, min_area=None):
        # Annotation ids act as the sample index; iscrowd regions are skipped.
        ann_list = list(self.coco_set.anns.keys())
        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        if min_area is not None:
            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]

        return image_list

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco'

    def has_class_info(self):
        return True

    def has_segmentation_info(self):
        return True

    def get_class_list(self):
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def _build_im_per_class(self):
        # Map class name -> list of indices into self.image_list.
        im_per_class = {}
        for i, im in enumerate(self.image_list):
            class_name = self.cats[self.coco_set.anns[im]['category_id']]['name']
            if class_name not in im_per_class:
                im_per_class[class_name] = [i]
            else:
                im_per_class[class_name].append(i)
        return im_per_class

    def get_images_in_class(self, class_name):
        return self.im_per_class[class_name]

    def get_image_info(self, im_id):
        anno = self._get_anno(im_id)

        # bbox is COCO-format [x, y, w, h].
        bbox = torch.Tensor(anno['bbox']).view(4,)

        mask = torch.Tensor(self.coco_set.annToMask(anno))

        # Any box with positive width/height counts as valid here
        # (coco_seq.py uses a stricter 50-pixel threshold instead).
        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, im_id):
        anno = self.coco_set.anns[self.image_list[im_id]]
        return anno

    def _get_image(self, im_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, im_id):
        # NOTE(review): bare `except` silently falls back to an all-None meta
        # dict on ANY failure; consider narrowing to `except Exception`.
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except:
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, im_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]
        return cat_dict_current['name']

    def get_image(self, image_id, anno=None):
        frame = self._get_image(image_id)

        if anno is None:
            anno = self.get_image_info(image_id)

        object_meta = self.get_meta_info(image_id)

        return frame, anno, object_meta


================================================
FILE: lib/train/dataset/coco_seq.py
================================================
import os
from .base_video_dataset import BaseVideoDataset
from lib.train.data import jpeg4py_loader
import torch
import random
from pycocotools.coco import COCO
from collections import OrderedDict
from lib.train.admin import env_settings


class MSCOCOSeq(BaseVideoDataset):
    """ The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,
        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download.
    The root folder should be organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
        """
        args:
            root - path to the coco dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                                                  jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,
                                                  opencv's imread is used.
            data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
                                   images will be used
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO', root, image_loader)

        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))

        # Load the COCO set.
        self.coco_set = COCO(self.anno_path)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()

        # One "sequence" entry per non-crowd annotation id (each image is a length-1 sequence).
        self.sequence_list = self._get_sequence_list()

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
        self.seq_per_class = self._build_seq_per_class()

    def _get_sequence_list(self):
        ann_list = list(self.coco_set.anns.keys())
        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        return seq_list

    def is_video_sequence(self):
        # Still images, not videos — the sampler treats each as a 1-frame sequence.
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco'

    def has_class_info(self):
        return True

    def get_class_list(self):
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def _build_seq_per_class(self):
        # Map class name -> list of indices into self.sequence_list.
        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def get_sequence_info(self, seq_id):
        anno = self._get_anno(seq_id)

        # Shape (1, 4): a single-frame "sequence" in COCO [x, y, w, h] format.
        bbox = torch.Tensor(anno['bbox']).view(1, 4)

        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)

        '''2021.1.3 To avoid too small bounding boxes.
           Here we change the threshold to 50 pixels'''
        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)

        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, seq_id):
        anno = self.coco_set.anns[self.sequence_list[seq_id]]

        return anno

    def _get_frames(self, seq_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, seq_id):
        # NOTE(review): bare `except` silently falls back to an all-None meta
        # dict on ANY failure; consider narrowing to `except Exception`.
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except:
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, seq_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
        return cat_dict_current['name']

    def get_frames(self, seq_id=None, frame_ids=None, anno=None):
        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
        # list containing these replicated images.
        frame = self._get_frames(seq_id)

        frame_list = [frame.copy() for _ in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            # NOTE(review): the same tensor object is shared across the replicated
            # frames (no .clone(), unlike got10k.py) — callers must not mutate it in place.
            anno_frames[key] = [value[0, ...]
                                for _ in frame_ids]

        object_meta = self.get_meta_info(seq_id)

        return frame_list, anno_frames, object_meta


================================================
FILE: lib/train/dataset/coco_seq_lmdb.py
================================================
import os
from .base_video_dataset import BaseVideoDataset
from lib.train.data import jpeg4py_loader
import torch
import random
from collections import OrderedDict
from lib.train.admin import env_settings
from lib.train.dataset.COCO_tool import COCO
from lib.utils.lmdb_utils import decode_img, decode_json
import time


class MSCOCOSeq_lmdb(BaseVideoDataset):
    """ The COCO dataset, read from an lmdb store. COCO is an image dataset. Thus, we treat each image as a
    sequence of length 1.

    Publication:
        Microsoft COCO: Common Objects in Context.
        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,
        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick
        ECCV, 2014
        https://arxiv.org/pdf/1405.0312.pdf

    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
    organized as follows.
        - coco_root
            - annotations
                - instances_train2014.json
                - instances_train2017.json
            - images
                - train2014
                - train2017

    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
        """
        args:
            root - path to the coco dataset (lmdb root).
            image_loader (default_image_loader) - The function to read the images. If installed,
                                                  jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,
                                                  opencv's imread is used.
            data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
                                   images will be used
            split - 'train' or 'val'.
            version - version of coco dataset (2014 or 2017)
        """
        root = env_settings().coco_dir if root is None else root
        super().__init__('COCO_lmdb', root, image_loader)
        self.root = root
        # Relative keys inside the lmdb store (not filesystem paths).
        self.img_pth = 'images/{}{}/'.format(split, version)
        self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)

        # Load the COCO set.
        print('loading annotations into memory...')
        tic = time.time()
        coco_json = decode_json(root, self.anno_path)
        print('Done (t={:0.2f}s)'.format(time.time() - tic))

        # COCO_tool.COCO takes the already-decoded json dict, unlike pycocotools.
        self.coco_set = COCO(coco_json)

        self.cats = self.coco_set.cats

        self.class_list = self.get_class_list()

        self.sequence_list = self._get_sequence_list()

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
        self.seq_per_class = self._build_seq_per_class()

    def _get_sequence_list(self):
        ann_list = list(self.coco_set.anns.keys())
        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]

        return seq_list

    def is_video_sequence(self):
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'coco_lmdb'

    def has_class_info(self):
        return True

    def get_class_list(self):
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def _build_seq_per_class(self):
        # Map class name -> list of indices into self.sequence_list.
        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
            if class_name not in seq_per_class:
                seq_per_class[class_name] = [i]
            else:
                seq_per_class[class_name].append(i)

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def get_sequence_info(self, seq_id):
        anno = self._get_anno(seq_id)

        # Shape (1, 4): a single-frame "sequence" in COCO [x, y, w, h] format.
        bbox = torch.Tensor(anno['bbox']).view(1, 4)

        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)

        '''2021.1.3 To avoid too small bounding boxes.
           Here we change the threshold to 50 pixels'''
        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)

        visible = valid.clone().byte()

        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, seq_id):
        anno = self.coco_set.anns[self.sequence_list[seq_id]]

        return anno

    def _get_frames(self, seq_id):
        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
        # img = self.image_loader(os.path.join(self.img_pth, path))
        img = decode_img(self.root, os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, seq_id):
        # NOTE(review): bare `except` silently falls back to an all-None meta
        # dict on ANY failure; consider narrowing to `except Exception`.
        try:
            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
                                       'motion_class': None,
                                       'major_class': cat_dict_current['supercategory'],
                                       'root_class': None,
                                       'motion_adverb': None})
        except:
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def get_class_name(self, seq_id):
        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
        return cat_dict_current['name']

    def get_frames(self, seq_id=None, frame_ids=None, anno=None):
        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
        # list containing these replicated images.
        frame = self._get_frames(seq_id)

        frame_list = [frame.copy() for _ in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            # NOTE(review): the same tensor object is shared across the replicated
            # frames (no .clone()) — callers must not mutate it in place.
            anno_frames[key] = [value[0, ...]
                                for _ in frame_ids]

        object_meta = self.get_meta_info(seq_id)

        return frame_list, anno_frames, object_meta


================================================
FILE: lib/train/dataset/got10k.py
================================================
import os
import os.path
import numpy as np
import torch
import csv
import pandas
import random
from collections import OrderedDict

from .base_video_dataset import BaseVideoDataset
from lib.train.data import jpeg4py_loader
from lib.train.admin import env_settings


class Got10k(BaseVideoDataset):
    """ GOT-10k dataset.

    Publication:
        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
        Lianghua Huang, Xin Zhao, and Kaiqi Huang
        arXiv:1810.11981, 2018
        https://arxiv.org/pdf/1810.11981.pdf

    Download dataset from http://got-10k.aitestunion.com/downloads
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):
        """
        args:
            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
                                            is used by default.
            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,
                    not NOT the official got-10k validation split. To use the official validation split, provide that as
                    the root folder instead.
            seq_ids - List containing the ids of the videos to be used for training.
                    Note: Only one of 'split' or 'seq_ids' options can be used at the same time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default
        """
        root = env_settings().got10k_dir if root is None else root
        super().__init__('GOT10k', root, image_loader)

        # all folders inside the root
        self.sequence_list = self._get_sequence_list()

        # seq_id is the index of the folder inside the got10k root path
        if split is not None:
            if seq_ids is not None:
                raise ValueError('Cannot set both split_name and seq_ids.')
            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')
            elif split == 'val':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')
            elif split == 'train_full':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')
            elif split == 'vottrain':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')
            elif split == 'votval':
                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # pandas >= 2.0 removed read_csv(..., squeeze=True); use DataFrame.squeeze instead.
            # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()
            seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze("columns").values.tolist()
        elif seq_ids is None:
            seq_ids = list(range(0, len(self.sequence_list)))

        self.sequence_list = [self.sequence_list[i] for i in seq_ids]

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))

        self.sequence_meta_info = self._load_meta_info()
        self.seq_per_class = self._build_seq_per_class()
        self.class_list = list(self.seq_per_class.keys())

        self.class_list.sort()

    def get_name(self):
        return 'got10k'

    def has_class_info(self):
        return True

    def has_occlusion_info(self):
        return True

    def _load_meta_info(self):
        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}
        return sequence_meta_info

    def _read_meta(self, seq_path):
        # Parse the per-sequence meta_info.ini; the [:-1] strips the trailing newline.
        # NOTE(review): bare `except` silently yields all-None meta on ANY failure.
        try:
            with open(os.path.join(seq_path,
                                   'meta_info.ini')) as f:
                meta_info = f.readlines()
            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],
                                       'motion_class': meta_info[6].split(': ')[-1][:-1],
                                       'major_class': meta_info[7].split(': ')[-1][:-1],
                                       'root_class': meta_info[8].split(': ')[-1][:-1],
                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})
        except:
            object_meta = OrderedDict({'object_class_name': None,
                                       'motion_class': None,
                                       'major_class': None,
                                       'root_class': None,
                                       'motion_adverb': None})
        return object_meta

    def _build_seq_per_class(self):
        # Map object class name -> list of indices into self.sequence_list.
        seq_per_class = {}
        for i, s in enumerate(self.sequence_list):
            object_class = self.sequence_meta_info[s]['object_class_name']
            if object_class in seq_per_class:
                seq_per_class[object_class].append(i)
            else:
                seq_per_class[object_class] = [i]

        return seq_per_class

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _get_sequence_list(self):
        with open(os.path.join(self.root, 'list.txt')) as f:
            dir_list = list(csv.reader(f))
        dir_list = [dir_name[0] for dir_name in dir_list]
        return dir_list

    def _read_bb_anno(self, seq_path):
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,
                             low_memory=False).values
        return torch.tensor(gt)

    def _read_target_visible(self, seq_path):
        # Read full occlusion and out_of_view
        occlusion_file = os.path.join(seq_path, "absence.label")
        cover_file = os.path.join(seq_path, "cover.label")

        with open(occlusion_file, 'r', newline='') as f:
            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])
        with open(cover_file, 'r', newline='') as f:
            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])

        # Visible when not absent and at least partially covered; cover is a 0-8 rating,
        # so cover/8 gives the visible fraction.
        target_visible = ~occlusion & (cover>0).byte()

        visible_ratio = cover.float() / 8
        return target_visible, visible_ratio

    def _get_sequence_path(self, seq_id):
        return os.path.join(self.root, self.sequence_list[seq_id])

    def get_sequence_info(self, seq_id):
        seq_path = self._get_sequence_path(seq_id)
bbox = self._read_bb_anno(seq_path) valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0) visible, visible_ratio = self._read_target_visible(seq_path) visible = visible & valid.byte() return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio} def _get_frame_path(self, seq_path, frame_id): return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1 def _get_frame(self, seq_path, frame_id): return self.image_loader(self._get_frame_path(seq_path, frame_id)) def get_class_name(self, seq_id): obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] return obj_meta['object_class_name'] def get_frames(self, seq_id, frame_ids, anno=None): seq_path = self._get_sequence_path(seq_id) obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]] frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] return frame_list, anno_frames, obj_meta ================================================ FILE: lib/train/dataset/got10k_lmdb.py ================================================ import os import os.path import numpy as np import torch import csv import pandas import random from collections import OrderedDict from .base_video_dataset import BaseVideoDataset from lib.train.data import jpeg4py_loader from lib.train.admin import env_settings '''2021.1.16 Gok10k for loading lmdb dataset''' from lib.utils.lmdb_utils import * class Got10k_lmdb(BaseVideoDataset): def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None): """ args: root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. split - 'train' or 'val'. 
Note: The validation split here is a subset of the official got-10k train split, not NOT the official got-10k validation split. To use the official validation split, provide that as the root folder instead. seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids' options can be used at the same time. data_fraction - Fraction of dataset to be used. The complete dataset is used by default use_lmdb - whether the dataset is stored in lmdb format """ root = env_settings().got10k_lmdb_dir if root is None else root super().__init__('GOT10k_lmdb', root, image_loader) # all folders inside the root self.sequence_list = self._get_sequence_list() # seq_id is the index of the folder inside the got10k root path if split is not None: if seq_ids is not None: raise ValueError('Cannot set both split_name and seq_ids.') train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..') if split == 'train': file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt') elif split == 'val': file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt') elif split == 'train_full': file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt') elif split == 'vottrain': file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt') elif split == 'votval': file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt') else: raise ValueError('Unknown split name.') seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist() elif seq_ids is None: seq_ids = list(range(0, len(self.sequence_list))) self.sequence_list = [self.sequence_list[i] for i in seq_ids] if data_fraction is not None: self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction)) self.sequence_meta_info = self._load_meta_info() self.seq_per_class = self._build_seq_per_class() 
self.class_list = list(self.seq_per_class.keys()) self.class_list.sort() def get_name(self): return 'got10k_lmdb' def has_class_info(self): return True def has_occlusion_info(self): return True def _load_meta_info(self): def _read_meta(meta_info): object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1], 'motion_class': meta_info[6].split(': ')[-1], 'major_class': meta_info[7].split(': ')[-1], 'root_class': meta_info[8].split(': ')[-1], 'motion_adverb': meta_info[9].split(': ')[-1]}) return object_meta sequence_meta_info = {} for s in self.sequence_list: try: meta_str = decode_str(self.root, "train/%s/meta_info.ini" %s) sequence_meta_info[s] = _read_meta(meta_str.split('\n')) except: sequence_meta_info[s] = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return sequence_meta_info def _build_seq_per_class(self): seq_per_class = {} for i, s in enumerate(self.sequence_list): object_class = self.sequence_meta_info[s]['object_class_name'] if object_class in seq_per_class: seq_per_class[object_class].append(i) else: seq_per_class[object_class] = [i] return seq_per_class def get_sequences_in_class(self, class_name): return self.seq_per_class[class_name] def _get_sequence_list(self): dir_str = decode_str(self.root, 'train/list.txt') dir_list = dir_str.split('\n') return dir_list def _read_bb_anno(self, seq_path): bb_anno_file = os.path.join(seq_path, "groundtruth.txt") gt_str_list = decode_str(self.root, bb_anno_file).split('\n')[:-1] # the last line in got10k is empty gt_list = [list(map(float, line.split(','))) for line in gt_str_list] gt_arr = np.array(gt_list).astype(np.float32) return torch.tensor(gt_arr) def _read_target_visible(self, seq_path): # full occlusion and out_of_view files occlusion_file = os.path.join(seq_path, "absence.label") cover_file = os.path.join(seq_path, "cover.label") # Read these files occ_list = list(map(int, decode_str(self.root, 
                                            occlusion_file).split('\n')[:-1]))  # the last line in got10k is empty
        occlusion = torch.ByteTensor(occ_list)
        cover_list = list(map(int, decode_str(self.root, cover_file).split('\n')[:-1]))  # the last line in got10k is empty
        cover = torch.ByteTensor(cover_list)

        # Visible when not absent and at least partially covered; cover is a 0-8 rating,
        # so cover/8 gives the visible fraction.
        target_visible = ~occlusion & (cover>0).byte()

        visible_ratio = cover.float() / 8
        return target_visible, visible_ratio

    def _get_sequence_path(self, seq_id):
        # lmdb keys are relative to the store root, hence the literal "train" prefix.
        return os.path.join("train", self.sequence_list[seq_id])

    def get_sequence_info(self, seq_id):
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible, visible_ratio = self._read_target_visible(seq_path)
        visible = visible & valid.byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}

    def _get_frame_path(self, seq_path, frame_id):
        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1

    def _get_frame(self, seq_path, frame_id):
        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))

    def get_class_name(self, seq_id):
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]
        return obj_meta['object_class_name']

    def get_frames(self, seq_id, frame_ids, anno=None):
        seq_path = self._get_sequence_path(seq_id)
        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]

        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            # Clone each per-frame slice so callers can mutate without aliasing.
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        return frame_list, anno_frames, obj_meta


================================================
FILE: lib/train/dataset/imagenetvid.py
================================================
import os
from .base_video_dataset import BaseVideoDataset
from lib.train.data import jpeg4py_loader
import xml.etree.ElementTree as ET
import json
import torch
from collections import OrderedDict
from lib.train.admin import env_settings


def get_target_to_image_ratio(seq):
    # sqrt of (first-frame target area / image area) — used to filter near-full-frame targets.
    anno = torch.Tensor(seq['anno'])
    img_sz = torch.Tensor(seq['image_size'])
    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()


class ImagenetVID(BaseVideoDataset):
    """ Imagenet VID dataset.

    Publication:
        ImageNet Large Scale Visual Recognition Challenge
        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,
        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei
        IJCV, 2015
        https://arxiv.org/pdf/1409.0575.pdf

    Download the dataset from http://image-net.org/
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):
        """
        args:
            root - path to the imagenet vid dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                                                  jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,
                                                  opencv's imread is used.
            min_length - Minimum allowed sequence length.
            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets
                              which cover complete image.
        """
        root = env_settings().imagenet_dir if root is None else root
        super().__init__("imagenetvid", root, image_loader)

        cache_file = os.path.join(root, 'cache.json')
        if os.path.isfile(cache_file):
            # If available, load the pre-processed cache file containing meta-info for each sequence
            with open(cache_file, 'r') as f:
                sequence_list_dict = json.load(f)

            self.sequence_list = sequence_list_dict
        else:
            # Else process the imagenet annotations and generate the cache file
            self.sequence_list = self._process_anno(root)

            with open(cache_file, 'w') as f:
                json.dump(self.sequence_list, f)

        # Filter the sequences based on min_length and max_target_area in the first frame
        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and
                              get_target_to_image_ratio(x) < max_target_area]

    def get_name(self):
        return 'imagenetvid'

    def get_num_sequences(self):
        return len(self.sequence_list)

    def get_sequence_info(self, seq_id):
        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])
        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)
        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()
        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}

    def _get_frame(self, sequence, frame_id):
        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])
        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])
        # Tracklets may start mid-video; frame_id is relative to the tracklet start.
        frame_number = frame_id + sequence['start_frame']
        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,
                                  '{:06d}.JPEG'.format(frame_number))
        return self.image_loader(frame_path)

    def get_frames(self, seq_id, frame_ids, anno=None):
        sequence = self.sequence_list[seq_id]

        frame_list = [self._get_frame(sequence, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        # Create anno dict
        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        # added the class info to the meta info
        object_meta = OrderedDict({'object_class': sequence['class_name'],
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta

    def _process_anno(self, root):
        # Builds individual tracklets from the per-frame XML annotations.
        # NOTE(review): the loop variable `set` shadows the builtin `set` within this method.
        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')

        all_sequences = []
        for set in sorted(os.listdir(base_vid_anno_path)):
            set_id = int(set.split('_')[-1])
            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):

                vid_id = int(vid.split('_')[-1])
                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))

                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))
                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]

                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')
                           for f in anno_files]

                tracklets = {}

                # Find all tracklets along with start frame
                for f_id, all_targets in enumerate(objects):
                    for target in all_targets:
                        tracklet_id = target.find('trackid').text
                        if tracklet_id not in tracklets:
                            tracklets[tracklet_id] = f_id

                for tracklet_id, tracklet_start in tracklets.items():
                    tracklet_anno = []
                    target_visible = []
                    class_name_id = None

                    # Follow the tracklet until its first missing frame; boxes are
                    # converted from [x1,y1,x2,y2] to [x,y,w,h].
                    for f_id in range(tracklet_start, len(objects)):
                        found = False
                        for target in objects[f_id]:
                            if target.find('trackid').text == tracklet_id:
                                if not class_name_id:
                                    class_name_id = target.find('name').text
                                x1 = int(target.find('bndbox/xmin').text)
                                y1 = int(target.find('bndbox/ymin').text)
                                x2 = int(target.find('bndbox/xmax').text)
                                y2 = int(target.find('bndbox/ymax').text)

                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])
                                target_visible.append(target.find('occluded').text == '0')

                                found = True
                                break
                        if not found:
                            break

                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,
                                    'start_frame': tracklet_start, 'anno': tracklet_anno,
                                    'target_visible': target_visible, 'image_size': image_size}
                    all_sequences.append(new_sequence)

        return all_sequences
def get_target_to_image_ratio(seq):
    """Return sqrt(area(first-frame box) / area(image)) for a cached sequence record."""
    boxes = torch.Tensor(seq['anno'])
    img_sz = torch.Tensor(seq['image_size'])
    return (boxes[0, 2:4].prod() / img_sz.prod()).sqrt()


class ImagenetVID_lmdb(BaseVideoDataset):
    """ Imagenet VID dataset (lmdb-backed).

    Publication:
        ImageNet Large Scale Visual Recognition Challenge
        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang,
        Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei
        IJCV, 2015
        https://arxiv.org/pdf/1409.0575.pdf

    Download the dataset from http://image-net.org/
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):
        """
        args:
            root - path to the imagenet vid dataset.
            image_loader (default_image_loader) - The function to read the images. If installed,
                jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else, opencv's imread is used.
            min_length - Minimum allowed sequence length.
            max_target_area - max allowed ratio between target area and image area. Can be used to
                filter out targets which cover the complete image.
        """
        root = env_settings().imagenet_dir if root is None else root
        super().__init__("imagenetvid_lmdb", root, image_loader)

        # The lmdb export always ships the pre-processed meta-info cache.
        self.sequence_list = decode_json(root, "cache.json")

        # Drop sequences that are too short or whose first-frame target nearly fills the image.
        self.sequence_list = [s for s in self.sequence_list
                              if len(s['anno']) >= min_length
                              and get_target_to_image_ratio(s) < max_target_area]

    def get_name(self):
        return 'imagenetvid_lmdb'

    def get_num_sequences(self):
        return len(self.sequence_list)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox' (xywh), 'valid' and 'visible' flags for one sequence."""
        seq = self.sequence_list[seq_id]
        bb_anno = torch.Tensor(seq['anno'])
        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)
        # Visible only when annotated visible AND the box has positive size.
        visible = torch.ByteTensor(seq['target_visible']) & valid.byte()
        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}

    def _get_frame(self, sequence, frame_id):
        # Frame ids are relative to the tracklet; offset by its start frame within the video.
        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])
        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])
        frame_number = frame_id + sequence['start_frame']
        frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,
                                  '{:06d}.JPEG'.format(frame_number))
        return decode_img(self.root, frame_path)

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus per-frame annotation slices and object meta-info."""
        sequence = self.sequence_list[seq_id]
        frame_list = [self._get_frame(sequence, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {key: [value[f_id, ...].clone() for f_id in frame_ids]
                       for key, value in anno.items()}

        # added the class info to the meta info
        object_meta = OrderedDict({'object_class': sequence['class_name'],
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta
class Lasot(BaseVideoDataset):
    """ LaSOT dataset.

    Publication:
        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking
        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu,
        Chunyuan Liao and Haibin Ling
        CVPR, 2019
        https://arxiv.org/pdf/1809.07845.pdf

    Download the dataset from https://cis.temple.edu/lasot/download.html
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
        """
        args:
            root - path to the lasot dataset.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py
                (https://github.com/ajkxyz/jpeg4py) is used by default.
            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5],
                then the videos with subscripts -1, -3, and -5 from each class will be used for training.
            split - If split='train', the official train split (protocol-II) is used for training.
                Note: Only one of vid_ids or split option can be used at a time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default.
        """
        root = env_settings().lasot_dir if root is None else root
        super().__init__('LaSOT', root, image_loader)

        # Keep a list of all classes (one directory per class under the root).
        self.class_list = [f for f in os.listdir(self.root)]
        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}

        self.sequence_list = self._build_sequence_list(vid_ids, split)

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))

        self.seq_per_class = self._build_class_list()

    def _build_sequence_list(self, vid_ids=None, split=None):
        """Build the list of sequence names from the official split file or from vid_ids.

        Exactly one of `split` / `vid_ids` must be given; raises ValueError otherwise.
        """
        if split is not None:
            if vid_ids is not None:
                raise ValueError('Cannot set both split_name and vid_ids.')
            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # pandas >= 2.0 removed read_csv's `squeeze` kwarg; squeeze the frame instead.
            sequence_list = pandas.read_csv(file_path, header=None).squeeze("columns").values.tolist()
        elif vid_ids is not None:
            sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]
        else:
            raise ValueError('Set either split_name or vid_ids.')

        return sequence_list

    def _build_class_list(self):
        """Group sequence indices by class name (class is the part before the '-')."""
        seq_per_class = {}
        for seq_id, seq_name in enumerate(self.sequence_list):
            class_name = seq_name.split('-')[0]
            if class_name in seq_per_class:
                seq_per_class[class_name].append(seq_id)
            else:
                seq_per_class[class_name] = [seq_id]
        return seq_per_class

    def get_name(self):
        return 'lasot'

    def has_class_info(self):
        return True

    def has_occlusion_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def get_num_classes(self):
        return len(self.class_list)

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _read_bb_anno(self, seq_path):
        """Read the per-frame xywh ground-truth boxes of a sequence as a float tensor."""
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32,
                             na_filter=False, low_memory=False).values
        return torch.tensor(gt)

    def _read_target_visible(self, seq_path):
        """Return a ByteTensor marking frames that are neither fully occluded nor out of view."""
        occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
        out_of_view_file = os.path.join(seq_path, "out_of_view.txt")

        # newline='' on both opens, as recommended for the csv module (was missing on the second).
        with open(occlusion_file, 'r', newline='') as f:
            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
        with open(out_of_view_file, 'r', newline='') as f:
            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])

        target_visible = ~occlusion & ~out_of_view
        return target_visible

    def _get_sequence_path(self, seq_id):
        seq_name = self.sequence_list[seq_id]
        class_name = seq_name.split('-')[0]
        vid_id = seq_name.split('-')[1]
        return os.path.join(self.root, class_name, class_name + '-' + vid_id)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox' (xywh), 'valid' and 'visible' flags for one sequence."""
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible = self._read_target_visible(seq_path) & valid.byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible}

    def _get_frame_path(self, seq_path, frame_id):
        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1))    # frames start from 1

    def _get_frame(self, seq_path, frame_id):
        return self.image_loader(self._get_frame_path(seq_path, frame_id))

    def _get_class(self, seq_path):
        # The class name is the parent directory of the sequence folder
        # (<root>/<class>/<class>-<vid>). Fix: use os.path instead of splitting on '/'
        # so this stays correct on Windows, where os.path.join uses '\\'.
        return os.path.basename(os.path.dirname(seq_path))

    def get_class_name(self, seq_id):
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)
        return obj_class

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus per-frame annotation slices and object meta-info."""
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)

        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta
class Lasot_lmdb(BaseVideoDataset):
    """LaSOT dataset backed by an lmdb export (2021.1.16).

    Mirrors `Lasot` but reads annotations and images through lib.utils.lmdb_utils
    instead of the filesystem.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
        """
        args:
            root - path to the lasot dataset.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py
                (https://github.com/ajkxyz/jpeg4py) is used by default.
            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5],
                then the videos with subscripts -1, -3, and -5 from each class will be used for training.
            split - If split='train', the official train split (protocol-II) is used for training.
                Note: Only one of vid_ids or split option can be used at a time.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default.
        """
        root = env_settings().lasot_lmdb_dir if root is None else root
        super().__init__('LaSOT_lmdb', root, image_loader)

        self.sequence_list = self._build_sequence_list(vid_ids, split)
        # Derive the (ordered, de-duplicated) class list from the sequence names.
        class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]
        self.class_list = []
        for ele in class_list:
            if ele not in self.class_list:
                self.class_list.append(ele)
        # Keep a list of all classes
        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))
        self.seq_per_class = self._build_class_list()

    def _build_sequence_list(self, vid_ids=None, split=None):
        """Build the list of sequence names from the official split file or from vid_ids."""
        if split is not None:
            if vid_ids is not None:
                raise ValueError('Cannot set both split_name and vid_ids.')
            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
            if split == 'train':
                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
            else:
                raise ValueError('Unknown split name.')
            # Fix: read_csv's `squeeze=True` kwarg was removed in pandas 2.0; squeeze the
            # resulting frame instead (same change lasot.py already carries).
            sequence_list = pandas.read_csv(file_path, header=None).squeeze("columns").values.tolist()
        elif vid_ids is not None:
            # NOTE(review): in __init__ this method runs before self.class_list is assigned,
            # so the vid_ids path would raise AttributeError as written — confirm the lmdb
            # dataset is only ever used with split='train'.
            sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]
        else:
            raise ValueError('Set either split_name or vid_ids.')

        return sequence_list

    def _build_class_list(self):
        """Group sequence indices by class name (class is the part before the '-')."""
        seq_per_class = {}
        for seq_id, seq_name in enumerate(self.sequence_list):
            class_name = seq_name.split('-')[0]
            if class_name in seq_per_class:
                seq_per_class[class_name].append(seq_id)
            else:
                seq_per_class[class_name] = [seq_id]
        return seq_per_class

    def get_name(self):
        return 'lasot_lmdb'

    def has_class_info(self):
        return True

    def has_occlusion_info(self):
        return True

    def get_num_sequences(self):
        return len(self.sequence_list)

    def get_num_classes(self):
        return len(self.class_list)

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _read_bb_anno(self, seq_path):
        """Read the per-frame xywh ground-truth boxes of a sequence from lmdb as a float tensor."""
        bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
        gt_str_list = decode_str(self.root, bb_anno_file).split('\n')[:-1]  # the last line is empty
        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]
        gt_arr = np.array(gt_list).astype(np.float32)
        return torch.tensor(gt_arr)

    def _read_target_visible(self, seq_path):
        """Return a ByteTensor marking frames that are neither fully occluded nor out of view."""
        occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
        out_of_view_file = os.path.join(seq_path, "out_of_view.txt")

        occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))
        occlusion = torch.ByteTensor(occ_list)
        out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))
        out_of_view = torch.ByteTensor(out_view_list)

        target_visible = ~occlusion & ~out_of_view
        return target_visible

    def _get_sequence_path(self, seq_id):
        # Path is relative to the lmdb root: <class>/<class>-<vid>.
        seq_name = self.sequence_list[seq_id]
        class_name = seq_name.split('-')[0]
        vid_id = seq_name.split('-')[1]
        return os.path.join(class_name, class_name + '-' + vid_id)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox' (xywh), 'valid' and 'visible' flags for one sequence."""
        seq_path = self._get_sequence_path(seq_id)
        bbox = self._read_bb_anno(seq_path)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        visible = self._read_target_visible(seq_path) & valid.byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible}

    def _get_frame_path(self, seq_path, frame_id):
        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1))    # frames start from 1

    def _get_frame(self, seq_path, frame_id):
        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))

    def _get_class(self, seq_path):
        # The class name is the parent directory component of <class>/<class>-<vid>.
        # Fix: use os.path instead of splitting on '/' so this stays correct on Windows.
        return os.path.basename(os.path.dirname(seq_path))

    def get_class_name(self, seq_id):
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)
        return obj_class

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus per-frame annotation slices and object meta-info."""
        seq_path = self._get_sequence_path(seq_id)
        obj_class = self._get_class(seq_path)

        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {}
        for key, value in anno.items():
            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]

        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta
def list_sequences(root, set_ids):
    """ Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)

    args:
        root: Root directory to TrackingNet
        set_ids: Sets (0-11) which are to be used

    returns:
        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence
    """
    sequence_list = []
    for s in set_ids:
        anno_dir = os.path.join(root, "TRAIN_" + str(s), "anno")
        for anno_file in os.listdir(anno_dir):
            if anno_file.endswith('.txt'):
                sequence_list.append((s, os.path.splitext(anno_file)[0]))
    return sequence_list


class TrackingNet(BaseVideoDataset):
    """ TrackingNet dataset.

    Publication:
        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.
        Matthias Mueller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
        ECCV, 2018
        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf

    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):
        """
        args:
            root - The path to the TrackingNet folder, containing the training sets.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py
                (https://github.com/ajkxyz/jpeg4py) is used by default.
            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training.
                If None, all the sets (0 - 11) will be used.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default.
        """
        root = env_settings().trackingnet_dir if root is None else root
        super().__init__('TrackingNet', root, image_loader)

        if set_ids is None:
            set_ids = [i for i in range(12)]
        self.set_ids = set_ids

        # Sequence list is a list of tuples (set_id, video_name).
        self.sequence_list = list_sequences(self.root, self.set_ids)

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))

        self.seq_to_class_map, self.seq_per_class = self._load_class_info()

        # we do not have the class_lists for the tracking net
        self.class_list = list(self.seq_per_class.keys())
        self.class_list.sort()

    def _load_class_info(self):
        """Load the sequence->class map shipped with the code and group sequences per class."""
        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')

        with open(class_map_path, 'r') as f:
            seq_to_class_map = {line.split('\t')[0]: line.rstrip().split('\t')[1] for line in f}

        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = seq_to_class_map.get(seq[1], 'Unknown')
            seq_per_class.setdefault(class_name, []).append(i)

        return seq_to_class_map, seq_per_class

    def get_name(self):
        return 'trackingnet'

    def has_class_info(self):
        return True

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _read_bb_anno(self, seq_id):
        """Read the per-frame xywh ground-truth boxes of a sequence as a float tensor."""
        set_id, vid_name = self.sequence_list[seq_id]
        bb_anno_file = os.path.join(self.root, "TRAIN_" + str(set_id), "anno", vid_name + ".txt")
        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32,
                             na_filter=False, low_memory=False).values
        return torch.tensor(gt)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox' (xywh), 'valid' and 'visible' flags for one sequence."""
        bbox = self._read_bb_anno(seq_id)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        # TrackingNet carries no occlusion labels, so visibility follows box validity.
        visible = valid.clone().byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible}

    def _get_frame(self, seq_id, frame_id):
        set_id, vid_name = self.sequence_list[seq_id]
        frame_path = os.path.join(self.root, "TRAIN_" + str(set_id), "frames",
                                  vid_name, str(frame_id) + ".jpg")
        return self.image_loader(frame_path)

    def _get_class(self, seq_id):
        seq_name = self.sequence_list[seq_id][1]
        return self.seq_to_class_map[seq_name]

    def get_class_name(self, seq_id):
        return self._get_class(seq_id)

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus per-frame annotation slices and object meta-info."""
        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {key: [value[f_id, ...].clone() for f_id in frame_ids]
                       for key, value in anno.items()}

        obj_class = self._get_class(seq_id)
        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta
def list_sequences(root):
    """ Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)

    args:
        root: Root directory to TrackingNet

    returns:
        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence
    """
    fname = os.path.join(root, "seq_list.json")
    with open(fname, "r") as f:
        return json.loads(f.read())


class TrackingNet_lmdb(BaseVideoDataset):
    """ TrackingNet dataset (lmdb-backed).

    Publication:
        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.
        Matthias Mueller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem
        ECCV, 2018
        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf

    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.
    """

    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):
        """
        args:
            root - The path to the TrackingNet folder, containing the training sets.
            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py
                (https://github.com/ajkxyz/jpeg4py) is used by default.
            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training.
                If None, all the sets (0 - 11) will be used.
            data_fraction - Fraction of dataset to be used. The complete dataset is used by default.
        """
        root = env_settings().trackingnet_lmdb_dir if root is None else root
        super().__init__('TrackingNet_lmdb', root, image_loader)

        if set_ids is None:
            set_ids = [i for i in range(12)]
        self.set_ids = set_ids

        # Sequence list is a list of tuples (set_id, video_name).
        self.sequence_list = list_sequences(self.root)

        if data_fraction is not None:
            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))

        self.seq_to_class_map, self.seq_per_class = self._load_class_info()

        # we do not have the class_lists for the tracking net
        self.class_list = list(self.seq_per_class.keys())
        self.class_list.sort()

    def _load_class_info(self):
        """Load the sequence->class map shipped with the code and group sequences per class."""
        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')

        with open(class_map_path, 'r') as f:
            seq_to_class_map = {line.split('\t')[0]: line.rstrip().split('\t')[1] for line in f}

        seq_per_class = {}
        for i, seq in enumerate(self.sequence_list):
            class_name = seq_to_class_map.get(seq[1], 'Unknown')
            seq_per_class.setdefault(class_name, []).append(i)

        return seq_to_class_map, seq_per_class

    def get_name(self):
        return 'trackingnet_lmdb'

    def has_class_info(self):
        return True

    def get_sequences_in_class(self, class_name):
        return self.seq_per_class[class_name]

    def _read_bb_anno(self, seq_id):
        """Read the per-frame xywh ground-truth boxes of a sequence from lmdb as a float tensor."""
        set_id, vid_name = self.sequence_list[seq_id]
        gt_str_list = decode_str(os.path.join(self.root, "TRAIN_%d_lmdb" % set_id),
                                 os.path.join("anno", vid_name + ".txt")).split('\n')[:-1]
        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]
        gt_arr = np.array(gt_list).astype(np.float32)
        return torch.tensor(gt_arr)

    def get_sequence_info(self, seq_id):
        """Return per-frame 'bbox' (xywh), 'valid' and 'visible' flags for one sequence."""
        bbox = self._read_bb_anno(seq_id)

        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
        # TrackingNet carries no occlusion labels, so visibility follows box validity.
        visible = valid.clone().byte()

        return {'bbox': bbox, 'valid': valid, 'visible': visible}

    def _get_frame(self, seq_id, frame_id):
        set_id, vid_name = self.sequence_list[seq_id]
        return decode_img(os.path.join(self.root, "TRAIN_%d_lmdb" % set_id),
                          os.path.join("frames", vid_name, str(frame_id) + ".jpg"))

    def _get_class(self, seq_id):
        seq_name = self.sequence_list[seq_id][1]
        return self.seq_to_class_map[seq_name]

    def get_class_name(self, seq_id):
        return self._get_class(seq_id)

    def get_frames(self, seq_id, frame_ids, anno=None):
        """Load the requested frames plus per-frame annotation slices and object meta-info."""
        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]

        if anno is None:
            anno = self.get_sequence_info(seq_id)

        anno_frames = {key: [value[f_id, ...].clone() for f_id in frame_ids]
                       for key, value in anno.items()}

        obj_class = self._get_class(seq_id)
        object_meta = OrderedDict({'object_class_name': obj_class,
                                   'motion_class': None,
                                   'major_class': None,
                                   'root_class': None,
                                   'motion_adverb': None})

        return frame_list, anno_frames, object_meta
"TRAIN_%d_lmdb" % set_id), os.path.join("frames", vid_name, str(frame_id) + ".jpg")) def _get_class(self, seq_id): seq_name = self.sequence_list[seq_id][1] return self.seq_to_class_map[seq_name] def get_class_name(self, seq_id): obj_class = self._get_class(seq_id) return obj_class def get_frames(self, seq_id, frame_ids, anno=None): frame_list = [self._get_frame(seq_id, f) for f in frame_ids] if anno is None: anno = self.get_sequence_info(seq_id) anno_frames = {} for key, value in anno.items(): anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids] obj_class = self._get_class(seq_id) object_meta = OrderedDict({'object_class_name': obj_class, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None}) return frame_list, anno_frames, object_meta ================================================ FILE: lib/train/run_training.py ================================================ import os import sys import argparse import importlib import cv2 as cv import torch.backends.cudnn import torch.distributed as dist import torch import random import numpy as np torch.backends.cudnn.benchmark = False import _init_paths import lib.train.admin.settings as ws_settings def init_seeds(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.set_num_threads(4) cv.setNumThreads(1) cv.ocl.setUseOpenCL(False) def run_training(script_name, config_name, cudnn_benchmark=True, local_rank=-1, save_dir=None, base_seed=None, use_lmdb=False, script_name_prv=None, config_name_prv=None, use_wandb=False, distill=None, script_teacher=None, config_teacher=None): """Run the train script. args: script_name: Name of emperiment in the "experiments/" folder. config_name: Name of the yaml file in the "experiments/". cudnn_benchmark: Use cudnn benchmark or not (default is True). """ if save_dir is None: print("save_dir dir is not given. 
Use the default dir instead.") # This is needed to avoid strange crashes related to opencv torch.set_num_threads(4) cv.setNumThreads(4) torch.backends.cudnn.benchmark = cudnn_benchmark print('script_name: {}.py config_name: {}.yaml'.format(script_name, config_name)) '''2021.1.5 set seed for different process''' if base_seed is not None: if local_rank != -1: init_seeds(base_seed + local_rank) else: init_seeds(base_seed) settings = ws_settings.Settings() settings.script_name = script_name settings.config_name = config_name settings.project_path = 'train/{}/{}'.format(script_name, config_name) if script_name_prv is not None and config_name_prv is not None: settings.project_path_prv = 'train/{}/{}'.format(script_name_prv, config_name_prv) settings.local_rank = local_rank settings.save_dir = os.path.abspath(save_dir) settings.use_lmdb = use_lmdb prj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) settings.cfg_file = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_name, config_name)) settings.use_wandb = use_wandb if distill: settings.distill = distill settings.script_teacher = script_teacher settings.config_teacher = config_teacher if script_teacher is not None and config_teacher is not None: settings.project_path_teacher = 'train/{}/{}'.format(script_teacher, config_teacher) settings.cfg_file_teacher = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_teacher, config_teacher)) expr_module = importlib.import_module('lib.train.train_script_distill') else: expr_module = importlib.import_module('lib.train.train_script') expr_func = getattr(expr_module, 'run') expr_func(settings) def main(): parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.') parser.add_argument('--script', type=str, required=True, help='Name of the train script.') parser.add_argument('--config', type=str, required=True, help="Name of the config file.") parser.add_argument('--cudnn_benchmark', type=bool, default=False, help='Set 
cudnn benchmark on (1) or off (0) (default is on).') parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training') parser.add_argument('--save_dir', type=str, help='the directory to save checkpoints and logs') parser.add_argument('--seed', type=int, default=42, help='seed for random numbers') parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0) # whether datasets are in lmdb format parser.add_argument('--script_prv', type=str, default=None, help='Name of the train script of previous model.') parser.add_argument('--config_prv', type=str, default=None, help="Name of the config file of previous model.") parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0) # whether to use wandb # for knowledge distillation parser.add_argument('--distill', type=int, choices=[0, 1], default=0) # whether to use knowledge distillation parser.add_argument('--script_teacher', type=str, help='teacher script name') parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name') args = parser.parse_args() if args.local_rank != -1: dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) else: torch.cuda.set_device(0) run_training(args.script, args.config, cudnn_benchmark=args.cudnn_benchmark, local_rank=args.local_rank, save_dir=args.save_dir, base_seed=args.seed, use_lmdb=args.use_lmdb, script_name_prv=args.script_prv, config_name_prv=args.config_prv, use_wandb=args.use_wandb, distill=args.distill, script_teacher=args.script_teacher, config_teacher=args.config_teacher) if __name__ == '__main__': main() ================================================ FILE: lib/train/train_script.py ================================================ import os # loss function related from lib.utils.box_ops import giou_loss from torch.nn.functional import l1_loss from torch.nn import BCEWithLogitsLoss # train pipeline related from lib.train.trainers import LTRTrainer, LTRSeqTrainer, 
def names2datasets(name_list: list, settings, image_loader):
    """Translate dataset-name strings into constructed dataset objects.

    args:
        name_list - list of dataset identifiers (see the assert below for the valid set).
        settings - training settings; provides env paths and the use_lmdb flag.
        image_loader - image reading function passed through to every dataset.
    returns:
        list of dataset instances, one per entry of name_list.
    """
    assert isinstance(name_list, list)
    datasets = []
    #settings.use_lmdb = True
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        elif name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        elif name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full',
                                            image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        elif name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        elif name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implement")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        elif name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017",
                                               image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        elif name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        elif name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
    return datasets


def slt_collate(batch):
    """Collate a list of per-sample dicts into one dict of per-key lists.

    Every sample is assumed to carry the same keys as batch[0]; values are kept
    as-is (no stacking), which suits variable-length sequence samples.
    """
    return {key: [example[key] for example in batch] for key in batch[0]}
""" __initialized = False def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): if collate_fn is None: collate_fn = slt_collate super(SLTLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn) self.name = name self.training = training self.epoch_interval = epoch_interval self.stack_dim = stack_dim def run(settings): settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2' # update the default configs with config file if not os.path.exists(settings.cfg_file): raise ValueError("%s doesn't exist." % settings.cfg_file) config_module = importlib.import_module("lib.config.%s.config" % settings.script_name) cfg = config_module.cfg config_module.update_config_from_file(settings.cfg_file) if settings.local_rank in [-1, 0]: print("New configuration is shown below.") for key in cfg.keys(): print("%s configuration:" % key, cfg[key]) print('\n') # update settings based on cfg update_settings(settings, cfg) # Record the training log log_dir = os.path.join(settings.save_dir, 'logs') if settings.local_rank in [-1, 0]: if not os.path.exists(log_dir): os.makedirs(log_dir) settings.log_file = os.path.join(log_dir, "%s-%s.log" % (settings.script_name, settings.config_name)) # Build dataloaders if "RepVGG" in cfg.MODEL.BACKBONE.TYPE or "swin" in cfg.MODEL.BACKBONE.TYPE or "LightTrack" in cfg.MODEL.BACKBONE.TYPE: cfg.ckpt_dir = settings.save_dir bins = cfg.MODEL.BINS search_size = cfg.DATA.SEARCH.SIZE # Create network if settings.script_name == "artrack": net = build_artrack(cfg) loader_train, loader_val = build_dataloaders(cfg, settings) elif settings.script_name == "artrack_seq": net = build_artrack_seq(cfg) dataset_train = sequence_sampler.SequenceSampler( 
datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_GAP, max_interval=cfg.DATA.MAX_INTERVAL, num_search_frames=cfg.DATA.SEARCH.NUMBER, num_template_frames=1, frame_sample_mode='random_interval', prob=cfg.DATA.INTERVAL_PROB) loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, shuffle=False, drop_last=True) elif settings.script_name == "artrackv2": net = build_artrackv2(cfg) loader_train, loader_val = build_dataloaders(cfg, settings) elif settings.script_name == "artrackv2_seq": net = build_artrackv2_seq(cfg) dataset_train = sequence_sampler_v2.SequenceSampler( datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_GAP, max_interval=cfg.DATA.MAX_INTERVAL, num_search_frames=cfg.DATA.SEARCH.NUMBER, num_template_frames=1, frame_sample_mode='random_interval', prob=cfg.DATA.INTERVAL_PROB) loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, shuffle=False, drop_last=True) else: raise ValueError("illegal script name") # wrap networks to distributed one net.cuda() if settings.local_rank != -1: # net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) # add syncBN converter net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True) settings.device = torch.device("cuda:%d" % settings.local_rank) else: settings.device = torch.device("cuda:0") settings.deep_sup = getattr(cfg.TRAIN, "DEEP_SUPERVISION", False) settings.distill = getattr(cfg.TRAIN, "DISTILL", False) settings.distill_loss_type = getattr(cfg.TRAIN, "DISTILL_LOSS_TYPE", "KL") # Loss functions and Actors if settings.script_name == "artrack": focal_loss = FocalLoss() 
objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss} loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2.} actor = ARTrackActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size) elif settings.script_name == "artrack_seq": focal_loss = FocalLoss() objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss} loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2.} actor = ARTrackSeqActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size) elif settings.script_name == "artrackv2": focal_loss = FocalLoss() objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss} loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'score': cfg.TRAIN.SCORE_WEIGHT} actor = ARTrackV2Actor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size) elif settings.script_name == "artrackv2_seq": focal_loss = FocalLoss() objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss} loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'score_update': cfg.TRAIN.SCORE_WEIGHT} actor = ARTrackV2SeqActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size) else: raise ValueError("illegal script name") # if cfg.TRAIN.DEEP_SUPERVISION: # raise ValueError("Deep supervision is not supported now.") # Optimizer, parameters, and learning rates if settings.script_name == 'artrack' or settings.script_name == 'artrack_seq': optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg) elif settings.script_name == 'artrackv2' or settings.script_name == 'artrackv2_seq': optimizer, lr_scheduler = get_optimizer_scheduler_v2(net, cfg) use_amp = getattr(cfg.TRAIN, "AMP", False) if 
settings.script_name == "artrack": trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp) elif settings.script_name == "artrack_seq": trainer = LTRSeqTrainer(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp) elif settings.script_name == "artrackv2": trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp) elif settings.script_name == "artrackv2_seq": trainer = LTRSeqTrainerV2(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp) # train process trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True) ================================================ FILE: lib/train/train_script_distill.py ================================================ import os # loss function related from lib.utils.box_ops import giou_loss from torch.nn.functional import l1_loss from torch.nn import BCEWithLogitsLoss # train pipeline related from lib.train.trainers import LTRTrainer # distributed training related from torch.nn.parallel import DistributedDataParallel as DDP # some more advanced functions from .base_functions import * # network related from lib.models.stark import build_starks, build_starkst from lib.models.stark import build_stark_lightning_x_trt # forward propagation related from lib.train.actors import STARKLightningXtrtdistillActor # for import modules import importlib def build_network(script_name, cfg): # Create network if script_name == "stark_s": net = build_starks(cfg) elif script_name == "stark_st1" or script_name == "stark_st2": net = build_starkst(cfg) elif script_name == "stark_lightning_X_trt": net = build_stark_lightning_x_trt(cfg, phase="train") else: raise ValueError("illegal script name") return net def run(settings): settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2' # update the default configs with config file if not os.path.exists(settings.cfg_file): raise 
ValueError("%s doesn't exist." % settings.cfg_file) config_module = importlib.import_module("lib.config.%s.config" % settings.script_name) cfg = config_module.cfg config_module.update_config_from_file(settings.cfg_file) if settings.local_rank in [-1, 0]: print("New configuration is shown below.") for key in cfg.keys(): print("%s configuration:" % key, cfg[key]) print('\n') # update the default teacher configs with teacher config file if not os.path.exists(settings.cfg_file_teacher): raise ValueError("%s doesn't exist." % settings.cfg_file_teacher) config_module_teacher = importlib.import_module("lib.config.%s.config" % settings.script_teacher) cfg_teacher = config_module_teacher.cfg config_module_teacher.update_config_from_file(settings.cfg_file_teacher) if settings.local_rank in [-1, 0]: print("New teacher configuration is shown below.") for key in cfg_teacher.keys(): print("%s configuration:" % key, cfg_teacher[key]) print('\n') # update settings based on cfg update_settings(settings, cfg) # Record the training log log_dir = os.path.join(settings.save_dir, 'logs') if settings.local_rank in [-1, 0]: if not os.path.exists(log_dir): os.makedirs(log_dir) settings.log_file = os.path.join(log_dir, "%s-%s.log" % (settings.script_name, settings.config_name)) # Build dataloaders loader_train, loader_val = build_dataloaders(cfg, settings) if "RepVGG" in cfg.MODEL.BACKBONE.TYPE or "swin" in cfg.MODEL.BACKBONE.TYPE: cfg.ckpt_dir = settings.save_dir """turn on the distillation mode""" cfg.TRAIN.DISTILL = True cfg_teacher.TRAIN.DISTILL = True net = build_network(settings.script_name, cfg) net_teacher = build_network(settings.script_teacher, cfg_teacher) # wrap networks to distributed one net.cuda() net_teacher.cuda() net_teacher.eval() if settings.local_rank != -1: net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True) net_teacher = DDP(net_teacher, device_ids=[settings.local_rank], find_unused_parameters=True) settings.device = torch.device("cuda:%d" % 
settings.local_rank) else: settings.device = torch.device("cuda:0") # settings.deep_sup = getattr(cfg.TRAIN, "DEEP_SUPERVISION", False) # settings.distill = getattr(cfg.TRAIN, "DISTILL", False) settings.distill_loss_type = getattr(cfg.TRAIN, "DISTILL_LOSS_TYPE", "L1") # Loss functions and Actors if settings.script_name == "stark_lightning_X_trt": objective = {'giou': giou_loss, 'l1': l1_loss} loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT} actor = STARKLightningXtrtdistillActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, net_teacher=net_teacher) else: raise ValueError("illegal script name") # Optimizer, parameters, and learning rates optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg) use_amp = getattr(cfg.TRAIN, "AMP", False) trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp) # train process trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True, distill=True) ================================================ FILE: lib/train/trainers/__init__.py ================================================ from .base_trainer import BaseTrainer from .ltr_trainer import LTRTrainer from .ltr_seq_trainer import LTRSeqTrainer from .ltr_seq_trainer_v2 import LTRSeqTrainerV2 ================================================ FILE: lib/train/trainers/base_trainer.py ================================================ import os import glob import torch import traceback from lib.train.admin import multigpu from torch.utils.data.distributed import DistributedSampler class BaseTrainer: """Base trainer class. Contains functions for training and saving/loading checkpoints. Trainer classes should inherit from this one and overload the train_epoch function.""" def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None): """ args: actor - The actor for training the network loaders - list of dataset loaders, e.g. [train_loader, val_loader]. 
class BaseTrainer:
    """Base trainer class. Contains functions for training and saving/loading
    checkpoints. Trainer classes should inherit from this one and overload the
    train_epoch function."""

    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g. [train_loader, val_loader].
                      In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g. Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
        """
        self.actor = actor
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.loaders = loaders

        self.update_settings(settings)

        self.epoch = 0          # last completed epoch (0 = nothing trained yet)
        self.stats = {}

        # Device resolution order: explicit settings.device, else cuda:0 when
        # available and requested, else cpu.
        self.device = getattr(settings, 'device', None)
        if self.device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() and settings.use_gpu else "cpu")

        self.actor.to(self.device)
        self.settings = settings

    def update_settings(self, settings=None):
        """Updates the trainer settings. Must be called to update internal settings."""
        if settings is not None:
            self.settings = settings

        if self.settings.env.workspace_dir is not None:
            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)
            # 2021.1.4 New function: specify checkpoint dir.  settings.save_dir
            # overrides the workspace default when provided.
            if self.settings.save_dir is None:
                self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')
            else:
                self._checkpoint_dir = os.path.join(self.settings.save_dir, 'checkpoints')
            print("checkpoints will be saved to %s" % self._checkpoint_dir)

            # Only the main process (rank -1 single-GPU, or rank 0) creates it.
            if self.settings.local_rank in [-1, 0]:
                if not os.path.exists(self._checkpoint_dir):
                    print("Training with multiple GPUs. checkpoints directory doesn't exist. "
                          "Create checkpoints directory")
                    os.makedirs(self._checkpoint_dir)
        else:
            self._checkpoint_dir = None

    def train(self, max_epochs, load_latest=False, fail_safe=True, load_previous_ckpt=False, distill=False):
        """Do training for the given number of epochs.
        args:
            max_epochs - Max number of training epochs,
            load_latest - Bool indicating whether to resume from latest epoch.
            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.
            load_previous_ckpt - load weights from settings.project_path_prv before training.
            distill - additionally load teacher weights from settings.project_path_teacher.
        """
        epoch = -1
        # NOTE(review): num_tries is 1, so after a crash the except branch
        # prints the traceback but the loop ends without actually retrying —
        # fail_safe restart never happens and 'Finished training!' is printed
        # anyway.  Looks like it was reduced from a larger value; confirm.
        num_tries = 1
        for i in range(num_tries):
            try:
                if load_latest:
                    self.load_checkpoint()
                if load_previous_ckpt:
                    directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_prv)
                    self.load_state_dict(directory)
                if distill:
                    directory_teacher = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_teacher)
                    self.load_state_dict(directory_teacher, distill=True)
                for epoch in range(self.epoch+1, max_epochs+1):
                    self.epoch = epoch

                    self.train_epoch()

                    if self.lr_scheduler is not None:
                        if self.settings.scheduler_type != 'cosine':
                            self.lr_scheduler.step()
                        else:
                            # Cosine schedule is stepped by epoch index.
                            self.lr_scheduler.step(epoch - 1)
                    # Save every 5th epoch, the last few epochs, or every
                    # epoch when settings.save_every_epoch is set.
                    save_every_epoch = getattr(self.settings, "save_every_epoch", False)
                    save_epochs = []
                    if epoch > (max_epochs - 1) or save_every_epoch or epoch % 5 == 0 or epoch in save_epochs or \
                            epoch > (max_epochs - 5):
                        if self._checkpoint_dir:
                            if self.settings.local_rank in [-1, 0]:
                                self.save_checkpoint()
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider `except Exception`.
            except:
                print('Training crashed at epoch {}'.format(epoch))
                if fail_safe:
                    self.epoch -= 1
                    load_latest = True
                    print('Traceback for the error!')
                    print(traceback.format_exc())
                    print('Restarting training from last epoch ...')
                else:
                    raise

        print('Finished training!')

    def train_epoch(self):
        # Subclasses must implement one full pass over self.loaders.
        raise NotImplementedError

    def save_checkpoint(self):
        """Saves a checkpoint of the network and other variables."""

        # Unwrap DDP so the raw module's state_dict is saved.
        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__
        state = {
            'epoch': self.epoch,
            'actor_type': actor_type,
            'net_type': net_type,
            'net': net.state_dict(),
            'net_info': getattr(net, 'info', None),
            'constructor': getattr(net, 'constructor', None),
            'optimizer': self.optimizer.state_dict(),
            'stats': self.stats,
            'settings': self.settings
        }

        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)
        print(directory)
        if not os.path.exists(directory):
            print("directory doesn't exist. creating...")
            os.makedirs(directory)

        # First save as a tmp file
        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)
        torch.save(state, tmp_file_path)

        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)

        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure
        os.rename(tmp_file_path, file_path)

    def load_checkpoint(self, checkpoint=None, fields=None, ignore_fields=None, load_constructor=False):
        """Loads a network checkpoint file.

        Can be called in three different ways:
            load_checkpoint():
                Loads the latest epoch from the workspace. Use this to continue training.
            load_checkpoint(epoch_num):
                Loads the network at the given epoch number (int).
            load_checkpoint(path_to_checkpoint):
                Loads the file from the given absolute path (str).
        """

        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        actor_type = type(self.actor).__name__
        net_type = type(net).__name__

        if checkpoint is None:
            # Load most recent checkpoint
            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,
                                                                             self.settings.project_path, net_type)))
            if checkpoint_list:
                checkpoint_path = checkpoint_list[-1]
            else:
                print('No matching checkpoint file found')
                return
        elif isinstance(checkpoint, int):
            # Checkpoint is the epoch number
            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,
                                                                 net_type, checkpoint)
        elif isinstance(checkpoint, str):
            # checkpoint is the path
            if os.path.isdir(checkpoint):
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError

        # Load network
        print(checkpoint_path)
        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
        print(checkpoint_dict['net_type'])

        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'

        if fields is None:
            fields = checkpoint_dict.keys()
        if ignore_fields is None:
            ignore_fields = ['settings']

        # Never load the scheduler. It exists in older checkpoints.
        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])

        # Load all fields
        for key in fields:
            if key in ignore_fields:
                continue
            if key == 'net':
                net.load_state_dict(checkpoint_dict[key])
            elif key == 'optimizer':
                self.optimizer.load_state_dict(checkpoint_dict[key])
            else:
                # e.g. 'epoch' and 'stats' are restored as trainer attributes.
                setattr(self, key, checkpoint_dict[key])

        # Set the net info
        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
            net.constructor = checkpoint_dict['constructor']
        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
            net.info = checkpoint_dict['net_info']

        # Update the epoch in lr scheduler
        if 'epoch' in fields:
            self.lr_scheduler.last_epoch = self.epoch
            # 2021.1.10 Update the epoch in data_samplers
            for loader in self.loaders:
                if isinstance(loader.sampler, DistributedSampler):
                    loader.sampler.set_epoch(self.epoch)
        return True

    def load_state_dict(self, checkpoint=None, distill=False):
        """Loads network weights only (no optimizer/epoch state) from a
        checkpoint path or directory, non-strictly.

        args:
            checkpoint - path to a checkpoint file, or a directory containing
                '*_ep*.pth.tar' files (the latest one is used).
            distill - when True, load into the teacher network
                (self.actor.net_teacher) instead of the student.
        """
        if distill:
            net = self.actor.net_teacher.module if multigpu.is_multi_gpu(self.actor.net_teacher) \
                else self.actor.net_teacher
        else:
            net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net

        net_type = type(net).__name__

        if isinstance(checkpoint, str):
            # checkpoint is the path
            if os.path.isdir(checkpoint):
                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))
                if checkpoint_list:
                    checkpoint_path = checkpoint_list[-1]
                else:
                    raise Exception('No checkpoint found')
            else:
                checkpoint_path = os.path.expanduser(checkpoint)
        else:
            raise TypeError

        # Load network
        print("Loading pretrained model from ", checkpoint_path)
        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')

        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'

        # strict=False tolerates architecture drift; mismatches are printed
        # for the user to inspect.
        missing_k, unexpected_k = net.load_state_dict(checkpoint_dict["net"], strict=False)
        print("previous checkpoint is loaded.")
        print("missing keys: ", missing_k)
        print("unexpected keys:", unexpected_k)

        return True
class LTRSeqTrainer(BaseTrainer):
    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):
        """
        Sequence-level trainer: explores whole sequences with the actor in
        no-grad mode, then back-propagates per-sequence losses.

        args:
            actor - The actor for training the network
            loaders - list of dataset loaders, e.g. [train_loader, val_loader].
                      In each epoch, the trainer runs one epoch for each loader.
            optimizer - The optimizer used for training, e.g. Adam
            settings - Training settings
            lr_scheduler - Learning rate scheduler
            use_amp - enable mixed precision (a GradScaler is created, though
                      the sequence loop below runs in full precision)
        """
        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)

        self._set_default_settings()

        # Initialize statistics variables: one (lazily created) stats dict
        # per loader name.
        self.stats = OrderedDict({loader.name: None for loader in self.loaders})

        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)
        print("move_data", self.move_data_to_gpu)
        self.settings = settings
        self.use_amp = use_amp
        if use_amp:
            self.scaler = GradScaler()

    def _set_default_settings(self):
        # Dict of all default values; only fill in attributes that are unset.
        default = {'print_interval': 10,
                   'print_stats': None,
                   'description': ''}

        for param, default_value in default.items():
            if getattr(self.settings, param, None) is None:
                setattr(self.settings, param, default_value)
        # Running history of per-iteration mean IoU (for mIoU10/mIoU100).
        self.miou_list = []

    def cycle_dataset(self, loader):
        """Do a cycle of training or validation."""
        # NOTE(review): anomaly detection is always on — this slows training
        # noticeably; presumably left over from debugging.
        torch.autograd.set_detect_anomaly(True)
        self.actor.train(loader.training)
        torch.set_grad_enabled(loader.training)

        self._init_timing()

        for i, data in enumerate(loader, 1):
            # Exploration pass runs the tracker over the sequence without
            # gradients; the actor is put in eval mode for it.
            self.actor.eval()
            self.data_read_done_time = time.time()
            with torch.no_grad():
                explore_result = self.actor.explore(data)
            if explore_result == None:
                # Exploration can fail (e.g. degenerate sequence); skip batch.
                print("this time i skip")
                continue

            # get inputs
            self.data_to_gpu_time = time.time()
            data['epoch'] = self.epoch
            data['settings'] = self.settings

            stats = {}
            reward_record = []
            miou_record = []
            e_miou_record = []

            num_seq = len(data['num_frames'])
            baseline_iou = explore_result['baseline_iou']
            # Per-sequence mean IoU of the baseline rollout; assumes
            # baseline_iou is indexed [frame, sequence] — TODO confirm
            # against the actor's explore().
            for seq_idx in range(num_seq):
                num_frames = data['num_frames'][seq_idx] - 1
                b_miou = torch.mean(baseline_iou[:num_frames, seq_idx])
                miou_record.append(b_miou.item())
                b_reward = b_miou.item()

            # Training mode: accumulate gradients sequence-by-sequence
            # (bs_backward at a time) before a single optimizer step.
            cursor = 0
            bs_backward = 1
            self.optimizer.zero_grad()
            while cursor < num_seq:
                model_inputs = {}
                model_inputs['slt_loss_weight'] = 15
                # The else branch (reversed templates) is unreachable while
                # the loop condition is `cursor < num_seq`; kept as-is.
                if cursor < num_seq:
                    model_inputs['template_images'] = explore_result['template_images'][
                                                      cursor:cursor + bs_backward].cuda()
                else:
                    model_inputs['template_images'] = explore_result['template_images_reverse'][
                                                      cursor - num_seq:cursor - num_seq + bs_backward].cuda()
                model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda()
                model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda()
                model_inputs['epoch'] = data['epoch']

                loss, stats_cur = self.actor.compute_sequence_losses(model_inputs)
                loss.backward()

                # Average the per-chunk stats over the whole batch of
                # sequences (weight = chunk fraction of the batch).
                for key, val in stats_cur.items():
                    if key in stats:
                        stats[key] += val * (bs_backward / num_seq)
                    else:
                        stats[key] = val * (bs_backward / num_seq)
                cursor += bs_backward

            grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100)
            stats['grad_norm'] = grad_norm
            self.optimizer.step()

            miou = np.mean(miou_record)
            self.miou_list.append(miou)
            stats['mIoU'] = miou
            stats['mIoU10'] = np.mean(self.miou_list[-10:])
            stats['mIoU100'] = np.mean(self.miou_list[-100:])

            # "batch size" for FPS accounting = sequences x longest sequence.
            batch_size = num_seq * np.max(data['num_frames'])
            self._update_stats(stats, batch_size, loader)
            self._print_stats(i, loader, batch_size)
            torch.cuda.empty_cache()

    def train_epoch(self):
        """Do one epoch for each loader."""
        for loader in self.loaders:
            if self.epoch % loader.epoch_interval == 0:
                # 2021.1.10 Set epoch
                if isinstance(loader.sampler, DistributedSampler):
                    loader.sampler.set_epoch(self.epoch)
                self.cycle_dataset(loader)

        self._stats_new_epoch()

    def _init_timing(self):
        # Reset the per-cycle timing accumulators.
        self.num_frames = 0
        self.start_time = time.time()
        self.prev_time = self.start_time
        self.avg_date_time = 0
        self.avg_gpu_trans_time = 0
        self.avg_forward_time = 0

    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
        # Initialize stats if not initialized yet
        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:
            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})

        # add lr state
        if loader.training:
            lr_list = self.lr_scheduler.get_last_lr()
            for i, lr in enumerate(lr_list):
                var_name = 'LearningRate/group{}'.format(i)
                if var_name not in self.stats[loader.name].keys():
                    self.stats[loader.name][var_name] = StatValue()
                self.stats[loader.name][var_name].update(lr)

        for name, val in new_stats.items():
            if name not in self.stats[loader.name].keys():
                self.stats[loader.name][name] = AverageMeter()
            self.stats[loader.name][name].update(val, batch_size)

    def _print_stats(self, i, loader, batch_size):
        self.num_frames += batch_size
        current_time = time.time()
        batch_fps = batch_size / (current_time - self.prev_time)
        average_fps = self.num_frames / (current_time - self.start_time)
        prev_frame_time_backup = self.prev_time
        self.prev_time = current_time

        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)
        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)
        self.avg_forward_time += current_time - self.data_to_gpu_time

        if i % self.settings.print_interval == 0 or i == loader.__len__():
            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())
            print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps)
            # 2021.12.14 add data time print
            print_str += 'DataTime: %.3f (%.3f) , ' % (
                self.avg_date_time / self.num_frames * batch_size,
                self.avg_gpu_trans_time / self.num_frames * batch_size)
            print_str += 'ForwardTime: %.3f , ' % (self.avg_forward_time / self.num_frames * batch_size)
            print_str += 'TotalTime: %.3f , ' % ((current_time - self.start_time) / self.num_frames * batch_size)

            for name, val in self.stats[loader.name].items():
                if (self.settings.print_stats is None or name in self.settings.print_stats):
                    if hasattr(val, 'avg'):
                        print_str += '%s: %.5f , ' % (name, val.avg)

            # Trailing " , " is trimmed before printing/logging.
            print(print_str[:-5])
            log_str = print_str[:-5] + '\n'
            with open(self.settings.log_file, 'a') as f:
                f.write(log_str)

    def _stats_new_epoch(self):
        # Record learning rate
        for loader in self.loaders:
            if loader.training:
                # NOTE(review): bare except is overly broad here — it is only
                # meant to cover schedulers lacking get_last_lr().
                try:
                    lr_list = self.lr_scheduler.get_last_lr()
                except:
                    lr_list = self.lr_scheduler._get_lr(self.epoch)
                for i, lr in enumerate(lr_list):
                    var_name = 'LearningRate/group{}'.format(i)
                    if var_name not in self.stats[loader.name].keys():
                        self.stats[loader.name][var_name] = StatValue()
                    self.stats[loader.name][var_name].update(lr)

        for loader_stats in self.stats.values():
            if loader_stats is None:
                continue
            for stat_value in loader_stats.values():
                if hasattr(stat_value, 'new_epoch'):
                    stat_value.new_epoch()
Adam settings - Training settings lr_scheduler - Learning rate scheduler """ super().__init__(actor, loaders, optimizer, settings, lr_scheduler) self._set_default_settings() # Initialize statistics variables self.stats = OrderedDict({loader.name: None for loader in self.loaders}) # Initialize tensorboard and wandb # self.wandb_writer = None # if settings.local_rank in [-1, 0]: # tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path) # if not os.path.exists(tensorboard_writer_dir): # os.makedirs(tensorboard_writer_dir) # self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders]) # if settings.use_wandb: # world_size = get_world_size() # cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1) # interval = (world_size * settings.batchsize) # * interval # self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval) self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True) print("move_data", self.move_data_to_gpu) self.settings = settings self.use_amp = use_amp if use_amp: self.scaler = GradScaler() def _set_default_settings(self): # Dict of all default values default = {'print_interval': 10, 'print_stats': None, 'description': ''} for param, default_value in default.items(): if getattr(self.settings, param, None) is None: setattr(self.settings, param, default_value) self.miou_list = [] def cycle_dataset(self, loader): """Do a cycle of training or validation.""" torch.autograd.set_detect_anomaly(True) self.actor.train(loader.training) torch.set_grad_enabled(loader.training) self._init_timing() for i, data in enumerate(loader, 1): self.actor.eval() self.data_read_done_time = time.time() with torch.no_grad(): explore_result = self.actor.explore(data) if explore_result == None: print("this time i skip") continue # get inputs # print(data) self.data_to_gpu_time = time.time() data['epoch'] = 
self.epoch data['settings'] = self.settings stats = {} reward_record = [] miou_record = [] e_miou_record = [] num_seq = len(data['num_frames']) # Calculate reward tensor # reward_tensor = torch.zeros(explore_result['baseline_iou'].size()) baseline_iou = explore_result['baseline_iou'] # explore_iou = explore_result['explore_iou'] for seq_idx in range(num_seq): num_frames = data['num_frames'][seq_idx] - 1 b_miou = torch.mean(baseline_iou[:num_frames, seq_idx]) # e_miou = torch.mean(explore_iou[:num_frames, seq_idx]) miou_record.append(b_miou.item()) # e_miou_record.append(e_miou.item()) b_reward = b_miou.item() # e_reward = e_miou.item() # iou_gap = e_reward - b_reward # reward_record.append(iou_gap) # reward_tensor[:num_frames, seq_idx] = iou_gap # Training mode cursor = 0 bs_backward = 1 # print(self.actor.net.module.box_head.decoder.layers[2].mlpx.fc1.weight) self.optimizer.zero_grad() self.actor.train() while cursor < num_seq * 2: model_inputs = {} model_inputs['slt_loss_weight'] = 15 model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda() model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda() model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda() model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda() model_inputs['template_images_z0'] = explore_result['template_images_z0'][:, cursor:cursor + bs_backward].cuda() model_inputs['dz_feat_update'] = explore_result['dz_feat_update'][:, cursor:cursor + bs_backward].cuda() model_inputs['target_in_search'] = explore_result['target_in_search'][:, cursor:cursor + bs_backward].cuda() model_inputs['epoch'] = self.epoch loss, stats_cur = self.actor.compute_sequence_losses(model_inputs) loss.backward() for key, val in stats_cur.items(): if key in stats: stats[key] += val * (bs_backward / num_seq) else: stats[key] = val * (bs_backward / num_seq) cursor += 
bs_backward grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100) stats['grad_norm'] = grad_norm self.optimizer.step() miou = np.mean(miou_record) self.miou_list.append(miou) stats['mIoU'] = miou stats['mIoU10'] = np.mean(self.miou_list[-10:]) stats['mIoU100'] = np.mean(self.miou_list[-100:]) batch_size = num_seq * np.max(data['num_frames']) self._update_stats(stats, batch_size, loader) self._print_stats(i, loader, batch_size) torch.cuda.empty_cache() def train_epoch(self): """Do one epoch for each loader.""" for loader in self.loaders: if self.epoch % loader.epoch_interval == 0: # 2021.1.10 Set epoch if isinstance(loader.sampler, DistributedSampler): loader.sampler.set_epoch(self.epoch) self.cycle_dataset(loader) self._stats_new_epoch() # if self.settings.local_rank in [-1, 0]: # self._write_tensorboard() def _init_timing(self): self.num_frames = 0 self.start_time = time.time() self.prev_time = self.start_time self.avg_date_time = 0 self.avg_gpu_trans_time = 0 self.avg_forward_time = 0 def _update_stats(self, new_stats: OrderedDict, batch_size, loader): # Initialize stats if not initialized yet if loader.name not in self.stats.keys() or self.stats[loader.name] is None: self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()}) # add lr state if loader.training: lr_list = self.lr_scheduler.get_last_lr() for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for name, val in new_stats.items(): if name not in self.stats[loader.name].keys(): self.stats[loader.name][name] = AverageMeter() self.stats[loader.name][name].update(val, batch_size) def _print_stats(self, i, loader, batch_size): self.num_frames += batch_size current_time = time.time() batch_fps = batch_size / (current_time - self.prev_time) average_fps = self.num_frames / (current_time - self.start_time) 
prev_frame_time_backup = self.prev_time self.prev_time = current_time self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup) self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time) self.avg_forward_time += current_time - self.data_to_gpu_time if i % self.settings.print_interval == 0 or i == loader.__len__(): print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()) print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps) # 2021.12.14 add data time print print_str += 'DataTime: %.3f (%.3f) , ' % ( self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size) print_str += 'ForwardTime: %.3f , ' % (self.avg_forward_time / self.num_frames * batch_size) print_str += 'TotalTime: %.3f , ' % ((current_time - self.start_time) / self.num_frames * batch_size) # print_str += 'DataTime: %.3f (%.3f) , ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time) # print_str += 'ForwardTime: %.3f , ' % (current_time - self.data_to_gpu_time) # print_str += 'TotalTime: %.3f , ' % (current_time - prev_frame_time_backup) for name, val in self.stats[loader.name].items(): if (self.settings.print_stats is None or name in self.settings.print_stats): if hasattr(val, 'avg'): print_str += '%s: %.5f , ' % (name, val.avg) # else: # print_str += '%s: %r , ' % (name, val) print(print_str[:-5]) log_str = print_str[:-5] + '\n' with open(self.settings.log_file, 'a') as f: f.write(log_str) def _stats_new_epoch(self): # Record learning rate for loader in self.loaders: if loader.training: try: lr_list = self.lr_scheduler.get_last_lr() except: lr_list = self.lr_scheduler._get_lr(self.epoch) for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for loader_stats in 
self.stats.values(): if loader_stats is None: continue for stat_value in loader_stats.values(): if hasattr(stat_value, 'new_epoch'): stat_value.new_epoch() # def _write_tensorboard(self): # if self.epoch == 1: # self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description) # self.tensorboard_writer.write_epoch(self.stats, self.epoch) ================================================ FILE: lib/train/trainers/ltr_trainer.py ================================================ import os import datetime from collections import OrderedDict #from lib.train.data.wandb_logger import WandbWriter from lib.train.trainers import BaseTrainer from lib.train.admin import AverageMeter, StatValue #from lib.train.admin import TensorboardWriter import torch import time from torch.utils.data.distributed import DistributedSampler from torch.cuda.amp import autocast from torch.cuda.amp import GradScaler from lib.utils.misc import get_world_size class LTRTrainer(BaseTrainer): def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False): """ args: actor - The actor for training the network loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one epoch for each loader. optimizer - The optimizer used for training, e.g. 
Adam settings - Training settings lr_scheduler - Learning rate scheduler """ super().__init__(actor, loaders, optimizer, settings, lr_scheduler) self._set_default_settings() # Initialize statistics variables self.stats = OrderedDict({loader.name: None for loader in self.loaders}) # Initialize tensorboard and wandb #self.wandb_writer = None #if settings.local_rank in [-1, 0]: # tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path) # if not os.path.exists(tensorboard_writer_dir): # os.makedirs(tensorboard_writer_dir) # self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders]) # if settings.use_wandb: # world_size = get_world_size() # cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1) # interval = (world_size * settings.batchsize) # * interval # self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval) self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True) print("move_data", self.move_data_to_gpu) self.settings = settings self.use_amp = use_amp if use_amp: self.scaler = GradScaler() def _set_default_settings(self): # Dict of all default values default = {'print_interval': 10, 'print_stats': None, 'description': ''} for param, default_value in default.items(): if getattr(self.settings, param, None) is None: setattr(self.settings, param, default_value) def cycle_dataset(self, loader): """Do a cycle of training or validation.""" self.actor.train(loader.training) torch.set_grad_enabled(loader.training) self._init_timing() for i, data in enumerate(loader, 1): self.data_read_done_time = time.time() # get inputs if self.move_data_to_gpu: data = data.to(self.device) self.data_to_gpu_time = time.time() data['epoch'] = self.epoch data['settings'] = self.settings # forward pass if not self.use_amp: loss, stats = self.actor(data) else: with autocast(): loss, stats = self.actor(data) # 
backward pass and update weights if loader.training: self.optimizer.zero_grad() if not self.use_amp: loss.backward() if self.settings.grad_clip_norm > 0: torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm) self.optimizer.step() else: self.scaler.scale(loss).backward() self.scaler.step(self.optimizer) self.scaler.update() # update statistics batch_size = data['template_images'].shape[loader.stack_dim] self._update_stats(stats, batch_size, loader) # print statistics self._print_stats(i, loader, batch_size) # update wandb status #if self.wandb_writer is not None and i % self.settings.print_interval == 0: # if self.settings.local_rank in [-1, 0]: # self.wandb_writer.write_log(self.stats, self.epoch) # calculate ETA after every epoch epoch_time = self.prev_time - self.start_time print("Epoch Time: " + str(datetime.timedelta(seconds=epoch_time))) print("Avg Data Time: %.5f" % (self.avg_date_time / self.num_frames * batch_size)) print("Avg GPU Trans Time: %.5f" % (self.avg_gpu_trans_time / self.num_frames * batch_size)) print("Avg Forward Time: %.5f" % (self.avg_forward_time / self.num_frames * batch_size)) def train_epoch(self): """Do one epoch for each loader.""" for loader in self.loaders: if self.epoch % loader.epoch_interval == 0: # 2021.1.10 Set epoch if isinstance(loader.sampler, DistributedSampler): loader.sampler.set_epoch(self.epoch) self.cycle_dataset(loader) self._stats_new_epoch() #if self.settings.local_rank in [-1, 0]: # self._write_tensorboard() def _init_timing(self): self.num_frames = 0 self.start_time = time.time() self.prev_time = self.start_time self.avg_date_time = 0 self.avg_gpu_trans_time = 0 self.avg_forward_time = 0 def _update_stats(self, new_stats: OrderedDict, batch_size, loader): # Initialize stats if not initialized yet if loader.name not in self.stats.keys() or self.stats[loader.name] is None: self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()}) # add lr state if 
loader.training: lr_list = self.lr_scheduler.get_last_lr() for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for name, val in new_stats.items(): if name not in self.stats[loader.name].keys(): self.stats[loader.name][name] = AverageMeter() self.stats[loader.name][name].update(val, batch_size) def _print_stats(self, i, loader, batch_size): self.num_frames += batch_size current_time = time.time() batch_fps = batch_size / (current_time - self.prev_time) average_fps = self.num_frames / (current_time - self.start_time) prev_frame_time_backup = self.prev_time self.prev_time = current_time self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup) self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time) self.avg_forward_time += current_time - self.data_to_gpu_time if i % self.settings.print_interval == 0 or i == loader.__len__(): print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()) print_str += 'FPS: %.1f (%.1f) , ' % (average_fps, batch_fps) # 2021.12.14 add data time print print_str += 'DataTime: %.3f (%.3f) , ' % (self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size) print_str += 'ForwardTime: %.3f , ' % (self.avg_forward_time / self.num_frames * batch_size) print_str += 'TotalTime: %.3f , ' % ((current_time - self.start_time) / self.num_frames * batch_size) # print_str += 'DataTime: %.3f (%.3f) , ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time) # print_str += 'ForwardTime: %.3f , ' % (current_time - self.data_to_gpu_time) # print_str += 'TotalTime: %.3f , ' % (current_time - prev_frame_time_backup) for name, val in self.stats[loader.name].items(): if (self.settings.print_stats is None or name in self.settings.print_stats): if 
hasattr(val, 'avg'): print_str += '%s: %.5f , ' % (name, val.avg) # else: # print_str += '%s: %r , ' % (name, val) print(print_str[:-5]) log_str = print_str[:-5] + '\n' with open(self.settings.log_file, 'a') as f: f.write(log_str) def _stats_new_epoch(self): # Record learning rate for loader in self.loaders: if loader.training: try: lr_list = self.lr_scheduler.get_last_lr() except: lr_list = self.lr_scheduler._get_lr(self.epoch) for i, lr in enumerate(lr_list): var_name = 'LearningRate/group{}'.format(i) if var_name not in self.stats[loader.name].keys(): self.stats[loader.name][var_name] = StatValue() self.stats[loader.name][var_name].update(lr) for loader_stats in self.stats.values(): if loader_stats is None: continue for stat_value in loader_stats.values(): if hasattr(stat_value, 'new_epoch'): stat_value.new_epoch() #def _write_tensorboard(self): # if self.epoch == 1: # self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description) # self.tensorboard_writer.write_epoch(self.stats, self.epoch) ================================================ FILE: lib/utils/__init__.py ================================================ from .tensor import TensorDict, TensorList ================================================ FILE: lib/utils/box_ops.py ================================================ import torch from torchvision.ops.boxes import box_area import numpy as np def box_xywh_to_cxywh(x): x1, y1, w, h = x.unbind(-1) b = [x1+0.5*w, y1+0.5*h, w, h] return torch.stack(b, dim=-1) def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) def box_xywh_to_xyxy(x): x1, y1, w, h = x.unbind(-1) b = [x1, y1, x1 + w, y1 + h] return torch.stack(b, dim=-1) def box_xyxy_to_xywh(x): x1, y1, x2, y2 = x.unbind(-1) b = [x1, y1, x2 - x1, y2 - y1] return torch.stack(b, dim=-1) def box_xyxy_to_cxcywh(x): x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, 
(x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) # modified from torchvision to also return the union '''Note that this function only supports shape (N,4)''' def box_iou(boxes1, boxes2): """ :param boxes1: (N, 4) (x1,y1,x2,y2) :param boxes2: (N, 4) (x1,y1,x2,y2) :return: """ area1 = box_area(boxes1) # (N,) area2 = box_area(boxes2) # (N,) lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # (N,2) rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # (N,2) wh = (rb - lt).clamp(min=0) # (N,2) inter = wh[:, 0] * wh[:, 1] # (N,) union = area1 + area2 - inter iou = inter / union return iou, union '''Note that this implementation is different from DETR's''' def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format boxes1: (N, 4) boxes2: (N, 4) """ # degenerate boxes gives inf / nan results # so do an early check # try: #assert (boxes1[:, 2:] >= boxes1[:, :2]).all() # assert (boxes2[:, 2:] >= boxes2[:, :2]).all() iou, union = box_iou(boxes1, boxes2) # (N,) lt = torch.min(boxes1[:, :2], boxes2[:, :2]) rb = torch.max(boxes1[:, 2:], boxes2[:, 2:]) wh = (rb - lt).clamp(min=0) # (N,2) area = wh[:, 0] * wh[:, 1] # (N,) return iou - (area - union) / area, iou def giou_loss(boxes1, boxes2): """ :param boxes1: (N, 4) (x1,y1,x2,y2) :param boxes2: (N, 4) (x1,y1,x2,y2) :return: """ giou, iou = generalized_box_iou(boxes1, boxes2) return (1 - giou).mean(), iou def clip_box(box: list, H, W, margin=0): x1, y1, w, h = box x2, y2 = x1 + w, y1 + h x1 = min(max(0, x1), W-margin) x2 = min(max(margin, x2), W) y1 = min(max(0, y1), H-margin) y2 = min(max(margin, y2), H) w = max(margin, x2-x1) h = max(margin, y2-y1) return [x1, y1, w, h] ================================================ FILE: lib/utils/ce_utils.py ================================================ import math import torch import torch.nn.functional as F def generate_bbox_mask(bbox_mask, bbox): b, h, w = bbox_mask.shape for i in range(b): bbox_i = 
def generate_bbox_mask(bbox_mask, bbox):
    """Rasterize per-sample (x1, y1, w, h) boxes into a (b, h, w) binary mask in place."""
    b, h, w = bbox_mask.shape
    for i in range(b):
        bbox_i = bbox[i].cpu().tolist()
        bbox_mask[i, int(bbox_i[1]):int(bbox_i[1] + bbox_i[3] - 1),
                  int(bbox_i[0]):int(bbox_i[0] + bbox_i[2] - 1)] = 1
    return bbox_mask


def generate_mask_cond(cfg, bs, device, gt_bbox):
    """Build the flattened boolean template attention mask used by candidate elimination.

    The shape of the mask depends on cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE:
    'ALL' returns None (keep every token), 'CTR_POINT'/'CTR_REC' keep a fixed
    central region of the template feature map, 'GT_BOX' rasterizes the
    ground-truth box and downsamples it to feature resolution.
    """
    template_size = cfg.DATA.TEMPLATE.SIZE
    stride = cfg.MODEL.BACKBONE.STRIDE
    template_feat_size = template_size // stride
    ce_range = cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE

    if ce_range == 'ALL':
        box_mask_z = None
    elif ce_range == 'CTR_POINT':
        # central point (or 2x1 region) of the template feature map
        if template_feat_size == 8:
            index = slice(3, 4)
        elif template_feat_size == 12:
            index = slice(5, 6)
        elif template_feat_size == 7:
            index = slice(3, 4)
        elif template_feat_size == 14:
            index = slice(6, 7)
        else:
            raise NotImplementedError
        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)
        box_mask_z[:, index, index] = 1
        box_mask_z = box_mask_z.flatten(1).to(torch.bool)
    elif ce_range == 'CTR_REC':
        # use fixed 4x4 region, 3:5 for 8x8
        # use fixed 4x4 region 5:6 for 12x12
        if template_feat_size == 8:
            index = slice(3, 5)
        elif template_feat_size == 12:
            index = slice(5, 7)
        elif template_feat_size == 7:
            index = slice(3, 4)
        else:
            raise NotImplementedError
        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)
        box_mask_z[:, index, index] = 1
        box_mask_z = box_mask_z.flatten(1).to(torch.bool)
    elif ce_range == 'GT_BOX':
        box_mask_z = torch.zeros([bs, template_size, template_size], device=device)
        # rasterize the (normalized) gt box at template resolution, then pool to feature scale
        box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(torch.float)  # (batch, 1, 128, 128)
        box_mask_z = F.interpolate(box_mask_z, scale_factor=1. / cfg.MODEL.BACKBONE.STRIDE,
                                   mode='bilinear', align_corners=False)
        box_mask_z = box_mask_z.flatten(1).to(torch.bool)
    else:
        raise NotImplementedError

    return box_mask_z


def adjust_keep_rate(epoch, warmup_epochs, total_epochs, ITERS_PER_EPOCH,
                     base_keep_rate=0.5, max_keep_rate=1, iters=-1):
    """Cosine-anneal the token keep-rate from `max_keep_rate` to `base_keep_rate`
    between `warmup_epochs` and `total_epochs`."""
    if epoch < warmup_epochs:
        return 1
    if epoch >= total_epochs:
        return base_keep_rate
    if iters == -1:
        iters = epoch * ITERS_PER_EPOCH
    total_iters = ITERS_PER_EPOCH * (total_epochs - warmup_epochs)
    iters = iters - ITERS_PER_EPOCH * warmup_epochs
    keep_rate = base_keep_rate + (max_keep_rate - base_keep_rate) \
        * (math.cos(iters / total_iters * math.pi) + 1) * 0.5
    return keep_rate


# ================================================
# FILE: lib/utils/focal_loss.py
# ================================================
from abc import ABC

import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module, ABC):
    """CenterNet-style penalty-reduced focal loss on a dense score map."""

    def __init__(self, alpha=2, beta=4):
        super(FocalLoss, self).__init__()
        self.alpha = alpha  # focusing exponent on the prediction
        self.beta = beta    # penalty-reduction exponent on soft negatives

    def forward(self, prediction, target):
        pos_mask = target.eq(1).float()
        neg_mask = target.lt(1).float()
        neg_weights = torch.pow(1 - target, self.beta)

        # clamp min value is set to 1e-12 to maintain the numerical stability
        prediction = torch.clamp(prediction, 1e-12)

        pos_term = torch.log(prediction) * torch.pow(1 - prediction, self.alpha) * pos_mask
        neg_term = torch.log(1 - prediction) * torch.pow(prediction, self.alpha) * neg_weights * neg_mask

        num_positive = pos_mask.float().sum()
        pos_sum = pos_term.sum()
        neg_sum = neg_term.sum()

        # normalize by positive count only when positives exist
        if num_positive == 0:
            return -neg_sum
        return -(pos_sum + neg_sum) / num_positive


class LBHinge(nn.Module):
    """Loss that uses a 'hinge' on the lower bound.
    This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is
    also smaller than that threshold.
    args:
        error_matric:  What base loss to use (MSE by default).
        threshold:  Threshold to use for the hinge.
        clip:  Clip the loss if it is above this value.
    """

    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):
        super().__init__()
        self.error_metric = error_metric
        self.threshold = threshold if threshold is not None else -100
        self.clip = clip

    def forward(self, prediction, label, target_bb=None):
        negative_mask = (label < self.threshold).float()
        positive_mask = (1.0 - negative_mask)

        # below-threshold samples only contribute when predicted above zero
        prediction = negative_mask * F.relu(prediction) + positive_mask * prediction

        loss = self.error_metric(prediction, positive_mask * label)

        if self.clip is not None:
            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))
        return loss
bound. This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is also smaller than that threshold. args: error_matric: What base loss to use (MSE by default). threshold: Threshold to use for the hinge. clip: Clip the loss if it is above this value. """ def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None): super().__init__() self.error_metric = error_metric self.threshold = threshold if threshold is not None else -100 self.clip = clip def forward(self, prediction, label, target_bb=None): negative_mask = (label < self.threshold).float() positive_mask = (1.0 - negative_mask) prediction = negative_mask * F.relu(prediction) + positive_mask * prediction loss = self.error_metric(prediction, positive_mask * label) if self.clip is not None: loss = torch.min(loss, torch.tensor([self.clip], device=loss.device)) return loss ================================================ FILE: lib/utils/heapmap_utils.py ================================================ import numpy as np import torch def generate_heatmap(bboxes, patch_size=320, stride=16): """ Generate ground truth heatmap same as CenterNet Args: bboxes (torch.Tensor): shape of [num_search, bs, 4] Returns: gaussian_maps: list of generated heatmap """ gaussian_maps = [] heatmap_size = patch_size // stride for single_patch_bboxes in bboxes: bs = single_patch_bboxes.shape[0] gt_scoremap = torch.zeros(bs, heatmap_size, heatmap_size) classes = torch.arange(bs).to(torch.long) bbox = single_patch_bboxes * heatmap_size wh = bbox[:, 2:] centers_int = (bbox[:, :2] + wh / 2).round() CenterNetHeatMap.generate_score_map(gt_scoremap, classes, wh, centers_int, 0.7) gaussian_maps.append(gt_scoremap.to(bbox.device)) return gaussian_maps class CenterNetHeatMap(object): @staticmethod def generate_score_map(fmap, gt_class, gt_wh, centers_int, min_overlap): radius = CenterNetHeatMap.get_gaussian_radius(gt_wh, min_overlap) radius = torch.clamp_min(radius, 0) radius = 
radius.type(torch.int).cpu().numpy() for i in range(gt_class.shape[0]): channel_index = gt_class[i] CenterNetHeatMap.draw_gaussian(fmap[channel_index], centers_int[i], radius[i]) @staticmethod def get_gaussian_radius(box_size, min_overlap): """ copyed from CornerNet box_size (w, h), it could be a torch.Tensor, numpy.ndarray, list or tuple notice: we are using a bug-version, please refer to fix bug version in CornerNet """ # box_tensor = torch.Tensor(box_size) box_tensor = box_size width, height = box_tensor[..., 0], box_tensor[..., 1] a1 = 1 b1 = height + width c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = torch.sqrt(b1 ** 2 - 4 * a1 * c1) r1 = (b1 + sq1) / 2 a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = torch.sqrt(b2 ** 2 - 4 * a2 * c2) r2 = (b2 + sq2) / 2 a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = torch.sqrt(b3 ** 2 - 4 * a3 * c3) r3 = (b3 + sq3) / 2 return torch.min(r1, torch.min(r2, r3)) @staticmethod def gaussian2D(radius, sigma=1): # m, n = [(s - 1.) / 2. 
for s in shape] m, n = radius y, x = np.ogrid[-m: m + 1, -n: n + 1] gauss = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) gauss[gauss < np.finfo(gauss.dtype).eps * gauss.max()] = 0 return gauss @staticmethod def draw_gaussian(fmap, center, radius, k=1): diameter = 2 * radius + 1 gaussian = CenterNetHeatMap.gaussian2D((radius, radius), sigma=diameter / 6) gaussian = torch.Tensor(gaussian) x, y = int(center[0]), int(center[1]) height, width = fmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_fmap = fmap[y - top: y + bottom, x - left: x + right] masked_gaussian = gaussian[radius - top: radius + bottom, radius - left: radius + right] if min(masked_gaussian.shape) > 0 and min(masked_fmap.shape) > 0: masked_fmap = torch.max(masked_fmap, masked_gaussian * k) fmap[y - top: y + bottom, x - left: x + right] = masked_fmap # return fmap def compute_grids(features, strides): """ grids regret to the input image size """ grids = [] for level, feature in enumerate(features): h, w = feature.size()[-2:] shifts_x = torch.arange( 0, w * strides[level], step=strides[level], dtype=torch.float32, device=feature.device) shifts_y = torch.arange( 0, h * strides[level], step=strides[level], dtype=torch.float32, device=feature.device) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \ strides[level] // 2 grids.append(grids_per_level) return grids def get_center3x3(locations, centers, strides, range=3): ''' Inputs: locations: M x 2 centers: N x 2 strides: M ''' range = (range - 1) / 2 M, N = locations.shape[0], centers.shape[0] locations_expanded = locations.view(M, 1, 2).expand(M, N, 2) # M x N x 2 centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2 strides_expanded = strides.view(M, 1, 1).expand(M, N, 2) # M x N centers_discret = ((centers_expanded / 
# NOTE(review): reconstructed from a collapsed dump — tail of heapmap_utils.py, the
# empty image.py, lmdb_utils.py and merge.py. Fix applied: decode_img previously
# printed "Illegal data detected" and then fell through to np.frombuffer(None),
# crashing with an unrelated TypeError; it now raises an explicit KeyError.
def get_center3x3(locations, centers, strides, range=3):
    '''
    Inputs:
        locations: M x 2
        centers:   N x 2
        strides:   M
    Returns an M x N boolean mask: location within a `range` x `range` cell
    window of each discretized center.
    '''
    range = (range - 1) / 2
    M, N = locations.shape[0], centers.shape[0]
    locations_expanded = locations.view(M, 1, 2).expand(M, N, 2)  # M x N x 2
    centers_expanded = centers.view(1, N, 2).expand(M, N, 2)      # M x N x 2
    strides_expanded = strides.view(M, 1, 1).expand(M, N, 2)      # M x N
    centers_discret = ((centers_expanded / strides_expanded).int() *
                       strides_expanded).float() + strides_expanded / 2  # M x N x 2
    dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs()
    dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs()
    return (dist_x <= strides_expanded[:, :, 0] * range) & \
           (dist_y <= strides_expanded[:, :, 0] * range)


def get_pred(score_map_ctr, size_map, offset_map, feat_size):
    """Pick the argmax cell of the center score map and gather its size/offset.

    Returns (size * feat_size, offset) for the best-scoring location per batch item.
    """
    max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)
    idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)
    size = size_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)
    offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)
    return size * feat_size, offset


# ================================================
# FILE: lib/utils/image.py  (empty)
# ================================================


# ================================================
# FILE: lib/utils/lmdb_utils.py
# ================================================
import lmdb
import numpy as np
import cv2
import json

# process-wide caches so each LMDB file is opened at most once
LMDB_ENVS = dict()
LMDB_HANDLES = dict()
LMDB_FILELISTS = dict()


def get_lmdb_handle(name):
    """Return (and cache) a read-only transaction handle for the LMDB at `name`."""
    global LMDB_HANDLES, LMDB_FILELISTS
    item = LMDB_HANDLES.get(name, None)
    if item is None:
        env = lmdb.open(name, readonly=True, lock=False, readahead=False, meminit=False)
        LMDB_ENVS[name] = env
        item = env.begin(write=False)
        LMDB_HANDLES[name] = item
    return item


def decode_img(lmdb_fname, key_name):
    """Decode the JPEG stored under `key_name` into an RGB ndarray.

    Raises:
        KeyError: if the key is missing from the database (previously this
        fell through and crashed with a TypeError in np.frombuffer).
    """
    handle = get_lmdb_handle(lmdb_fname)
    binfile = handle.get(key_name.encode())
    if binfile is None:
        print("Illegal data detected. %s %s" % (lmdb_fname, key_name))
        raise KeyError("key %s not found in %s" % (key_name, lmdb_fname))
    s = np.frombuffer(binfile, np.uint8)
    x = cv2.cvtColor(cv2.imdecode(s, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
    return x


def decode_str(lmdb_fname, key_name):
    """Return the value stored under `key_name` decoded as a string."""
    handle = get_lmdb_handle(lmdb_fname)
    binfile = handle.get(key_name.encode())
    string = binfile.decode()
    return string


def decode_json(lmdb_fname, key_name):
    """Return the value stored under `key_name` parsed as JSON."""
    return json.loads(decode_str(lmdb_fname, key_name))


if __name__ == "__main__":
    lmdb_fname = "/data/sda/v-yanbi/iccv21/LittleBoy_clean/data/got10k_lmdb"
    '''Decode image'''
    # key_name = "test/GOT-10k_Test_000001/00000001.jpg"
    # img = decode_img(lmdb_fname, key_name)
    # cv2.imwrite("001.jpg", img)
    '''Decode str'''
    # key_name = "test/list.txt"
    # key_name = "train/GOT-10k_Train_000001/groundtruth.txt"
    key_name = "train/GOT-10k_Train_000001/absence.label"
    str_ = decode_str(lmdb_fname, key_name)
    print(str_)


# ================================================
# FILE: lib/utils/merge.py
# ================================================
import torch


def merge_template_search(inp_list, return_search=False, return_template=False):
    """NOTICE: search region related features must be in the last place"""
    seq_dict = {"feat": torch.cat([x["feat"] for x in inp_list], dim=0),
                "mask": torch.cat([x["mask"] for x in inp_list], dim=1),
                "pos": torch.cat([x["pos"] for x in inp_list], dim=0)}
    if return_search:
        x = inp_list[-1]
        seq_dict.update({"feat_x": x["feat"], "mask_x": x["mask"], "pos_x": x["pos"]})
    if return_template:
        z = inp_list[0]
        seq_dict.update({"feat_z": z["feat"], "mask_z": z["mask"], "pos_z": z["pos"]})
    return seq_dict


def get_qkv(inp_list):
    """The 1st element of the inp_list is about the template,
    the 2nd (the last) element is about the search region"""
    dict_x = inp_list[-1]
    dict_c = {"feat": torch.cat([x["feat"] for x in inp_list], dim=0),
              "mask": torch.cat([x["mask"] for x in inp_list], dim=1),
              "pos": torch.cat([x["pos"] for x in inp_list], dim=0)}  # concatenated dict
    q = dict_x["feat"] + dict_x["pos"]
    k = dict_c["feat"] + dict_c["pos"]
    v = dict_c["feat"]
    key_padding_mask = dict_c["mask"]
    return q, k, v, key_padding_mask
dict_c["feat"] key_padding_mask = dict_c["mask"] return q, k, v, key_padding_mask ================================================ FILE: lib/utils/misc.py ================================================ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Misc functions, including distributed helpers. Mostly copy-paste from torchvision references. """ import os import subprocess import time from collections import defaultdict, deque import datetime import pickle from typing import Optional, List import torch import torch.distributed as dist from torch import Tensor # needed due to empty tensor bug in pytorch and torchvision 0.5 import torchvision vers = torchvision.__version__.split('.') if int(vers[0]) <= 0 and int(vers[1]) < 7: from torchvision.ops import _new_empty_tensor from torchvision.ops.misc import _output_size class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! 
""" if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = torch.tensor([tensor.numel()], device="cuda") size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) if local_size != max_size: padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return 
data_list def reduce_dict(input_dict, average=True): """ Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction. """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.all_reduce(values) if average: values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ':' + str(len(str(len(iterable)))) + 'd' if torch.cuda.is_available(): log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', 
'{meters}', 'time: {time}', 'data: {data}', 'max mem: {memory:.0f}' ]) else: log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}' ]) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format( header, total_time_str, total_time / len(iterable))) def get_sha(): cwd = os.path.dirname(os.path.abspath(__file__)) def _run(command): return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() sha = 'N/A' diff = "clean" branch = 'N/A' try: sha = _run(['git', 'rev-parse', 'HEAD']) subprocess.check_output(['git', 'diff'], cwd=cwd) diff = _run(['git', 'diff-index', 'HEAD']) diff = "has uncommited changes" if diff else "clean" branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) except Exception: pass message = f"sha: {sha}, status: {diff}, branch: {branch}" return message def collate_fn(batch): batch = list(zip(*batch)) batch[0] = nested_tensor_from_tensor_list(batch[0]) return tuple(batch) def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] # get the first one for sublist in the_list[1:]: # [h,w,3] for index, item in enumerate(sublist): # index: 0,1,2 maxes[index] = max(maxes[index], item) # compare current max with the other 
elements in the whole return maxes class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): # type: (Device) -> NestedTensor # noqa cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: assert mask is not None cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): # TODO make this more general if tensor_list[0].ndim == 3: if torchvision._is_tracing(): # nested_tensor_from_tensor_list() does not export well to ONNX # call _onnx_nested_tensor_from_tensor_list() instead return _onnx_nested_tensor_from_tensor_list(tensor_list) # TODO make it support different-sized images max_size = _max_by_axis([list(img.shape) for img in tensor_list]) # [[3,h1,w1], [3,h2,w2], [3,h3,w3], ...] # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) batch_shape = [len(tensor_list)] + max_size # () b, c, h, w = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # copy valid regions of the images to the largest padded base. m[: img.shape[1], :img.shape[2]] = False else: raise ValueError('not supported') return NestedTensor(tensor, mask) # _onnx_nested_tensor_from_tensor_list() is an implementation of # nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
@torch.jit.unused def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: max_size = [] for i in range(tensor_list[0].dim()): max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) max_size.append(max_size_i) max_size = tuple(max_size) # work around for # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # m[: img.shape[1], :img.shape[2]] = False # which is not yet supported in onnx padded_imgs = [] padded_masks = [] for img in tensor_list: padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) padded_imgs.append(padded_img) m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) padded_masks.append(padded_mask.to(torch.bool)) tensor = torch.stack(padded_imgs) mask = torch.stack(padded_masks) return NestedTensor(tensor, mask=mask) def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = 
int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0) @torch.no_grad() def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" if target.numel() == 0: return [torch.zeros([], device=output.device)] maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor """ Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. 
""" if float(torchvision.__version__[:3]) < 0.7: if input.numel() > 0: return torch.nn.functional.interpolate( input, size, scale_factor, mode, align_corners ) output_shape = _output_size(2, input, size, scale_factor) output_shape = list(input.shape[:-2]) + list(output_shape) return _new_empty_tensor(input, output_shape) else: return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) ================================================ FILE: lib/utils/tensor.py ================================================ import functools import torch import copy from collections import OrderedDict class TensorDict(OrderedDict): """Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.""" def concat(self, other): """Concatenates two dicts without copying internal data.""" return TensorDict(self, **other) def copy(self): return TensorDict(super(TensorDict, self).copy()) def __deepcopy__(self, memodict={}): return TensorDict(copy.deepcopy(list(self), memodict)) def __getattr__(self, name): if not hasattr(torch.Tensor, name): raise AttributeError('\'TensorDict\' object has not attribute \'{}\''.format(name)) def apply_attr(*args, **kwargs): return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()}) return apply_attr def attribute(self, attr: str, *args): return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()}) def apply(self, fn, *args, **kwargs): return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()}) @staticmethod def _iterable(a): return isinstance(a, (TensorDict, list)) class TensorList(list): """Container mainly used for lists of torch tensors. 
Extends lists with pytorch functionality.""" def __init__(self, list_of_tensors = None): if list_of_tensors is None: list_of_tensors = list() super(TensorList, self).__init__(list_of_tensors) def __deepcopy__(self, memodict={}): return TensorList(copy.deepcopy(list(self), memodict)) def __getitem__(self, item): if isinstance(item, int): return super(TensorList, self).__getitem__(item) elif isinstance(item, (tuple, list)): return TensorList([super(TensorList, self).__getitem__(i) for i in item]) else: return TensorList(super(TensorList, self).__getitem__(item)) def __add__(self, other): if TensorList._iterable(other): return TensorList([e1 + e2 for e1, e2 in zip(self, other)]) return TensorList([e + other for e in self]) def __radd__(self, other): if TensorList._iterable(other): return TensorList([e2 + e1 for e1, e2 in zip(self, other)]) return TensorList([other + e for e in self]) def __iadd__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] += e2 else: for i in range(len(self)): self[i] += other return self def __sub__(self, other): if TensorList._iterable(other): return TensorList([e1 - e2 for e1, e2 in zip(self, other)]) return TensorList([e - other for e in self]) def __rsub__(self, other): if TensorList._iterable(other): return TensorList([e2 - e1 for e1, e2 in zip(self, other)]) return TensorList([other - e for e in self]) def __isub__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] -= e2 else: for i in range(len(self)): self[i] -= other return self def __mul__(self, other): if TensorList._iterable(other): return TensorList([e1 * e2 for e1, e2 in zip(self, other)]) return TensorList([e * other for e in self]) def __rmul__(self, other): if TensorList._iterable(other): return TensorList([e2 * e1 for e1, e2 in zip(self, other)]) return TensorList([other * e for e in self]) def __imul__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] *= e2 else: for i in 
range(len(self)): self[i] *= other return self def __truediv__(self, other): if TensorList._iterable(other): return TensorList([e1 / e2 for e1, e2 in zip(self, other)]) return TensorList([e / other for e in self]) def __rtruediv__(self, other): if TensorList._iterable(other): return TensorList([e2 / e1 for e1, e2 in zip(self, other)]) return TensorList([other / e for e in self]) def __itruediv__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] /= e2 else: for i in range(len(self)): self[i] /= other return self def __matmul__(self, other): if TensorList._iterable(other): return TensorList([e1 @ e2 for e1, e2 in zip(self, other)]) return TensorList([e @ other for e in self]) def __rmatmul__(self, other): if TensorList._iterable(other): return TensorList([e2 @ e1 for e1, e2 in zip(self, other)]) return TensorList([other @ e for e in self]) def __imatmul__(self, other): if TensorList._iterable(other): for i, e2 in enumerate(other): self[i] @= e2 else: for i in range(len(self)): self[i] @= other return self def __mod__(self, other): if TensorList._iterable(other): return TensorList([e1 % e2 for e1, e2 in zip(self, other)]) return TensorList([e % other for e in self]) def __rmod__(self, other): if TensorList._iterable(other): return TensorList([e2 % e1 for e1, e2 in zip(self, other)]) return TensorList([other % e for e in self]) def __pos__(self): return TensorList([+e for e in self]) def __neg__(self): return TensorList([-e for e in self]) def __le__(self, other): if TensorList._iterable(other): return TensorList([e1 <= e2 for e1, e2 in zip(self, other)]) return TensorList([e <= other for e in self]) def __ge__(self, other): if TensorList._iterable(other): return TensorList([e1 >= e2 for e1, e2 in zip(self, other)]) return TensorList([e >= other for e in self]) def concat(self, other): return TensorList(super(TensorList, self).__add__(other)) def copy(self): return TensorList(super(TensorList, self).copy()) def unroll(self): if not 
any(isinstance(t, TensorList) for t in self): return self new_list = TensorList() for t in self: if isinstance(t, TensorList): new_list.extend(t.unroll()) else: new_list.append(t) return new_list def list(self): return list(self) def attribute(self, attr: str, *args): return TensorList([getattr(e, attr, *args) for e in self]) def apply(self, fn): return TensorList([fn(e) for e in self]) def __getattr__(self, name): if not hasattr(torch.Tensor, name): raise AttributeError('\'TensorList\' object has not attribute \'{}\''.format(name)) def apply_attr(*args, **kwargs): return TensorList([getattr(e, name)(*args, **kwargs) for e in self]) return apply_attr @staticmethod def _iterable(a): return isinstance(a, (TensorList, list)) def tensor_operation(op): def islist(a): return isinstance(a, TensorList) @functools.wraps(op) def oplist(*args, **kwargs): if len(args) == 0: raise ValueError('Must be at least one argument without keyword (i.e. operand).') if len(args) == 1: if islist(args[0]): return TensorList([op(a, **kwargs) for a in args[0]]) else: # Multiple operands, assume max two if islist(args[0]) and islist(args[1]): return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])]) if islist(args[0]): return TensorList([op(a, *args[1:], **kwargs) for a in args[0]]) if islist(args[1]): return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]]) # None of the operands are lists return op(*args, **kwargs) return oplist ================================================ FILE: lib/utils/variable_hook.py ================================================ import torch from bytecode import Bytecode, Instr class get_local(object): cache = {} is_activate = False def __init__(self, varname): self.varname = varname def __call__(self, func): if not type(self).is_activate: return func type(self).cache[func.__qualname__] = [] c = Bytecode.from_code(func.__code__) extra_code = [ Instr('STORE_FAST', '_res'), Instr('LOAD_FAST', self.varname), Instr('STORE_FAST', 
'_value'), Instr('LOAD_FAST', '_res'), Instr('LOAD_FAST', '_value'), Instr('BUILD_TUPLE', 2), Instr('STORE_FAST', '_result_tuple'), Instr('LOAD_FAST', '_result_tuple'), ] c[-1:-1] = extra_code func.__code__ = c.to_code() def wrapper(*args, **kwargs): res, values = func(*args, **kwargs) if isinstance(values, torch.Tensor): type(self).cache[func.__qualname__].append(values.detach().cpu().numpy()) elif isinstance(values, list): # list of Tensor type(self).cache[func.__qualname__].append([value.detach().cpu().numpy() for value in values]) else: raise NotImplementedError return res return wrapper @classmethod def clear(cls): for key in cls.cache.keys(): cls.cache[key] = [] @classmethod def activate(cls): cls.is_activate = True ================================================ FILE: lib/vis/__init__.py ================================================ ================================================ FILE: lib/vis/plotting.py ================================================ import matplotlib.pyplot as plt import numpy as np import torch import cv2 def draw_figure(fig): fig.canvas.draw() fig.canvas.flush_events() plt.pause(0.001) def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None): """Display a 2D tensor. args: fig_num: Figure number. title: Title of figure. """ a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim == 3: a_np = np.transpose(a_np, (1, 2, 0)) if ax is None: fig = plt.figure(fig_num) plt.tight_layout() plt.cla() plt.imshow(a_np, vmin=range[0], vmax=range[1]) plt.axis('off') plt.axis('equal') if title is not None: plt.title(title) draw_figure(fig) else: ax.cla() ax.imshow(a_np, vmin=range[0], vmax=range[1]) ax.set_axis_off() ax.axis('equal') if title is not None: ax.set_title(title) draw_figure(plt.gcf()) def plot_graph(a: torch.Tensor, fig_num = None, title = None): """Plot graph. Data is a 1D tensor. args: fig_num: Figure number. title: Title of figure. 
""" a_np = a.squeeze().cpu().clone().detach().numpy() if a_np.ndim > 1: raise ValueError fig = plt.figure(fig_num) # plt.tight_layout() plt.cla() plt.plot(a_np) if title is not None: plt.title(title) draw_figure(fig) def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None): im_np = im.clone().cpu().squeeze().numpy() im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8)) boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int) # Draw proposals for i_ in range(boxes.shape[0]): if disp_ids is None or disp_ids[i_]: bb = boxes[i_, :] disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256) cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), disp_color, 1) if iou_pred is not None: text_pos = (bb[0], bb[1] - 5) cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False) im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float() return im_tensor def _pascal_color_map(N=256, normalized=False): """ Python implementation of the color map function for the PASCAL VOC data set. Official Matlab version can be found in the PASCAL VOC devkit http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit """ def bitget(byteval, idx): return (byteval & (1 << idx)) != 0 dtype = 'float32' if normalized else 'uint8' cmap = np.zeros((N, 3), dtype=dtype) for i in range(N): r = g = b = 0 c = i for j in range(8): r = r | (bitget(c, 0) << 7 - j) g = g | (bitget(c, 1) << 7 - j) b = b | (bitget(c, 2) << 7 - j) c = c >> 3 cmap[i] = np.array([r, g, b]) cmap = cmap / 255 if normalized else cmap return cmap def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None): """ Overlay mask over image. Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py This function allows you to overlay a mask over an image with some transparency. # Arguments im: Numpy Array. 
Array with the image. The shape must be (H, W, 3) and the pixels must be represented as `np.uint8` data type. ann: Numpy Array. Array with the mask. The shape must be (H, W) and the values must be intergers alpha: Float. Proportion of alpha to apply at the overlaid mask. colors: Numpy Array. Optional custom colormap. It must have shape (N, 3) being N the maximum number of colors to represent. contour_thickness: Integer. Thickness of each object index contour draw over the overlay. This function requires to have installed the package `opencv-python`. # Returns Numpy Array: Image of the overlay with shape (H, W, 3) and data type `np.uint8`. """ im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int) if im.shape[:-1] != ann.shape: raise ValueError('First two dimensions of `im` and `ann` must match') if im.shape[-1] != 3: raise ValueError('im must have three channels at the 3 dimension') colors = colors or _pascal_color_map() colors = np.asarray(colors, dtype=np.uint8) mask = colors[ann] fg = im * alpha + (1 - alpha) * mask img = im.copy() img[ann > 0] = fg[ann > 0] if contour_thickness: # pragma: no cover import cv2 for obj_id in np.unique(ann[ann > 0]): contours = cv2.findContours((ann == obj_id).astype( np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:] cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(), contour_thickness) return img ================================================ FILE: lib/vis/utils.py ================================================ import torch import numpy as np def numpy_to_torch(a: np.ndarray): return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0) ================================================ FILE: lib/vis/visdom_cus.py ================================================ import visdom import visdom.server import cv2 import torch import copy import numpy as np from collections import OrderedDict from enum import Enum from lib.vis.plotting import overlay_mask, show_image_with_boxes from lib.vis.utils 
import numpy_to_torch


class cv_colors(Enum):
    # Drawing colours for OpenCV. Tuples are written RGB and reversed with
    # [::-1] to the BGR order that cv2 drawing functions expect.
    WHITE = (255, 255, 255)[::-1]
    RED = (0, 0, 255)[::-1]
    GREEN = (0, 255, 0)[::-1]
    BLUE = (255, 0, 0)[::-1]
    PURPLE = (247, 44, 200)[::-1]
    ORANGE = (44, 162, 247)[::-1]
    MINT = (239, 255, 66)[::-1]
    YELLOW = (2, 255, 250)[::-1]
    BLACK = (0, 0, 0)[::-1]


def index_to_color(idx):
    """Map a small integer id (0-6) to a fixed BGR colour.

    Raises KeyError for any idx outside 0-6; callers pass `i % 7`.
    """
    return {
        0: cv_colors.GREEN.value,
        1: cv_colors.BLUE.value,
        2: cv_colors.RED.value,
        3: cv_colors.MINT.value,
        4: cv_colors.YELLOW.value,
        5: cv_colors.WHITE.value,
        6: cv_colors.BLACK.value,
    }[idx]


class VisBase:
    """Base class for one visdom display block.

    A block caches the latest data (save_data) and renders it (draw_data)
    only while its `show_data` flag is on.
    """

    def __init__(self, visdom, show_data, title):
        self.visdom = visdom        # visdom connection used for all drawing
        self.show_data = show_data  # whether this block is currently visible
        self.title = title          # window title, doubles as the visdom win id
        self.raw_data = None        # last payload handed to save_data

    def update(self, data, **kwargs):
        # Always store the new data; only redraw when the block is visible.
        self.save_data(data, **kwargs)
        if self.show_data:
            self.draw_data()

    def save_data(self, data, **kwargs):
        raise NotImplementedError

    def draw_data(self):
        raise NotImplementedError

    def toggle_display(self, new_mode=None):
        """Set visibility explicitly (new_mode) or flip it; hide closes the window."""
        if new_mode is not None:
            self.show_data = new_mode
        else:
            self.show_data = not self.show_data

        if self.show_data:
            self.draw_data()
        else:
            self.visdom.close(self.title)


class VisImage(VisBase):
    """Displays a raw image tensor in a visdom image window."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        # Cast to float so visdom gets a consistent dtype regardless of input.
        data = data.float()
        self.raw_data = data

    def draw_data(self):
        self.visdom.image(self.raw_data.clone(), opts={'title': self.title}, win=self.title)


class VisHeatmap(VisBase):
    """Displays a 2D tensor as a visdom heatmap."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data, **kwargs):
        # flip(0) so the heatmap renders with the image's row order upright.
        data = data.squeeze().flip(0)
        # Keep optional display kwargs (e.g. a caption) alongside the tensor.
        if kwargs:
            self.raw_data = [data, kwargs]
        else:
            self.raw_data = [data]
        # self.raw_data = data

    def draw_data(self):
        if len(self.raw_data) == 2:
            # Extra kwargs present: append the caption to the title and pass
            # the kwargs through to visdom's opts.
            self.visdom.heatmap(
                self.raw_data[0].clone(),
                opts={'title': self.title + ' ' + self.raw_data[1]['caption'], **self.raw_data[1]},
                win=self.title)
        else:
            self.visdom.heatmap(self.raw_data[0].clone(), opts={'title': self.title}, win=self.title)
        # self.visdom.heatmap(self.raw_data.clone(), opts={'title':
        # self.title}, win=self.title)


class VisFeaturemap(VisBase):
    """Per-channel heatmap viewer for a feature map, with a checkbox UI.

    A visdom 'properties' panel ('Featuremap UI') holds one checkbox per
    channel; only checked channels are drawn.
    """

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.block_list = None  # checkbox descriptors, built lazily on first save_data

    def block_list_callback_handler(self, data):
        # Checkbox toggled in the UI: persist the new value, re-render the
        # properties panel, then redraw the selected channels.
        self.block_list[data['propertyId']]['value'] = data['value']
        self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')
        self.draw_data()

    def save_data(self, data):
        # Collapse leading dims to a (channels, H, W) stack; flip rows for display.
        data = data.view(-1, *data.shape[-2:])
        data = data.flip(1)
        if self.block_list is None:
            # First call: build one checkbox per channel and hook up the UI.
            self.block_list = []
            self.draw_feat = []
            for i in range(data.shape[0]):
                self.block_list.append({'type': 'checkbox',
                                        'name': 'Channel {:04d}'.format(i),
                                        'value': False})
            self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')
            self.visdom.register_event_handler(self.block_list_callback_handler, 'featuremap_ui')
        self.raw_data = data

    def draw_data(self):
        # Draw every checked channel in its own window.
        if self.block_list is not None and self.show_data:
            for i, d in enumerate(self.block_list):
                if d['value']:
                    fig_title = '{} ch: {:04d}'.format(self.title, i)
                    self.visdom.heatmap(self.raw_data[i, :, :].clone(),
                                        opts={'title': fig_title}, win=fig_title)


class VisCostVolume(VisBase):
    """Displays a 4D cost volume, either flattened or as a single 2D slice."""

    def __init__(self, visdom, show_data, title, flip=False):
        super().__init__(visdom, show_data, title)
        self.show_slice = False  # True: draw one slice at slice_pos instead of the full volume
        self.slice_pos = None    # [row, col] selected by the VisCostVolumeUI block
        self.flip = flip         # swap which pair of dims indexes the slice

    def show_cost_volume(self):
        data = self.raw_data.clone()
        # data_perm = data.permute(2, 0, 3, 1).contiguous()
        data_perm = data.permute(0, 2, 1, 3).contiguous()
        if self.flip:
            data_perm = data_perm.permute(2, 3, 0, 1).contiguous()
        # Tile the 4D volume into one large 2D heatmap.
        data_perm = data_perm.view(data_perm.shape[0] * data_perm.shape[1], -1)
        self.visdom.heatmap(data_perm.flip(0), opts={'title': self.title}, win=self.title)

    def set_zoom_pos(self, slice_pos):
        self.slice_pos = slice_pos

    def toggle_show_slice(self, new_mode=None):
        if new_mode is not None:
            self.show_slice = new_mode
        else:
            self.show_slice = not self.show_slice

    def show_cost_volume_slice(self):
        slice_pos = self.slice_pos  # slice_pos:
        # [row, col]
        cost_volume_data = self.raw_data.clone()
        # `flip` decides whether slice_pos indexes the trailing or leading dims.
        if self.flip:
            cost_volume_slice = cost_volume_data[:, :, slice_pos[0], slice_pos[1]]
        else:
            cost_volume_slice = cost_volume_data[slice_pos[0], slice_pos[1], :, :]
        self.visdom.heatmap(cost_volume_slice.flip(0), opts={'title': self.title}, win=self.title)

    def save_data(self, data):
        # Reshape to (H, W, H, W); assumes a square-per-position cost volume
        # whose last two dims give the spatial size — TODO confirm upstream shape.
        data = data.view(data.shape[-2], data.shape[-1], data.shape[-2], data.shape[-1])
        self.raw_data = data

    def draw_data(self):
        if self.show_slice:
            self.show_cost_volume_slice()
        else:
            self.show_cost_volume()


class VisCostVolumeUI(VisBase):
    """Interactive image overlay for browsing cost-volume slices.

    Arrow keys move the selected cell, Enter toggles zoom (slice) mode.
    The selection is pushed to every registered VisCostVolume block.
    """

    def cv_ui_handler(self, data):
        # visdom key-press callback: update the selected cell / zoom mode.
        zoom_toggled = False
        if data['event_type'] == 'KeyPress':
            if data['key'] == 'ArrowRight':
                self.zoom_pos[1] = min(self.zoom_pos[1] + 1, self.feat_shape[1] - 1)
            elif data['key'] == 'ArrowLeft':
                self.zoom_pos[1] = max(self.zoom_pos[1] - 1, 0)
            elif data['key'] == 'ArrowUp':
                self.zoom_pos[0] = max(self.zoom_pos[0] - 1, 0)
            elif data['key'] == 'ArrowDown':
                self.zoom_pos[0] = min(self.zoom_pos[0] + 1, self.feat_shape[0] - 1)
            elif data['key'] == 'Enter':
                self.zoom_mode = not self.zoom_mode
                zoom_toggled = True

        # Update image
        self.show_image()

        # Update cost volumes
        for block_title, block in self.registered_blocks.items():
            if isinstance(block, VisCostVolume):
                block.set_zoom_pos(self.zoom_pos)
                block.toggle_show_slice(self.zoom_mode)
                if (self.zoom_mode or zoom_toggled) and block.show_data:
                    block.draw_data()

    def __init__(self, visdom, show_data, title, feat_shape, registered_blocks):
        super().__init__(visdom, show_data, title)
        self.feat_shape = feat_shape  # (rows, cols) of the cost-volume grid
        self.zoom_mode = False
        # Start the selection at the centre cell of the grid.
        self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)]
        self.registered_blocks = registered_blocks  # shared dict owned by Visdom
        self.visdom.register_event_handler(self.cv_ui_handler, title)

    def draw_grid(self, data):
        # Cell size in image pixels for each grid cell.
        stride_r = int(data.shape[1] / self.feat_shape[0])
        stride_c = int(data.shape[2] / self.feat_shape[1])

        # Draw grid
        data[:, list(range(0, data.shape[1], stride_r)), :] = 0
        data[:, :, list(range(0, data.shape[2],
                             stride_c))] = 0
        # Red grid lines: zero all channels, then max out the first channel.
        data[0, list(range(0, data.shape[1], stride_r)), :] = 255
        data[0, :, list(range(0, data.shape[2], stride_c))] = 255
        return data

    def shade_cell(self, data):
        """Blend a red highlight over the currently selected grid cell."""
        stride_r = int(data.shape[1] / self.feat_shape[0])
        stride_c = int(data.shape[2] / self.feat_shape[1])

        r1 = self.zoom_pos[0] * stride_r
        r2 = min((self.zoom_pos[0] + 1) * stride_r, data.shape[1])

        c1 = self.zoom_pos[1] * stride_c
        c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2])

        # Stronger shade while zoom (slice) mode is active.
        factor = 0.8 if self.zoom_mode else 0.5
        data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(
            data.device) * factor
        return data

    def show_image(self, data=None):
        if data is None:
            data = self.raw_data.clone()
        data = self.draw_grid(data)
        data = self.shade_cell(data)
        self.visdom.image(data, opts={'title': self.title}, win=self.title)

    def save_data(self, data):
        # Ignore feat shape (data arrives as (image, feat_shape); only the image is kept)
        data = data[0]
        data = data.float()
        self.raw_data = data

    def draw_data(self):
        self.show_image(self.raw_data.clone())


class VisInfoDict(VisBase):
    """Accumulates key/value status info and renders it as a text window."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.raw_data = OrderedDict()  # merged info across updates, insertion-ordered

    def generate_display_text(self, data):
        # One "key: value" line per entry; floats shown with two decimals.
        # NOTE(review): line separator reconstructed from a garbled dump —
        # confirm the original used '\n' here.
        display_text = ''
        for key, value in data.items():
            key = key.replace('_', ' ')
            if value is None:
                display_text += '{}: {}\n'.format(key, 'None')
            elif isinstance(value, (str, int)):
                display_text += '{}: {}\n'.format(key, value)
            else:
                display_text += '{}: {:.2f}\n'.format(key, value)

        return display_text

    def save_data(self, data):
        # Merge into the persistent dict so old keys survive partial updates.
        for key, val in data.items():
            self.raw_data[key] = val

    def draw_data(self):
        data = copy.deepcopy(self.raw_data)
        display_text = self.generate_display_text(data)
        self.visdom.text(display_text, opts={'title': self.title}, win=self.title)


class VisText(VisBase):
    """Displays a plain text string."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        self.raw_data = data

    def draw_data(self):
        data = copy.deepcopy(self.raw_data)
        self.visdom.text(data, opts={'title': self.title}, win=self.title)


class VisLinePlot(VisBase):
    """Line plot; accepts either y values alone or a (y, x) pair."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)

    def save_data(self, data):
        self.raw_data = data

    def draw_data(self):
        if isinstance(self.raw_data, (list, tuple)):
            data_y = self.raw_data[0].clone()
            data_x = self.raw_data[1].clone()
        else:
            # Only y given: plot against the index.
            data_y = self.raw_data.clone()
            data_x = torch.arange(data_y.shape[0])
        self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title)


class VisTracking(VisBase):
    """Draws a tracking frame with bounding boxes and/or segmentation masks."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.count = 0  # frame counter (only used by the commented-out dump code below)

    def save_data(self, data, **kwargs):
        # data = [image, box_or_mask, box_or_mask, ...]; entries may be None.
        image = data[0]
        boxes_masks = data[1:]
        boxes, masks = [], []
        for bm in boxes_masks:
            if bm is None:
                continue
            if isinstance(bm, list):
                boxes.append(torch.Tensor(bm))
                continue
            # >1 dim: treat as a segmentation mask, otherwise as a box vector.
            if len(bm.shape) > 1:
                # Binarize segmentation if a float tensor is provided
                if bm.dtype != np.uint8:
                    bm = (bm > 0.5).astype(np.uint8)
                masks.append(bm)
                continue
            boxes.append(bm.float())
        if kwargs:
            self.raw_data = [image, boxes, masks, kwargs]
        else:
            self.raw_data = [image, boxes, masks]

    def draw_data(self):
        disp_image = self.raw_data[0].copy()

        # Downscale large frames so the visdom window stays a reasonable size.
        resize_factor = 1
        if max(disp_image.shape) > 480:
            resize_factor = 480.0 / float(max(disp_image.shape))
            disp_image = cv2.resize(disp_image, None, fx=resize_factor, fy=resize_factor)

        for i, mask in enumerate(self.raw_data[2]):
            self.raw_data[2][i] = cv2.resize(mask, None,
                                             fx=resize_factor, fy=resize_factor)

        # if box has score
        # NOTE(review): raw_data[1][0] raises IndexError when no boxes were
        # passed — presumably callers always provide at least one box; verify.
        scores = None
        if self.raw_data[1][0].shape[0] == 5:
            # 5-element boxes carry a confidence score in the last slot.
            scores = [box[4].item() for box in self.raw_data[1]]
            self.raw_data[1] = [box[:4] for box in self.raw_data[1]]

        boxes = [resize_factor * b.clone() for b in self.raw_data[1]]
        for i, disp_rect in enumerate(boxes):
            # color = ((255 * ((i % 3) > 0)), 255 * ((i + 1) % 2), (255 * (i % 5)) // 4)
            color = index_to_color(i % 7)
            # Boxes are (x, y, w, h).
            cv2.rectangle(disp_image,
                          (int(disp_rect[0]), int(disp_rect[1])),
                          (int(disp_rect[0] + disp_rect[2]), int(disp_rect[1] + disp_rect[3])),
                          color, 2)
            if scores is not None:
                cv2.putText(disp_image, "{:.3f}".format(scores[i]),
                            (int(disp_rect[0]), int(disp_rect[1])),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)

        # Overlay masks with distinct ids (enumerate from 1 so id 0 = background).
        for i, mask in enumerate(self.raw_data[2], 1):
            disp_image = overlay_mask(disp_image, mask * i)

        # import os
        # write_img = disp_image.copy()
        # write_img = write_img[:, :, ::-1]
        #
        # cv2.imwrite(os.path.join('/home/yebotao/test', str(self.count).zfill(3) + '.jpg'), write_img)
        # cv2.imwrite(os.path.join('/home/yebotao/test', self.raw_data[3]['caption'].split('_')[-1] + '.jpg'), write_img)
        # self.count += 1

        disp_image = numpy_to_torch(disp_image).squeeze(0)
        disp_image = disp_image.float()
        if len(self.raw_data) > 3:
            # Extra kwargs (e.g. caption) were stored at index 3.
            self.visdom.image(disp_image, opts={'title': self.title, **self.raw_data[3]}, win=self.title)
        else:
            self.visdom.image(disp_image, opts={'title': self.title}, win=self.title)


class VisBBReg(VisBase):
    """Shows initial vs. refined bounding boxes for bbox-regression debugging."""

    def __init__(self, visdom, show_data, title):
        super().__init__(visdom, show_data, title)
        self.block_list = []  # checkboxes selecting which box ids to display

    def block_list_callback_handler(self, data):
        self.block_list[data['propertyId']]['value'] = data['value']
        self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')
        self.draw_data()

    def save_data(self, data):
        # data = (image, init_boxes, final_boxes, final_ious)
        self.image = data[0].float()
        self.init_boxes = data[1]
        self.final_boxes = data[2]
        self.final_ious = data[3]

    def draw_data(self):
        if len(self.block_list) == 0:
            # Lazily create the two id checkboxes and register the UI callback.
            self.block_list.append({'type': 'checkbox', 'name': 'ID 0', 'value': True})
            self.block_list.append({'type': 'checkbox', 'name': 'ID 1', 'value': True})
            self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')
            self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis')

        disp_image = self.image

        ids = [x['value'] for x in self.block_list]
        init_box_image = show_image_with_boxes(disp_image.clone(), self.init_boxes.clone(), disp_ids=ids)
        final_box_image = show_image_with_boxes(disp_image.clone(), self.final_boxes.clone(),
                                                self.final_ious.clone(), disp_ids=ids)

        self.visdom.image(init_box_image, opts={'title': 'Init Boxes'}, win='Init Boxes')
        self.visdom.image(final_box_image, opts={'title': 'Final Boxes'}, win='Final Boxes')


class Visdom:
    """Top-level visualizer: owns the visdom connection and a registry of blocks.

    A 'Block List' properties panel toggles each registered block's visibility.
    """

    def __init__(self, debug=0, ui_info=None, visdom_info=None, env=None):
        self.debug = debug  # blocks draw only when debug >= their debug_level
        if env is not None:
            self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'),
                                        port=visdom_info.get('port', 8097), env=env)
        else:
            self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'),
                                        port=visdom_info.get('port', 8097))
        self.registered_blocks = {}  # title -> VisBase subclass instance
        self.blocks_list = []        # checkbox descriptors for the Block List panel

        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')
        self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list')

        if ui_info is not None:
            self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id'])

    def block_list_callback_handler(self, data):
        # A Block List checkbox changed: toggle that block and re-render the panel.
        field_name = self.blocks_list[data['propertyId']]['name']

        self.registered_blocks[field_name].toggle_display(data['value'])

        self.blocks_list[data['propertyId']]['value'] = data['value']

        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')

    def register(self, data, mode, debug_level=0, title='Data', **kwargs):
        """Create the block for `title` on first use (keyed by `mode`), then feed it `data`."""
        if title not in self.registered_blocks.keys():
            show_data = self.debug >= debug_level

            # 'Tracking' is always shown and gets no toggle checkbox.
            if title != 'Tracking':
                self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data})
                self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')

            # Dispatch on mode to pick the concrete block type.
            if mode == 'image':
                self.registered_blocks[title] = VisImage(self.visdom, show_data, title)
            elif mode == 'heatmap':
                self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title)
            elif mode == 'cost_volume':
                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title)
            elif mode == 'cost_volume_flip':
                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True)
            elif mode == 'cost_volume_ui':
                # data[1] carries the feature-grid shape for the UI overlay.
                self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1],
                                                                self.registered_blocks)
            elif mode == 'info_dict':
                self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title)
            elif mode == 'text':
                self.registered_blocks[title] = VisText(self.visdom, show_data, title)
            elif mode == 'lineplot':
                self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title)
            elif mode == 'Tracking':
                self.registered_blocks[title] = VisTracking(self.visdom, show_data, title)
            elif mode == 'bbreg':
                self.registered_blocks[title] = VisBBReg(self.visdom, show_data, title)
            elif mode == 'featmap':
                self.registered_blocks[title] = VisFeaturemap(self.visdom, show_data, title)
            else:
                raise ValueError('Visdom Error: Unknown data mode {}'.format(mode))
        # Update
        self.registered_blocks[title].update(data, **kwargs)


================================================
FILE: tracking/_init_paths.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path as osp
import sys


def add_path(path):
    # Prepend once so project-local modules shadow any installed copies.
    if path not in sys.path:
        sys.path.insert(0, path)


# Make the repository root importable from scripts in tracking/.
this_dir = osp.dirname(__file__)
prj_path = osp.join(this_dir, '..')
add_path(prj_path)


================================================
FILE: tracking/analysis_results.py
================================================
import _init_paths
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [8, 8]

from lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results
from lib.test.evaluation import get_dataset, trackerlist

# Collect the tracker runs to evaluate; only the ARTrackSeq entry is active,
# the rest are kept as reference configurations.
trackers = []
dataset_name = 'lasot_extension_subset'
#dataset_name = 'lasot'

"""stark"""
# trackers.extend(trackerlist(name='stark_s', parameter_name='baseline', dataset_name=dataset_name,
#                             run_ids=None, display_name='STARK-S50'))
# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline', dataset_name=dataset_name,
#                             run_ids=None, display_name='STARK-ST50'))
# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline_R101', dataset_name=dataset_name,
#                             run_ids=None, display_name='STARK-ST101'))
"""TransT"""
# trackers.extend(trackerlist(name='TransT_N2', parameter_name=None, dataset_name=None,
#                             run_ids=None, display_name='TransT_N2', result_only=True))
# trackers.extend(trackerlist(name='TransT_N4', parameter_name=None, dataset_name=None,
#                             run_ids=None, display_name='TransT_N4', result_only=True))
"""pytracking"""
# trackers.extend(trackerlist('atom', 'default', None, range(0,5), 'ATOM'))
# trackers.extend(trackerlist('dimp', 'dimp18', None, range(0,5), 'DiMP18'))
# trackers.extend(trackerlist('dimp', 'dimp50', None, range(0,5), 'DiMP50'))
# trackers.extend(trackerlist('dimp', 'prdimp18', None, range(0,5), 'PrDiMP18'))
# trackers.extend(trackerlist('dimp', 'prdimp50', None, range(0,5), 'PrDiMP50'))
"""ostrack"""
trackers.extend(trackerlist(name='artrack_seq', parameter_name='artrack_seq_256_full', dataset_name=dataset_name,
                            run_ids=None, display_name='ARTrackSeq_256'))
#trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name,
#.
#                            run_ids=None, display_name='OSTrack384'))

dataset = get_dataset(dataset_name)
# dataset = get_dataset('otb', 'nfs', 'uav', 'tc128ce')
# plot_results(trackers, dataset, 'OTB2015', merge_results=True, plot_types=('success', 'norm_prec'),
#              skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05)
print_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'norm_prec', 'prec'))
# print_results(trackers, dataset, 'UNO', merge_results=True, plot_types=('success', 'prec'))


================================================
FILE: tracking/analysis_results_ITP.py
================================================
import _init_paths
import argparse

from lib.test.analysis.plot_results import print_results
from lib.test.evaluation import get_dataset, trackerlist


def parse_args():
    """ args for evaluation. """
    parser = argparse.ArgumentParser(description='Parse args for training')
    # for train
    parser.add_argument('--script', type=str, help='training script name')
    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    # Evaluate a single (script, config) run on LaSOT and print the metrics.
    args = parse_args()
    trackers = []
    trackers.extend(trackerlist(args.script, args.config, "None", None, args.config))
    dataset = get_dataset('lasot')
    print_results(trackers, dataset, 'LaSOT', merge_results=True, plot_types=('success', 'prec', 'norm_prec'))


================================================
FILE: tracking/convert_transt.py
================================================
import _init_paths
import os
from lib.test.evaluation import get_dataset
import shutil

# One-off helper: rename TransT result files so the NFS sequence names match
# the evaluation toolkit's expected "<seq_name>.txt" convention.
trackers = []
# dataset_name = 'uav'
dataset_name = 'nfs'

root_dir = "/data/sda/v-yanbi/iccv21/STARK_Latest/Stark"
base_dir = os.path.join(root_dir, "test/tracking_results/TransT_N2")

dataset = get_dataset(dataset_name)
for x in dataset:
    seq_name = x.name
    # Results on disk are stored without the "nfs_" prefix.
    file_name = "%s.txt" % (seq_name.replace("nfs_", ""))
    file_path = os.path.join(base_dir, file_name)
    file_path_new = os.path.join(base_dir, "%s.txt" % seq_name)
    if os.path.exists(file_path):
        shutil.move(file_path, file_path_new)


================================================
FILE: tracking/create_default_local_file.py
================================================
import argparse
import os
import _init_paths
from lib.train.admin import create_default_local_file_ITP_train
from lib.test.evaluation import create_default_local_file_ITP_test


def parse_args():
    parser = argparse.ArgumentParser(description='Create default local file on ITP or PAI')
    parser.add_argument("--workspace_dir", type=str, required=True)  # workspace dir
    parser.add_argument("--data_dir", type=str, required=True)
    parser.add_argument("--save_dir", type=str, required=True)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    # Generate the local.py environment files for both train and test code,
    # using absolute paths so they work regardless of the launch directory.
    args = parse_args()
    workspace_dir = os.path.realpath(args.workspace_dir)
    data_dir = os.path.realpath(args.data_dir)
    save_dir = os.path.realpath(args.save_dir)
    create_default_local_file_ITP_train(workspace_dir, data_dir)
    create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir)


================================================
FILE: tracking/download_pytracking_results.py
================================================
import os
import sys
import gdown
import re
import shutil
import argparse
import tempfile
import _init_paths
from lib.test.evaluation.environment import env_settings

# Google Drive file ids for the published pytracking result archives,
# keyed as tracker -> {zip file name: drive id}.
pytracking_results_link_dict = {
    "dimp": {
        "prdimp50_003.zip": "1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc",
        "prdimp50_002.zip": "1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz",
        "prdimp50_001.zip": "17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS",
        "prdimp50_000.zip": "1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6",
        "prdimp18_004.zip": "1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM",
        "prdimp18_003.zip": "1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO",
        "prdimp18_002.zip": "17lMllYhygCqgE81DoHX4BZar3xc3auzM",
        "prdimp18_001.zip": "1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G",
        "prdimp18_000.zip": "1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z",
        "prdimp50_004.zip":
            "1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO",
        "dimp50_004.zip": "1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa",
        "dimp50_000.zip": "1LCgf5sg453Z4bY37A_W5mbXeG68U1fET",
        "dimp18_000.zip": "17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g",
        "dimp18_001.zip": "1AsiliVgISyDTouYOQYVOXA0srj3YskhJ",
        "dimp18_002.zip": "1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y",
        "dimp50_001.zip": "1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K",
        "dimp18_004.zip": "1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg",
        "dimp18_003.zip": "1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8",
        "dimp50_003.zip": "1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy",
        "dimp50_002.zip": "1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4",
    },
    "atom": {
        "default_004.zip": "1BapnQh_8iRM44DXj862eOZV4q8zQLdmT",
        "default_003.zip": "1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5",
        "default_000.zip": "1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5",
        "default_002.zip": "1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC",
        "default_001.zip": "1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt",
    },
    "kys": {
        "default_004.zip": "1QdfkA3d4MzKwdDiBOM1ZhDJWk9NmALxD",
        "default_000.zip": "1SCs79_ePTc8zxPDzRAgAmbbRlnmE89SN",
        "default_003.zip": "1TCzq38QW4YiMrgU5VR6NAEefJ85gwzfT",
        "default_002.zip": "1_9u1ybCFxHu0yJmW5ZzDR4-isJMEUsDf",
        "default_001.zip": "1utJhdosNj6vlI75dfzUxGM3Vy8OjWslT",
    },
}


def _download_file(file_id, path):
    # Fetch one Google Drive file by id via gdown.
    link = 'https://drive.google.com/uc?id=' + file_id
    gdown.download(link, path, quiet=True)


def download_results(download_path, trackers='pytracking'):
    """ Script to automatically download tracker results for PyTracking.
    args:
        download_path - Directory where the zipped results are downloaded
        trackers - Tracker results which are to be downloaded. If set to 'pytracking', results for all pytracking
                   based trackers will be downloaded. If set to 'external', results for available external trackers
                   will be downloaded. If set to 'all', all available results are downloaded. If set to a name of a
                   tracker (e.g. atom), all results for that tracker are downloaded. Otherwise, it can be set to a
                   dict, where the keys are the names of the trackers for which results are downloaded. The value can
                   be set to either 'all', in which case all available results for the tracker are downloaded. Else
                   the value should be a list of parameter file names.
    """
    print('Using download path ''{}'''.format(download_path))

    os.makedirs(download_path, exist_ok=True)

    # Normalize the `trackers` argument into a {tracker: 'all' | [params]} dict.
    # NOTE(review): 'all'/'external' reference external_results_link_dict,
    # which is not defined in this file — those branches would raise NameError.
    if isinstance(trackers, str):
        if trackers == 'all':
            all_trackers = list(pytracking_results_link_dict.keys()) + list(external_results_link_dict.keys())
            trackers = {k: 'all' for k in all_trackers}
        elif trackers == 'pytracking':
            trackers = {k: 'all' for k in pytracking_results_link_dict.keys()}
        elif trackers == 'external':
            trackers = {k: 'all' for k in external_results_link_dict.keys()}
        elif trackers in pytracking_results_link_dict or trackers in external_results_link_dict:
            trackers = {trackers: 'all'}
        else:
            raise Exception('tracker_list must be set to ''all'', a tracker name, or be a dict')
    elif isinstance(trackers, dict):
        pass
    else:
        raise Exception('tracker_list must be set to ''all'', or be a dict')

    common_link_dict = pytracking_results_link_dict
    # for k, v in external_results_link_dict.items():
    #     common_link_dict[k] = v

    for trk, runfiles in trackers.items():
        trk_path = os.path.join(download_path, trk)
        if not os.path.exists(trk_path):
            os.makedirs(trk_path)

        if runfiles == 'all':
            for params, fileid in common_link_dict[trk].items():
                print('Downloading: {}/{}'.format(trk, params))
                _download_file(fileid, os.path.join(trk_path, params))
        elif isinstance(runfiles, (list, tuple)):
            for p in runfiles:
                # Match "param.zip" and "param_NNN.zip" run variants.
                for params, fileid in common_link_dict[trk].items():
                    if re.match(r'{}(|_(\d\d\d)).zip'.format(p), params) is not None:
                        print('Downloading: {}/{}'.format(trk, params))
                        _download_file(fileid, os.path.join(trk_path, params))
        else:
            raise Exception('tracker_list values must either be set to ''all'', or be a list of param names')


def unpack_tracking_results(download_path, output_path=None):
    """ Unpacks zipped benchmark results. The directory 'download_path' should have the following structure
    - root
        - tracker1
            - param1.zip
            - param2.zip
            .
            .
        - tracker2
            - param1.zip
            - param2.zip
            .
            .
    args:
        download_path - Path to the directory where the zipped results are stored
        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path
                      by default
    """

    if output_path is None:
        output_path = env_settings().results_path

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    trackers = os.listdir(download_path)

    for t in trackers:
        runfiles = os.listdir(os.path.join(download_path, t))

        for r in runfiles:
            save_path = os.path.join(output_path, t)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            # Strip the '.zip' suffix for the destination directory name.
            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')


def main():
    parser = argparse.ArgumentParser(description='Download and unpack zipped results')
    parser.add_argument('--tracker', type=str, default='pytracking',
                        help='Name of tracker results to download, or "pytracking" (downloads results for PyTracking'
                             ' based trackers, or "external" (downloads results for external trackers) or "all"')
    parser.add_argument('--output_path', type=str, default=None,
                        help='Path to the directory where the results will be unpacked.')
    parser.add_argument('--temp_download_path', type=str, default=None,
                        help='Temporary path used for downloading the Zip files.')
    # NOTE(review): type=bool on argparse does not parse "False" correctly —
    # any non-empty string is truthy; left unchanged to preserve behavior.
    parser.add_argument('--download', type=bool, default=True,
                        help='Whether to download results or unpack existing downloaded files.')

    args = parser.parse_args()

    download_path = args.temp_download_path
    if download_path is None:
        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())

    if args.download:
        download_results(download_path, args.tracker)

    unpack_tracking_results(download_path, args.output_path)


if __name__ == '__main__':
    main()


================================================
FILE: tracking/pre_read_datasets.py
================================================
import _init_paths
import multiprocessing as mp
import argparse
import os
from lib.utils.lmdb_utils import decode_str
import time
import json


def parse_args():
    """ args for training. """
    parser = argparse.ArgumentParser(description='Parse args for training')
    parser.add_argument('--data_dir', type=str, help='directory where lmdb data is located')
    parser.add_argument('--dataset_str', type=str, help="which datasets to use")

    args = parser.parse_args()

    return args


def get_trknet_dict(trknet_dir):
    """Map each TrackingNet set index to one representative annotation key."""
    with open(os.path.join(trknet_dir, "seq_list.json"), "r") as f:
        seq_list = json.loads(f.read())
    res_dict = {}
    set_idx_pre = -1
    for set_idx, seq_name in seq_list:
        # Keep only the first sequence of each set.
        if set_idx != set_idx_pre:
            res_dict[set_idx] = "anno/%s.txt" % seq_name
            set_idx_pre = set_idx
    return res_dict


def target(lmdb_dir, key_name):
    # Touch one lmdb key so the OS page cache pre-loads the database file.
    _ = decode_str(lmdb_dir, key_name)


if __name__ == "__main__":
    args = parse_args()
    data_dir = args.data_dir
    dataset_str = args.dataset_str
    # Representative key to read per dataset; reading it warms the cache.
    key_dict = {"got10k_lmdb": "train/list.txt", "lasot_lmdb": "LaSOTBenchmark.json",
                "coco_lmdb": "annotations/instances_train2017.json", "vid_lmdb": "cache.json"}
    print("Ready to pre load datasets")
    start = time.time()
    ps = []
    datasets = []
    # dataset_str is a flag string: g=got10k, l=lasot, c=coco, v=vid, t=trackingnet.
    if 'g' in dataset_str:
        datasets.append("got10k_lmdb")
    if 'l' in dataset_str:
        datasets.append("lasot_lmdb")
    if 'c' in dataset_str:
        datasets.append("coco_lmdb")
    if 'v' in dataset_str:
        datasets.append("vid_lmdb")
    for dataset in datasets:
        lmdb_dir = os.path.join(data_dir, dataset)
        p = mp.Process(target=target, args=(lmdb_dir, key_dict[dataset]))
        print("add %s %s to job queue" % (lmdb_dir, key_dict[dataset]))
        ps.append(p)
    # deal with trackingnet
    if 't' in dataset_str:
        trknet_dict = get_trknet_dict(os.path.join(data_dir, "trackingnet_lmdb"))
        for set_idx, seq_path in trknet_dict.items():
            lmdb_dir = os.path.join(data_dir, "trackingnet_lmdb", "TRAIN_%d_lmdb" % set_idx)
            p = mp.Process(target=target, args=(lmdb_dir, seq_path))
            print("add %s %s to job queue" % (lmdb_dir, seq_path))
            ps.append(p)
    for p in ps:
        p.start()
    for p in ps:
        p.join()
    print("Pre read over")
    end = time.time()
    hour = (end - start) / 3600
    print("it takes %.2f hours to pre-read data" % hour)


================================================
FILE: tracking/test.py
================================================
import os
import sys
import argparse

# Make the repo root importable before pulling in lib.* modules.
prj_path = os.path.join(os.path.dirname(__file__), '..')
if prj_path not in sys.path:
    sys.path.append(prj_path)

from lib.test.evaluation import get_dataset
from lib.test.evaluation.running import run_dataset
from lib.test.evaluation.tracker import Tracker


def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,
                num_gpus=8):
    """Run tracker on sequence or dataset.
    args:
        tracker_name: Name of tracking method.
        tracker_param: Name of parameter file.
        run_id: The run id.
        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).
        sequence: Sequence number or name.
        debug: Debug level.
        threads: Number of threads.
    """
    dataset = get_dataset(dataset_name)

    if sequence is not None:
        dataset = [dataset[sequence]]

    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]

    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)


def main():
    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')
    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
    parser.add_argument('tracker_param', type=str, help='Name of config file.')
    parser.add_argument('--runid', type=int, default=None, help='The run id.')
    parser.add_argument('--dataset_name', type=str, default='otb',
                        help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')
    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')
    parser.add_argument('--debug', type=int, default=0, help='Debug level.')
    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')
    parser.add_argument('--num_gpus', type=int, default=8)

    args = parser.parse_args()

    # Allow the sequence to be given either by index or by name.
    try:
        seq_name = int(args.sequence)
    except:
        seq_name = args.sequence

    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,
                args.threads, num_gpus=args.num_gpus)


if __name__ == '__main__':
    main()


================================================
FILE: tracking/test_exp.py
================================================
import os
import sys
import argparse

# Make the repo root importable before pulling in lib.* modules.
prj_path = os.path.join(os.path.dirname(__file__), '..')
if prj_path not in sys.path:
    sys.path.append(prj_path)

from lib.test.evaluation import get_dataset
from lib.test.evaluation.running import run_dataset
from lib.test.evaluation.tracker import Tracker


def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,
                num_gpus=8):
    """Run tracker on sequence or dataset.
    args:
        tracker_name: Name of tracking method.
        tracker_param: Name of parameter file.
        run_id: The run id.
        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).
        sequence: Sequence number or name.
        debug: Debug level.
        threads: Number of threads.
    """
    # Unlike test.py, dataset_name is a list here and is splatted into
    # get_dataset to evaluate on several benchmarks in one run.
    dataset = get_dataset(*dataset_name)

    if sequence is not None:
        dataset = [dataset[sequence]]

    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]

    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)


def main():
    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')
    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
    parser.add_argument('tracker_param', type=str, help='Name of config file.')
    parser.add_argument('--runid', type=int, default=None, help='The run id.')
    parser.add_argument('--dataset_name', type=str, default='otb',
                        help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')
    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')
    parser.add_argument('--debug', type=int, default=0, help='Debug level.')
    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')
    parser.add_argument('--num_gpus', type=int, default=8)

    args = parser.parse_args()

    try:
        seq_name = int(args.sequence)
    except:
        seq_name = args.sequence

    # The --dataset_name flag is overridden: this script always runs the
    # fixed three-benchmark suite below.
    args.dataset_name = ['trackingnet', 'got10k_test', 'lasot']
    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,
                args.threads, num_gpus=args.num_gpus)


if __name__ == '__main__':
    main()


================================================
FILE: tracking/train.py
================================================
import os
import argparse
import random
import torch


def parse_args():
    """ args for training.
""" parser = argparse.ArgumentParser(description='Parse args for training') # for train parser.add_argument('--script', type=str, help='training script name') parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name') parser.add_argument('--save_dir', type=str, help='root directory to save checkpoints, logs, and tensorboard') parser.add_argument('--mode', type=str, choices=["single", "multiple", "multi_node"], default="multiple", help="train on single gpu or multiple gpus") parser.add_argument('--nproc_per_node', type=int, help="number of GPUs per node") # specify when mode is multiple parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0) # whether datasets are in lmdb format parser.add_argument('--script_prv', type=str, help='training script name') parser.add_argument('--config_prv', type=str, default='baseline', help='yaml configure file name') parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0) # whether to use wandb # for knowledge distillation parser.add_argument('--distill', type=int, choices=[0, 1], default=0) # whether to use knowledge distillation parser.add_argument('--script_teacher', type=str, help='teacher script name') parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name') # for multiple machines parser.add_argument('--rank', type=int, help='Rank of the current process.') parser.add_argument('--world-size', type=int, help='Number of processes participating in the job.') parser.add_argument('--ip', type=str, default='127.0.0.1', help='IP of the current rank 0.') parser.add_argument('--port', type=int, default='20000', help='Port of the current rank 0.') args = parser.parse_args() return args def main(): torch.set_num_threads(8) args = parse_args() if args.mode == "single": train_cmd = "python lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d " \ "--script_prv %s --config_prv %s --distill %d --script_teacher %s 
--config_teacher %s --use_wandb %d"\ % (args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.distill, args.script_teacher, args.config_teacher, args.use_wandb) elif args.mode == "multiple": train_cmd = "python -m torch.distributed.launch --nproc_per_node %d --master_port %d lib/train/run_training.py " \ "--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d " \ "--distill %d --script_teacher %s --config_teacher %s" \ % (args.nproc_per_node, random.randint(10000, 50000), args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher) elif args.mode == "multi_node": train_cmd = "python -m torch.distributed.launch --nproc_per_node %d --master_addr %s --master_port %d --nnodes %d --node_rank %d lib/train/run_training.py " \ "--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d " \ "--distill %d --script_teacher %s --config_teacher %s" \ % (args.nproc_per_node, args.ip, args.port, args.world_size, args.rank, args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb, args.distill, args.script_teacher, args.config_teacher) else: raise ValueError("mode should be 'single' or 'multiple'.") os.system(train_cmd) if __name__ == "__main__": main()