Showing preview only (2,935K chars total). Download the full file or copy to clipboard to get everything.
Repository: Luffy03/VoCo
Branch: main
Commit: ba7b7bd22e98
Files: 329
Total size: 2.7 MB
Directory structure:
gitextract_xwu1bqsv/
├── Finetune/
│ ├── AbdomenAtlas/
│ │ ├── Atlas_test.py
│ │ ├── Atlas_test.sh
│ │ ├── check.py
│ │ ├── dataset/
│ │ │ ├── __init__.py
│ │ │ ├── dataloader_bdmap.py
│ │ │ ├── dataloader_test.py
│ │ │ └── dataset_list/
│ │ │ └── AbdomenAtlas1.0.txt
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── preprocess/
│ │ │ └── try_load.py
│ │ ├── readme.md
│ │ ├── requirements.txt
│ │ ├── train.sh
│ │ ├── train.slurm
│ │ ├── trainer.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── data_trans.py
│ │ ├── mixup.py
│ │ └── utils.py
│ ├── Amos/
│ │ ├── check_test.py
│ │ ├── dataset/
│ │ │ ├── __init__.py
│ │ │ ├── dataset.json
│ │ │ └── dataset_test50.json
│ │ ├── dataset_CT.json
│ │ ├── gen_json.py
│ │ ├── inferers.py
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── pre_cache.py
│ │ ├── test.py
│ │ ├── train.sh
│ │ ├── trainer.py
│ │ ├── utils/
│ │ │ ├── __init__.py
│ │ │ ├── data_test.py
│ │ │ ├── data_utils.py
│ │ │ └── utils.py
│ │ └── val.py
│ ├── BTCV/
│ │ ├── dataset/
│ │ │ ├── __init__.py
│ │ │ └── dataset_0.json
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── trainer.py
│ │ ├── utils/
│ │ │ ├── __init__.py
│ │ │ ├── data_test.py
│ │ │ ├── data_utils.py
│ │ │ └── utils.py
│ │ └── val.py
│ ├── CC-CCII/
│ │ ├── csv/
│ │ │ ├── CC_CCII_fold0_train.csv
│ │ │ ├── CC_CCII_fold0_valid.csv
│ │ │ ├── CC_CCII_fold1_train.csv
│ │ │ ├── CC_CCII_fold1_valid.csv
│ │ │ ├── CC_CCII_fold2_train.csv
│ │ │ ├── CC_CCII_fold2_valid.csv
│ │ │ └── CC_CCII_metadata.csv
│ │ ├── dataset/
│ │ │ └── __init__.py
│ │ ├── eval.py
│ │ ├── main.py
│ │ ├── model.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── train.sh
│ │ ├── trainer.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── data_utils.py
│ │ └── utils.py
│ ├── Flare22/
│ │ ├── __init__.py
│ │ ├── dataset/
│ │ │ ├── __init__.py
│ │ │ ├── dataset.json
│ │ │ └── dataset_test50.json
│ │ ├── inferers.py
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── train.sh
│ │ ├── trainer.py
│ │ ├── utils/
│ │ │ ├── __init__.py
│ │ │ ├── data_test.py
│ │ │ ├── data_utils.py
│ │ │ └── utils.py
│ │ └── val.py
│ ├── MM-WHS/
│ │ ├── dataset.json
│ │ ├── inferers.py
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── pretrained_models/
│ │ │ └── __init__.py
│ │ ├── test.py
│ │ ├── train.sh
│ │ ├── trainer.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── data_utils.py
│ │ └── utils.py
│ ├── Word/
│ │ ├── dataset/
│ │ │ ├── __init__.py
│ │ │ └── dataset_word.json
│ │ ├── main.py
│ │ ├── optimizers/
│ │ │ ├── __init__.py
│ │ │ └── lr_scheduler.py
│ │ ├── train.sh
│ │ ├── train.slurm
│ │ ├── trainer.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── data_utils.py
│ │ └── utils.py
│ └── nnUNet/
│ ├── LICENSE
│ ├── documentation/
│ │ ├── __init__.py
│ │ ├── benchmarking.md
│ │ ├── changelog.md
│ │ ├── competitions/
│ │ │ └── AutoPETII.md
│ │ ├── convert_msd_dataset.md
│ │ ├── dataset_format.md
│ │ ├── dataset_format_inference.md
│ │ ├── explanation_normalization.md
│ │ ├── explanation_plans_files.md
│ │ ├── extending_nnunet.md
│ │ ├── how_to_use_nnunet.md
│ │ ├── installation_instructions.md
│ │ ├── manual_data_splits.md
│ │ ├── pretraining_and_finetuning.md
│ │ ├── region_based_training.md
│ │ ├── run_inference_with_pretrained_models.md
│ │ ├── set_environment_variables.md
│ │ ├── setting_up_paths.md
│ │ └── tldr_migration_guide_from_v1.md
│ ├── msd.txt
│ ├── nnunetv2/
│ │ ├── __init__.py
│ │ ├── batch_running/
│ │ │ ├── __init__.py
│ │ │ ├── benchmarking/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── generate_benchmarking_commands.py
│ │ │ │ └── summarize_benchmark_results.py
│ │ │ ├── collect_results_custom_Decathlon.py
│ │ │ ├── collect_results_custom_Decathlon_2d.py
│ │ │ ├── generate_lsf_runs_customDecathlon.py
│ │ │ └── release_trainings/
│ │ │ ├── __init__.py
│ │ │ └── nnunetv2_v1/
│ │ │ ├── __init__.py
│ │ │ ├── collect_results.py
│ │ │ └── generate_lsf_commands.py
│ │ ├── configuration.py
│ │ ├── dataset_conversion/
│ │ │ ├── Dataset017_BTCV.py
│ │ │ ├── Dataset027_ACDC.py
│ │ │ ├── Dataset073_Fluo_C3DH_A549_SIM.py
│ │ │ ├── Dataset114_MNMs.py
│ │ │ ├── Dataset115_EMIDEC.py
│ │ │ ├── Dataset120_RoadSegmentation.py
│ │ │ ├── Dataset137_BraTS21.py
│ │ │ ├── Dataset218_Amos2022_task1.py
│ │ │ ├── Dataset219_Amos2022_task2.py
│ │ │ ├── Dataset220_KiTS2023.py
│ │ │ ├── Dataset221_AutoPETII_2023.py
│ │ │ ├── Dataset988_dummyDataset4.py
│ │ │ ├── __init__.py
│ │ │ ├── convert_MSD_dataset.py
│ │ │ ├── convert_raw_dataset_from_old_nnunet_format.py
│ │ │ ├── datasets_for_integration_tests/
│ │ │ │ ├── Dataset996_IntegrationTest_Hippocampus_regions_ignore.py
│ │ │ │ ├── Dataset997_IntegrationTest_Hippocampus_regions.py
│ │ │ │ ├── Dataset998_IntegrationTest_Hippocampus_ignore.py
│ │ │ │ ├── Dataset999_IntegrationTest_Hippocampus.py
│ │ │ │ └── __init__.py
│ │ │ └── generate_dataset_json.py
│ │ ├── ensembling/
│ │ │ ├── __init__.py
│ │ │ └── ensemble.py
│ │ ├── evaluation/
│ │ │ ├── __init__.py
│ │ │ ├── accumulate_cv_results.py
│ │ │ ├── evaluate_predictions.py
│ │ │ └── find_best_configuration.py
│ │ ├── experiment_planning/
│ │ │ ├── __init__.py
│ │ │ ├── dataset_fingerprint/
│ │ │ │ ├── __init__.py
│ │ │ │ └── fingerprint_extractor.py
│ │ │ ├── experiment_planners/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── default_experiment_planner.py
│ │ │ │ ├── network_topology.py
│ │ │ │ ├── readme.md
│ │ │ │ └── resencUNet_planner.py
│ │ │ ├── plan_and_preprocess_api.py
│ │ │ ├── plan_and_preprocess_entrypoints.py
│ │ │ ├── plans_for_pretraining/
│ │ │ │ ├── __init__.py
│ │ │ │ └── move_plans_between_datasets.py
│ │ │ └── verify_dataset_integrity.py
│ │ ├── imageio/
│ │ │ ├── __init__.py
│ │ │ ├── base_reader_writer.py
│ │ │ ├── natural_image_reader_writer.py
│ │ │ ├── nibabel_reader_writer.py
│ │ │ ├── reader_writer_registry.py
│ │ │ ├── readme.md
│ │ │ ├── simpleitk_reader_writer.py
│ │ │ └── tif_reader_writer.py
│ │ ├── inference/
│ │ │ ├── __init__.py
│ │ │ ├── data_iterators.py
│ │ │ ├── examples.py
│ │ │ ├── export_prediction.py
│ │ │ ├── predict_from_raw_data.py
│ │ │ ├── readme.md
│ │ │ └── sliding_window_prediction.py
│ │ ├── model_sharing/
│ │ │ ├── __init__.py
│ │ │ ├── entry_points.py
│ │ │ ├── model_download.py
│ │ │ ├── model_export.py
│ │ │ └── model_import.py
│ │ ├── paths.py
│ │ ├── postprocessing/
│ │ │ ├── __init__.py
│ │ │ └── remove_connected_components.py
│ │ ├── preprocessing/
│ │ │ ├── __init__.py
│ │ │ ├── cropping/
│ │ │ │ ├── __init__.py
│ │ │ │ └── cropping.py
│ │ │ ├── normalization/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── default_normalization_schemes.py
│ │ │ │ ├── map_channel_name_to_normalization.py
│ │ │ │ └── readme.md
│ │ │ ├── preprocessors/
│ │ │ │ ├── __init__.py
│ │ │ │ └── default_preprocessor.py
│ │ │ └── resampling/
│ │ │ ├── __init__.py
│ │ │ ├── default_resampling.py
│ │ │ └── utils.py
│ │ ├── run/
│ │ │ ├── __init__.py
│ │ │ ├── load_pretrained_weights.py
│ │ │ └── run_training.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ └── integration_tests/
│ │ │ ├── __init__.py
│ │ │ ├── add_lowres_and_cascade.py
│ │ │ ├── cleanup_integration_test.py
│ │ │ ├── lsf_commands.sh
│ │ │ ├── prepare_integration_tests.sh
│ │ │ ├── readme.md
│ │ │ ├── run_integration_test.sh
│ │ │ ├── run_integration_test_bestconfig_inference.py
│ │ │ └── run_integration_test_trainingOnly_DDP.sh
│ │ ├── training/
│ │ │ ├── __init__.py
│ │ │ ├── data_augmentation/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── compute_initial_patch_size.py
│ │ │ │ └── custom_transforms/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cascade_transforms.py
│ │ │ │ ├── deep_supervision_donwsampling.py
│ │ │ │ ├── limited_length_multithreaded_augmenter.py
│ │ │ │ ├── manipulating_data_dict.py
│ │ │ │ ├── masking.py
│ │ │ │ ├── region_based_training.py
│ │ │ │ └── transforms_for_dummy_2d.py
│ │ │ ├── dataloading/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_data_loader.py
│ │ │ │ ├── data_loader_2d.py
│ │ │ │ ├── data_loader_3d.py
│ │ │ │ ├── nnunet_dataset.py
│ │ │ │ └── utils.py
│ │ │ ├── logging/
│ │ │ │ ├── __init__.py
│ │ │ │ └── nnunet_logger.py
│ │ │ ├── loss/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── compound_losses.py
│ │ │ │ ├── deep_supervision.py
│ │ │ │ ├── dice.py
│ │ │ │ └── robust_ce_loss.py
│ │ │ ├── lr_scheduler/
│ │ │ │ ├── __init__.py
│ │ │ │ └── polylr.py
│ │ │ └── nnUNetTrainer/
│ │ │ ├── __init__.py
│ │ │ ├── nnUNetTrainer.py
│ │ │ ├── nnUNetTrainer_swin.py
│ │ │ ├── variants/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── benchmarking/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerBenchmark_5epochs.py
│ │ │ │ │ └── nnUNetTrainerBenchmark_5epochs_noDataLoading.py
│ │ │ │ ├── data_augmentation/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerDA5.py
│ │ │ │ │ ├── nnUNetTrainerDAOrd0.py
│ │ │ │ │ ├── nnUNetTrainerNoDA.py
│ │ │ │ │ └── nnUNetTrainerNoMirroring.py
│ │ │ │ ├── loss/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerCELoss.py
│ │ │ │ │ ├── nnUNetTrainerDiceLoss.py
│ │ │ │ │ └── nnUNetTrainerTopkLoss.py
│ │ │ │ ├── lr_schedule/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnUNetTrainerCosAnneal.py
│ │ │ │ ├── network_architecture/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerBN.py
│ │ │ │ │ └── nnUNetTrainerNoDeepSupervision.py
│ │ │ │ ├── optimizer/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── nnUNetTrainerAdam.py
│ │ │ │ │ └── nnUNetTrainerAdan.py
│ │ │ │ ├── sampling/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nnUNetTrainer_probabilisticOversampling.py
│ │ │ │ └── training_length/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── nnUNetTrainer_Xepochs.py
│ │ │ │ └── nnUNetTrainer_Xepochs_NoMirroring.py
│ │ │ └── vit.py
│ │ └── utilities/
│ │ ├── __init__.py
│ │ ├── collate_outputs.py
│ │ ├── dataset_name_id_conversion.py
│ │ ├── ddp_allgather.py
│ │ ├── default_n_proc_DA.py
│ │ ├── file_path_utilities.py
│ │ ├── find_class_by_name.py
│ │ ├── get_network_from_plans.py
│ │ ├── helpers.py
│ │ ├── json_export.py
│ │ ├── label_handling/
│ │ │ ├── __init__.py
│ │ │ └── label_handling.py
│ │ ├── network_initialization.py
│ │ ├── overlay_plots.py
│ │ ├── plans_handling/
│ │ │ ├── __init__.py
│ │ │ └── plans_handler.py
│ │ └── utils.py
│ ├── nnunetv2.egg-info/
│ │ ├── PKG-INFO
│ │ ├── SOURCES.txt
│ │ ├── dependency_links.txt
│ │ ├── entry_points.txt
│ │ ├── requires.txt
│ │ └── top_level.txt
│ ├── pyproject.toml
│ └── setup.py
├── LICENSE
├── README.md
├── jsons/
│ ├── HNSCC.json
│ ├── Totalsegmentator_dataset.json
│ ├── __init__.py
│ ├── btcv.json
│ ├── dataset_LUNA16_0.json
│ ├── dataset_TCIAcovid19_0.json
│ ├── flare23.json
│ └── stoic21.json
├── models/
│ └── voco_head.py
├── optimizers/
│ ├── __init__.py
│ └── lr_scheduler.py
├── requirements.txt
├── train.sh
├── utils/
│ ├── __init__.py
│ ├── data_utils.py
│ ├── ops.py
│ └── utils.py
└── voco_train.py
================================================
FILE CONTENTS
================================================
================================================
FILE: Finetune/AbdomenAtlas/Atlas_test.py
================================================
# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import partial
import nibabel as nib
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from dataset.dataloader_test import get_test_loader_Atlas
import SimpleITK as sitk
from monai.inferers import sliding_window_inference
# from monai.data import decollate_batch
from monai.losses import DiceCELoss
from monai.metrics import DiceMetric
from monai.networks.nets import SwinUNETR
from monai.transforms import *
from monai.utils.enums import MetricReduction
from monai.handlers import StatsHandler, from_engine
import matplotlib.pyplot as plt
from utils.utils import *
from PIL import Image
from monai import data, transforms
from monai.data import *
import resource
# Raise the soft limit on open file descriptors; MONAI data loading with many
# worker processes can exceed the default per-process quota.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
print('Setting resource limit:', str(resource.getrlimit(resource.RLIMIT_NOFILE)))

# Rendezvous address/port for torch.distributed initialization
# (torchrun in the launch script may override these).
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '28890'

parser = argparse.ArgumentParser(description="Swin UNETR segmentation pipeline")
parser.add_argument(
    "--test_data_path", default="./test_examples/AbdomenAtlasTest/", type=str, help="test_data_path")
parser.add_argument(
    "--save_prediction_path", default="./test_examples/AbdomenAtlasPredict/", type=str, help="test_prediction_path")
parser.add_argument(
    "--trained_pth", default="./runs/logs/model_val50_91.88.pt", type=str, help="trained checkpoint directory")

# Cubic sliding-window edge length (voxels) shared by the three --roi_* options.
roi = 96
parser.add_argument("--use_normal_dataset", default=True, help="use monai Dataset class")
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--batch_size", default=1, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=4, type=int, help="number of sliding window batch size")
parser.add_argument("--infer_overlap", default=0.75, type=float, help="sliding window inference overlap")
parser.add_argument("--in_channels", default=1, type=int, help="number of input channels")
parser.add_argument("--out_channels", default=10, type=int, help="number of output channels")
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=roi, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=roi, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=roi, type=int, help="roi size in z direction")
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--distributed", action="store_true", help="start distributed training")
parser.add_argument("--workers", default=16, type=int, help="number of workers")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--use_checkpoint", default=True, help="use gradient checkpointing to save memory")
def main():
    """Run sliding-window inference on the AbdomenAtlas test cases and write
    one binary mask file per organ for every case.

    Loads the SwinUNETR checkpoint given by --trained_pth, predicts each test
    volume with overlapping sliding windows, inverts the preprocessing
    transforms so predictions land back in the original image geometry,
    one-hot encodes the result, and saves each foreground channel as
    <organ>.nii.gz under --save_prediction_path/<case>/predictions/.
    """
    args = parser.parse_args()
    test_loader, test_transforms = get_test_loader_Atlas(args)

    model = SwinUNETR(
        img_size=(args.roi_x, args.roi_y, args.roi_z),
        in_channels=args.in_channels,
        out_channels=args.out_channels,
        feature_size=args.feature_size,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        dropout_path_rate=0.0,
        use_checkpoint=args.use_checkpoint,
        use_v2=True
    )

    inf_size = [args.roi_x, args.roi_y, args.roi_z]
    model_inferer = partial(
        sliding_window_inference,
        roi_size=inf_size,
        sw_batch_size=args.sw_batch_size,
        predictor=model,
        overlap=args.infer_overlap,
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_dict = torch.load(args.trained_pth)["state_dict"]
    model.load_state_dict(model_dict, strict=True)
    model.eval()
    model.to(device)
    # enable cuDNN benchmark
    torch.backends.cudnn.benchmark = True

    # Invert the test-time preprocessing, then one-hot encode the prediction
    # so individual organ channels can be exported separately.
    post_transforms = Compose([EnsureTyped(keys=["pred"]),
                               Invertd(keys=["pred"],
                                       transform=test_transforms,
                                       orig_keys="image",
                                       meta_keys="pred_meta_dict",
                                       orig_meta_keys="image_meta_dict",
                                       meta_key_postfix="meta_dict",
                                       nearest_interp=True,
                                       to_tensor=True),
                               AsDiscreted(keys="pred", argmax=False, to_onehot=args.out_channels),
                               ])

    with torch.no_grad():
        for batch_data in test_loader:
            torch.cuda.empty_cache()
            data = batch_data["image"]
            data = data.cuda()
            with autocast(enabled=True):
                if model_inferer is not None:
                    logits = model_inferer(data)
                else:
                    logits = model(data)
            batch_data['pred'] = logits.argmax(1)
            batch_data = post_transforms(batch_data)

            save_pred_dir = os.path.join(args.save_prediction_path, batch_data['name'][0], 'predictions')
            check_dir(save_pred_dir)

            organ_ls = ["aorta", "gall_bladder", "kidney_left", "kidney_right", "liver", "pancreas", "postcava",
                        "spleen", "stomach"]
            # FIX: the original reused `idx` for both the batch loop and this
            # organ loop; a distinct name avoids shadowing.
            for organ_idx, organ_name in enumerate(organ_ls):
                # Channel 0 of the one-hot prediction is background, so organ
                # channels start at index 1.
                organ = batch_data['pred'][organ_idx + 1, :, :, :]
                batch_data['organ'] = organ
                save_transforms = Compose([SaveImaged(keys="organ", meta_keys="pred_meta_dict",
                                                      output_dir=save_pred_dir,
                                                      separate_folder=False, folder_layout=None,
                                                      output_postfix=organ_name,
                                                      resample=False)])
                save_transforms(batch_data)
                # SaveImaged prefixes the source image name ('ct_'); rename to
                # the plain <organ>.nii.gz layout the evaluation scripts expect.
                os.rename(os.path.join(save_pred_dir, 'ct_' + organ_name + '.nii.gz'),
                          os.path.join(save_pred_dir, organ_name + '.nii.gz'))


if __name__ == "__main__":
    main()
================================================
FILE: Finetune/AbdomenAtlas/Atlas_test.sh
================================================
test_data_path=./test_examples/AbdomenAtlasTest/
save_prediction_path=./test_examples/AbdomenAtlasPredict/
torchrun --master_port=21472 Atlas_test.py \
--test_data_path $test_data_path --save_prediction_path $save_prediction_path
================================================
FILE: Finetune/AbdomenAtlas/check.py
================================================
import torch
import os
from tqdm import tqdm
import numpy as np
from utils.utils import *
from PIL import Image
import matplotlib.pyplot as plt
def read(img, transpose=False):
    """Load an image volume from the file path *img* and return
    (array, direction, origin, spacing).

    NOTE(review): `sitk` is not imported in this module directly; it is
    presumably re-exported by `from utils.utils import *` — confirm.
    """
    img = sitk.ReadImage(img)
    direction = img.GetDirection()
    origin = img.GetOrigin()
    Spacing = img.GetSpacing()
    img = sitk.GetArrayFromImage(img)
    if transpose:
        # Move the first axis to the end so 2-D slices index the last axis.
        img = img.transpose(1, 2, 0)
    return img, direction, origin, Spacing
def vis():
    """Visually spot-check cached Atlas samples.

    Loads each cached tensor pair, then shows every slice containing more
    than five distinct label values as an image/label side-by-side figure.
    Interactive (blocks on plt.show()); intended for manual inspection only.
    """
    # FIX: raw string — the original plain string contained invalid escape
    # sequences ('\d', '\c') that only worked because Python passes unknown
    # escapes through verbatim (now a SyntaxWarning).
    path = r'D:\data\cache\Atlas'
    ls = os.listdir(path)
    num = 0  # number of slices displayed so far
    for i in ls:
        data = torch.load(os.path.join(path, i))
        img, lab = data['image'], data['label']
        print(img.shape, lab.shape)
        img = img[0].data.cpu().numpy()
        lab = lab[0].data.cpu().numpy()
        cls_set = list(np.unique(lab))
        print(cls_set)
        h, w, c = img.shape
        cmap = color_map()
        for j in range(c):
            im = img[:, :, j]
            la = lab[:, :, j]
            # Only show slices with enough distinct labels to be informative.
            if len(list(np.unique(la))) > 5:
                im = (255 * im).astype(np.uint8)
                la = Image.fromarray(la.astype(np.uint8), mode='P')
                la.putpalette(cmap)
                num += 1
                fig, axs = plt.subplots(1, 2, figsize=(16, 5))
                axs[0].imshow(im, cmap='gray')
                axs[0].axis("off")
                axs[1].imshow(la)
                axs[1].axis("off")
                plt.tight_layout()
                plt.show()
                plt.close()
def check_original():
    """Cross-check the fused `label.nii.gz` of one case against its
    per-organ masks, slice by slice.

    Re-fuses the nine binary organ masks (argmax + 1 over channels,
    background where no mask fires) and renders image / re-fused label /
    stored label side by side for every slice containing foreground.
    Interactive; for manual inspection only.
    """
    # FIX: raw string — the original plain string relied on Python keeping
    # unrecognized escapes ('\d', '\c', '\B') verbatim; value is unchanged.
    path = r'D:\data\cache\Atlas\BDMAP_00000870/'
    img = read(path + 'ct.nii.gz', True)[0]
    gt = read(path + 'label.nii.gz', True)[0]
    label_path = path + 'segmentations'
    organ_ls = ["aorta", "gall_bladder", "kidney_left", "kidney_right", "liver", "pancreas", "postcava", "spleen",
                "stomach"]
    lab = []
    for i in organ_ls:
        la = read(label_path + '/' + i + '.nii.gz', True)[0]
        la = np.expand_dims(la, 0)
        lab.append(la)
    labs = np.concatenate(lab, 0)
    print(img.shape, labs.shape)
    lab_bg = labs.sum(0)
    print(np.unique(labs.sum(0)))
    # argmax over organ channels, shifted by 1 so 0 can mean background.
    lab = labs.argmax(0)
    lab += 1
    lab[lab_bg == 0] = 0
    print(np.unique(lab))
    h, w, c = img.shape
    cmap = color_map()
    for j in range(c):
        im = img[:, :, j]
        la = lab[:, :, j]
        g = gt[:, :, j]
        if len(list(np.unique(la))) > 1:
            im = (255 * im).astype(np.uint8)
            la = Image.fromarray(la.astype(np.uint8), mode='P')
            la.putpalette(cmap)
            g = Image.fromarray(g.astype(np.uint8), mode='P')
            g.putpalette(cmap)
            fig, axs = plt.subplots(1, 3, figsize=(16, 5))
            axs[0].imshow(im, cmap='gray')
            axs[0].axis("off")
            axs[1].imshow(la)
            axs[1].axis("off")
            axs[2].imshow(g)
            axs[2].axis("off")
            plt.tight_layout()
            plt.show()
            plt.close()
def exe(path):
    """Fuse the nine per-organ binary masks of one Atlas case into a single
    label volume and write it as `label.nii.gz` in the case directory.

    Args:
        path: case directory name relative to the hard-coded Atlas root.
    """
    root = '/project/medimgfmod/CT/AbdomenAtlasMini1.0/'
    path = root + path
    label_path = path + '/segmentations'
    organ_ls = ["aorta", "gall_bladder", "kidney_left", "kidney_right", "liver", "pancreas", "postcava", "spleen", "stomach"]
    lab = []
    for i in organ_ls:
        # Geometry (direction/origin/spacing) is taken from the last mask
        # read — assumed identical across all masks of a case (TODO confirm).
        la, direction, origin, Spacing = read(label_path + '/' + i+'.nii.gz')
        la = np.expand_dims(la, 0)
        lab.append(la)
    labs = np.concatenate(lab, 0)
    lab_bg = labs.sum(0)
    # argmax over organ channels, shifted by 1 so 0 can mean background;
    # voxels where no mask fires are reset to 0.
    lab = labs.argmax(0)
    lab += 1
    lab[lab_bg == 0] = 0
    new = sitk.GetImageFromArray(lab)
    new.SetDirection(direction)
    new.SetOrigin(origin)
    new.SetSpacing(Spacing)
    sitk.WriteImage(new, path + '/' + 'label.nii.gz')
    print('save:', path + '/' + 'label.nii.gz')
def trans_lab(path):
    """Assemble the nine per-organ binary masks under *path* into one label
    map: 0 = background, 1..9 = organ index (order of `organ_ls`) + 1."""
    organ_ls = ["aorta", "gall_bladder", "kidney_left", "kidney_right", "liver", "pancreas", "postcava", "spleen",
                "stomach"]
    # Load every mask (transposed to slice-last order) and stack along a new
    # leading channel axis.
    masks = np.stack(
        [read(path + '/' + name + '.nii.gz', True)[0] for name in organ_ls],
        axis=0,
    )
    # A voxel is foreground iff at least one organ mask fires there.
    foreground = masks.sum(axis=0) != 0
    label_map = masks.argmax(axis=0) + 1
    label_map[~foreground] = 0
    return label_map
def check_pred_vis():
    """Compare two prediction folders for the same case slice by slice,
    displaying the fused label maps side by side. Interactive; for manual
    inspection only."""
    path = 'test_examples/AbdomenAtlasPredict/BDMAP_A0000002/predictions'
    path_temp = 'test_examples/AbdomenAtlasPredict_temp/BDMAP_A0000002/predictions'
    pred, pred_temp = trans_lab(path), trans_lab(path_temp)
    print(np.unique(pred), np.unique(pred_temp))
    h, w, c = pred.shape
    cmap = color_map()
    for j in range(c):
        la = pred[:, :, j]
        g = pred_temp[:, :, j]
        # Only show slices with more than five distinct labels.
        if len(list(np.unique(la))) > 5:
            la = Image.fromarray(la.astype(np.uint8), mode='P')
            la.putpalette(cmap)
            g = Image.fromarray(g.astype(np.uint8), mode='P')
            g.putpalette(cmap)
            fig, axs = plt.subplots(1, 2, figsize=(16, 5))
            axs[0].imshow(la)
            axs[0].axis("off")
            axs[1].imshow(g)
            axs[1].axis("off")
            plt.tight_layout()
            plt.show()
            plt.close()
def check_pred_acc():
    """Accumulate per-organ Dice between saved predictions and ground-truth
    segmentations over the Atlas training cases, printing a running mean
    after each case."""
    root = '/project/medimgfmod/CT/AbdomenAtlasMini1.0/'
    ls = os.listdir(root)
    # Per-organ count of cases in which the organ is actually present.
    num = np.zeros(9)
    from utils.utils import dice
    all_dice = None
    # FIX: the original reused `i` for both the case loop and the class loop,
    # shadowing the case name; use distinct names.
    for case in ls:
        path = root + case
        label_path = path + '/segmentations'
        lab = trans_lab(label_path)
        pred_path = os.path.join('./test_examples/AbdomenAtlasPredict_train/' + case, 'predictions')
        pred = trans_lab(pred_path)
        dice_list_sub = []
        # Classes 1..9 follow the organ ordering used by trans_lab().
        for cls in range(1, 10):
            num[cls - 1] += (np.sum(lab == cls) > 0).astype(np.uint8)
            organ_Dice = dice(pred == cls, lab == cls)
            dice_list_sub.append(organ_Dice)
        if all_dice is None:
            all_dice = (np.asarray(dice_list_sub)).copy()
        else:
            all_dice = all_dice + np.asarray(dice_list_sub)
        print("Organ Dice accumulate:", (all_dice / num), (all_dice / num).mean())
if __name__=='__main__':
    # vis()
    # check_pred_acc()
    # the path to Atlas train
    path = '/project/medimgfmod/CT/AbdomenAtlasMini1.0/'
    ls = os.listdir(path)
    import multiprocessing
    # Fuse per-organ masks into label.nii.gz for every case, 20 in parallel
    # (chunksize=1 so each worker takes one case at a time).
    with multiprocessing.Pool(20) as pool:
        pool.map(exe, ls, 1)
================================================
FILE: Finetune/AbdomenAtlas/dataset/__init__.py
================================================
================================================
FILE: Finetune/AbdomenAtlas/dataset/dataloader_bdmap.py
================================================
from monai.transforms import *
import sys
import nibabel as nib
import os
import torch
import numpy as np
from typing import Optional, Union
import math
import pickle
from monai.data import *
from monai.data import DataLoader, Dataset, list_data_collate, DistributedSampler, CacheDataset, SmartCacheDataset
from monai.config import DtypeLike, KeysCollection
from monai.transforms.transform import MapTransform
from monai.transforms.io.array import LoadImage
from monai.utils import ensure_tuple, ensure_tuple_rep
from monai.data.image_reader import ImageReader
from monai.utils.enums import PostFix
from utils.data_trans import *
# MONAI's default postfix for metadata dictionary keys.
DEFAULT_POST_FIX = PostFix.meta()

# class map for the AbdomenAtlas 1.0 dataset
# (0-based channel index -> organ name)
class_map_abdomenatlas_1_0 = {
    0: "aorta",
    1: "gall_bladder",
    2: "kidney_left",
    3: "kidney_right",
    4: "liver",
    5: "pancreas",
    6: "postcava",
    7: "spleen",
    8: "stomach",
}

# class map for the AbdomenAtlas 1.1 dataset
# (superset of 1.0; same first nine entries, then extra structures)
class_map_abdomenatlas_1_1 = {
    0: 'aorta',
    1: 'gall_bladder',
    2: 'kidney_left',
    3: 'kidney_right',
    4: 'liver',
    5: 'pancreas',
    6: 'postcava',
    7: 'spleen',
    8: 'stomach',
    9: 'adrenal_gland_left',
    10: 'adrenal_gland_right',
    11: 'bladder',
    12: 'celiac_truck',
    13: 'colon',
    14: 'duodenum',
    15: 'esophagus',
    16: 'femur_left',
    17: 'femur_right',
    18: 'hepatic_vessel',
    19: 'intestine',
    20: 'lung_left',
    21: 'lung_right',
    22: 'portal_vein_and_splenic_vein',
    23: 'prostate',
    24: 'rectum'
}
class Sampler(torch.utils.data.Sampler):
    """Distributed sampler that partitions a dataset across ranks.

    With make_even=True the index list is padded (by repeating indices) so
    every rank draws exactly the same number of samples per epoch. Call
    set_epoch() each epoch so the shuffle permutation changes.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, make_even=True):
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.shuffle = shuffle
        self.make_even = make_even
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0  # updated via set_epoch(); seeds the shuffle
        # Ceil division so all ranks get the same (maximal) share.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        indices = list(range(len(self.dataset)))
        # Number of *real* (unpadded) indices this rank would receive.
        self.valid_length = len(indices[self.rank: self.total_size: self.num_replicas])

    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so every rank generates the same permutation.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        if self.make_even:
            # Pad with repeated indices until divisible by the rank count.
            if len(indices) < self.total_size:
                if self.total_size - len(indices) < len(indices):
                    indices += indices[: (self.total_size - len(indices))]
                else:
                    extra_ids = np.random.randint(low=0, high=len(indices), size=self.total_size - len(indices))
                    indices += [indices[ids] for ids in extra_ids]
            assert len(indices) == self.total_size
        # Each rank takes a strided slice of the shared index list.
        indices = indices[self.rank: self.total_size: self.num_replicas]
        self.num_samples = len(indices)
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so shuffling differs across epochs.
        self.epoch = epoch
class LoadSelectedImaged(MapTransform):
    """
    Custom transform to load a specific image and metadata using a flexible reader.
    Args:
        keys: Keys of the data dictionary to load selected images.
        reader: Image reader object or string reference.
        dtype: Data type for loaded images.
        meta_keys: Keys to store metadata along with image data.
        meta_key_postfix: Suffix for metadata keys.
        overwriting: Flag to allow overwriting existing metadata.
        image_only: Load only the image data (not metadata).
        ensure_channel_first: Reshape image into channel-first format if necessary.
        simple_keys: Use simplified, top-level data keys.
        allow_missing_keys: If True, missing data keys are ignored
    """

    def __init__(
        self,
        keys: KeysCollection,
        reader: Optional[Union[ImageReader, str]] = None,
        dtype: DtypeLike = np.float32,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        overwriting: bool = False,
        image_only: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        allow_missing_keys: bool = False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self._loader = LoadImage(reader, image_only, dtype, ensure_channel_first, simple_keys, *args, **kwargs)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        # One meta key per data key; None entries get a per-key default name
        # (f"{key}_{postfix}") derived later in __call__.
        self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def register(self, reader: ImageReader):
        # Register an additional reader with the underlying LoadImage.
        self._loader.register(reader)

    def __call__(self, data, reader: Optional[ImageReader] = None):
        """Load the image (and metadata, unless image_only) for each key."""
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            data = self._loader(d[key], reader)
            if self._loader.image_only:
                d[key] = data
            else:
                # loader returns (image, meta_dict) when image_only=False.
                if not isinstance(data, (tuple, list)):
                    raise ValueError("loader must return a tuple or list (because image_only=False was used).")
                d[key] = data[0]
                if not isinstance(data[1], dict):
                    raise ValueError("metadata must be a dict.")
                meta_key = meta_key or f"{key}_{meta_key_postfix}"
                if meta_key in d and not self.overwriting:
                    raise KeyError(f"Metadata with key {meta_key} already exists and overwriting=False.")
                d[meta_key] = data[1]
        return d
def get_loader_Atlas(args):
    """
    Creates training transforms, constructs a dataset, and returns a dataloader.
    Args:
        args: Command line arguments containing dataset paths and hyperparameters.
    Returns:
        [train_loader, val_loader]; the last 50 cases of the combined list
        are held out for validation.
    """
    base_trans, random_trans = get_trans(args)
    train_transforms = base_trans + random_trans
    val_transforms = base_trans  # validation: deterministic transforms only

    # constructing training dataset
    train_img = []
    label_img = []
    # train_lbl_parents = []
    train_name = []
    for item in args.dataset_list:
        # Each dataset list file holds one case per line; the first
        # tab-separated field is the case name.
        for line in open(os.path.join(args.data_txt_path, item + '.txt')):
            name = line.strip().split('\t')[0]
            train_img_path = os.path.join(args.data_dir, name, 'ct.nii.gz')
            label_img_path = os.path.join(args.data_dir, name, 'label.nii.gz')
            train_img.append(train_img_path)
            label_img.append(label_img_path)
            train_name.append(name)
    data_dicts_train = [{'image': image, 'label': label, 'name': name}
                        for image, label, name in zip(train_img, label_img, train_name)]
    print('train len {}'.format(len(data_dicts_train)))

    if args.cache_dataset:
        # PersistentDataset caches transformed samples on disk at cache_dir.
        train_ds = PersistentDataset(data=data_dicts_train[:-50],
                                     transform=train_transforms,
                                     pickle_protocol=pickle.HIGHEST_PROTOCOL,
                                     cache_dir=args.cache_dir)
        val_ds = PersistentDataset(data=data_dicts_train[-50:],
                                   transform=val_transforms,
                                   pickle_protocol=pickle.HIGHEST_PROTOCOL,
                                   cache_dir=args.cache_dir)
    else:
        train_ds = Dataset(data=data_dicts_train[:-50], transform=Compose(train_transforms))
        val_ds = Dataset(data=data_dicts_train[-50:], transform=Compose(val_transforms))

    # distributed sampler settings
    train_sampler = Sampler(train_ds) if args.distributed else None
    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=(train_sampler is None),
                              num_workers=args.workers, pin_memory=True,
                              collate_fn=list_data_collate, sampler=train_sampler)
    val_sampler = Sampler(val_ds, shuffle=False) if args.distributed else None
    val_loader = DataLoader(
        val_ds, batch_size=1, shuffle=False, num_workers=args.workers, sampler=val_sampler, pin_memory=True
    )
    loader = [train_loader, val_loader]
    return loader
class Filter_Atlas_Labels(MapTransform):
    """Collapse a multi-channel organ-mask volume into a single-channel label
    map: voxels covered by no channel become 0, all others become
    (argmax channel + 1), returned as float."""

    def __call__(self, data):
        out = dict(data)
        for key in self.keys:
            onehot = out[key]
            # Channel-summed foreground indicator, kept channel-first.
            foreground = onehot.clone().sum(0).unsqueeze(0)
            # Shift argmax by 1 so 0 is free to mean background.
            labels = onehot.argmax(0).unsqueeze(0) + 1
            labels[foreground == 0] = 0
            out[key] = labels.float()
        return out
================================================
FILE: Finetune/AbdomenAtlas/dataset/dataloader_test.py
================================================
from monai.transforms import *
import sys
import nibabel as nib
import os
import torch
import numpy as np
from typing import Optional, Union
import math
import pickle
from monai.data import *
from monai.data import DataLoader, Dataset, list_data_collate, DistributedSampler, CacheDataset, SmartCacheDataset
from monai.config import DtypeLike, KeysCollection
from monai.transforms.transform import MapTransform
from monai.transforms.io.array import LoadImage
from monai.utils import ensure_tuple, ensure_tuple_rep
from monai.data.image_reader import ImageReader
from monai.utils.enums import PostFix
from utils.data_trans import *
DEFAULT_POST_FIX = PostFix.meta()
# class map for the AbdomenAtlas 1.0 dataset
# (index -> organ name; built from the ordered list of class names)
class_map_abdomenatlas_1_0 = dict(enumerate([
    "aorta",
    "gall_bladder",
    "kidney_left",
    "kidney_right",
    "liver",
    "pancreas",
    "postcava",
    "spleen",
    "stomach",
]))
# class map for the AbdomenAtlas 1.1 dataset
# (superset of 1.0: indices 0-8 match class_map_abdomenatlas_1_0)
class_map_abdomenatlas_1_1 = dict(enumerate([
    'aorta',
    'gall_bladder',
    'kidney_left',
    'kidney_right',
    'liver',
    'pancreas',
    'postcava',
    'spleen',
    'stomach',
    'adrenal_gland_left',
    'adrenal_gland_right',
    'bladder',
    'celiac_truck',
    'colon',
    'duodenum',
    'esophagus',
    'femur_left',
    'femur_right',
    'hepatic_vessel',
    'intestine',
    'lung_left',
    'lung_right',
    'portal_vein_and_splenic_vein',
    'prostate',
    'rectum',
]))
class Sampler(torch.utils.data.Sampler):
    """Distributed sampler that partitions a dataset across process ranks.

    Each rank yields every ``num_replicas``-th index starting at ``rank``.
    With ``make_even=True`` the index list is padded so every rank receives
    the same number of samples (required to keep DDP ranks in lockstep).

    Args:
        dataset: dataset to sample from (only ``len(dataset)`` is used).
        num_replicas: number of participating processes; defaults to the
            current distributed world size.
        rank: rank of the current process; defaults to the current
            distributed rank.
        shuffle: if True, indices are reshuffled every epoch with a
            generator seeded by the epoch number, so all ranks agree on
            the permutation.
        make_even: if True, pad the index list until ``total_size`` divides
            evenly among the replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, make_even=True):
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.shuffle = shuffle
        self.make_even = make_even
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # round up so every rank can be given the same sample count
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        indices = list(range(len(self.dataset)))
        # number of *real* (unpadded) samples this rank would see
        self.valid_length = len(indices[self.rank: self.total_size: self.num_replicas])

    def __iter__(self):
        if self.shuffle:
            # epoch-seeded generator: every rank computes the same permutation
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        if self.make_even:
            if len(indices) < self.total_size:
                if self.total_size - len(indices) < len(indices):
                    # small shortfall: repeat the head of the list
                    indices += indices[: (self.total_size - len(indices))]
                else:
                    # BUGFIX: seed with the epoch so every rank pads with the
                    # SAME extra indices. The global np.random state is not
                    # synchronized across processes, so the previous unseeded
                    # np.random.randint could desynchronize the per-rank
                    # index lists (duplicated/missing samples per epoch).
                    rng = np.random.RandomState(self.epoch)
                    extra_ids = rng.randint(low=0, high=len(indices), size=self.total_size - len(indices))
                    indices += [indices[ids] for ids in extra_ids]
            assert len(indices) == self.total_size
        # strided slice: rank r takes indices r, r+R, r+2R, ...
        indices = indices[self.rank: self.total_size: self.num_replicas]
        self.num_samples = len(indices)
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # called by the training loop so shuffling differs between epochs
        self.epoch = epoch
class LoadSelectedImaged(MapTransform):
    """Dictionary transform that loads selected images via a configurable reader.

    For every key it loads the image data and, unless ``image_only`` is set,
    stores the accompanying metadata dict under ``{key}_{meta_key_postfix}``
    (or an explicit ``meta_keys`` entry).

    Args:
        keys: keys of the data dictionary whose images should be loaded.
        reader: image reader object or string reference.
        dtype: data type for the loaded images.
        meta_keys: explicit keys under which metadata is stored.
        meta_key_postfix: suffix appended to each key for metadata storage.
        overwriting: allow replacing an existing metadata entry.
        image_only: store only the image array (no metadata).
        ensure_channel_first: reshape images into channel-first layout.
        simple_keys: use simplified, top-level data keys.
        allow_missing_keys: if True, missing data keys are ignored.
    """
    def __init__(
        self,
        keys: KeysCollection,
        reader: Optional[Union[ImageReader, str]] = None,
        dtype: DtypeLike = np.float32,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = DEFAULT_POST_FIX,
        overwriting: bool = False,
        image_only: bool = False,
        ensure_channel_first: bool = False,
        simple_keys: bool = False,
        allow_missing_keys: bool = False,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self._loader = LoadImage(reader, image_only, dtype, ensure_channel_first, simple_keys, *args, **kwargs)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        if meta_keys is None:
            self.meta_keys = ensure_tuple_rep(None, len(self.keys))
        else:
            self.meta_keys = ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def register(self, reader: ImageReader):
        # delegate reader registration to the underlying loader
        self._loader.register(reader)

    def __call__(self, data, reader: Optional[ImageReader] = None):
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            # `loaded` is either the image alone (image_only) or (image, meta)
            loaded = self._loader(d[key], reader)
            if self._loader.image_only:
                d[key] = loaded
                continue
            if not isinstance(loaded, (tuple, list)):
                raise ValueError("loader must return a tuple or list (because image_only=False was used).")
            d[key] = loaded[0]
            if not isinstance(loaded[1], dict):
                raise ValueError("metadata must be a dict.")
            meta_key = meta_key or f"{key}_{meta_key_postfix}"
            if meta_key in d and not self.overwriting:
                raise KeyError(f"Metadata with key {meta_key} already exists and overwriting=False.")
            d[meta_key] = loaded[1]
        return d
def get_test_loader_Atlas(args):
    """
    Build the test-time transform pipeline and dataloader for AbdomenAtlas.

    Each case directory under ``args.test_data_path`` is expected to contain
    a ``ct.nii.gz`` volume; the directory name is kept as the case name.

    Args:
        args: Command line arguments containing dataset paths and
            preprocessing hyperparameters (spacing, intensity window, ROI).

    Returns:
        A tuple ``(test_loader, test_transforms)``.
    """
    # BUGFIX: use the star-imported ``Compose`` directly. This module only
    # does ``from monai.transforms import *``, which does not bind a
    # ``transforms`` name, so the previous ``transforms.Compose`` would
    # raise NameError; sibling loaders in this package also call ``Compose``.
    test_transforms = Compose([
        LoadImaged(keys=["image"]),
        EnsureChannelFirstd(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
        Spacingd(keys=["image"], pixdim=(args.space_x, args.space_y, args.space_z),
                 mode="bilinear"),
        ScaleIntensityRanged(
            keys=["image"],
            a_min=args.a_min,
            a_max=args.a_max,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        CropForegroundd(keys=["image"], source_key="image"),
        SpatialPadd(keys=["image"], spatial_size=(args.roi_x, args.roi_y, args.roi_z),
                    mode='constant'),
    ])
    # one test case per sub-directory: <test_data_path>/<name>/ct.nii.gz
    data_dicts_test = [
        {'image': os.path.join(args.test_data_path, name, 'ct.nii.gz'), 'name': name}
        for name in os.listdir(args.test_data_path)
    ]
    print('test len {}'.format(len(data_dicts_test)))
    test_ds = Dataset(data=data_dicts_test, transform=test_transforms)
    test_loader = DataLoader(
        test_ds, batch_size=1, shuffle=False, num_workers=args.workers, sampler=None, pin_memory=True
    )
    return test_loader, test_transforms
================================================
FILE: Finetune/AbdomenAtlas/dataset/dataset_list/AbdomenAtlas1.0.txt
================================================
BDMAP_00000001
BDMAP_00000002
BDMAP_00000003
BDMAP_00000004
BDMAP_00000005
BDMAP_00000006
BDMAP_00000007
BDMAP_00000008
BDMAP_00000009
BDMAP_00000010
BDMAP_00000011
BDMAP_00000012
BDMAP_00000013
BDMAP_00000014
BDMAP_00000015
BDMAP_00000016
BDMAP_00000017
BDMAP_00000018
BDMAP_00000019
BDMAP_00000020
BDMAP_00000021
BDMAP_00000022
BDMAP_00000023
BDMAP_00000024
BDMAP_00000025
BDMAP_00000026
BDMAP_00000027
BDMAP_00000028
BDMAP_00000029
BDMAP_00000030
BDMAP_00000031
BDMAP_00000032
BDMAP_00000033
BDMAP_00000034
BDMAP_00000035
BDMAP_00000036
BDMAP_00000037
BDMAP_00000038
BDMAP_00000039
BDMAP_00000040
BDMAP_00000041
BDMAP_00000042
BDMAP_00000043
BDMAP_00000044
BDMAP_00000045
BDMAP_00000046
BDMAP_00000047
BDMAP_00000048
BDMAP_00000049
BDMAP_00000050
BDMAP_00000051
BDMAP_00000052
BDMAP_00000053
BDMAP_00000054
BDMAP_00000055
BDMAP_00000056
BDMAP_00000057
BDMAP_00000058
BDMAP_00000059
BDMAP_00000060
BDMAP_00000061
BDMAP_00000062
BDMAP_00000063
BDMAP_00000064
BDMAP_00000065
BDMAP_00000066
BDMAP_00000067
BDMAP_00000068
BDMAP_00000069
BDMAP_00000070
BDMAP_00000071
BDMAP_00000072
BDMAP_00000073
BDMAP_00000074
BDMAP_00000075
BDMAP_00000076
BDMAP_00000077
BDMAP_00000078
BDMAP_00000079
BDMAP_00000080
BDMAP_00000081
BDMAP_00000082
BDMAP_00000083
BDMAP_00000084
BDMAP_00000085
BDMAP_00000086
BDMAP_00000087
BDMAP_00000088
BDMAP_00000089
BDMAP_00000090
BDMAP_00000091
BDMAP_00000092
BDMAP_00000093
BDMAP_00000094
BDMAP_00000095
BDMAP_00000096
BDMAP_00000097
BDMAP_00000098
BDMAP_00000099
BDMAP_00000100
BDMAP_00000101
BDMAP_00000102
BDMAP_00000103
BDMAP_00000104
BDMAP_00000105
BDMAP_00000106
BDMAP_00000107
BDMAP_00000108
BDMAP_00000109
BDMAP_00000110
BDMAP_00000111
BDMAP_00000112
BDMAP_00000113
BDMAP_00000114
BDMAP_00000115
BDMAP_00000116
BDMAP_00000117
BDMAP_00000118
BDMAP_00000119
BDMAP_00000120
BDMAP_00000121
BDMAP_00000122
BDMAP_00000123
BDMAP_00000124
BDMAP_00000125
BDMAP_00000126
BDMAP_00000127
BDMAP_00000128
BDMAP_00000129
BDMAP_00000130
BDMAP_00000131
BDMAP_00000132
BDMAP_00000133
BDMAP_00000134
BDMAP_00000135
BDMAP_00000136
BDMAP_00000137
BDMAP_00000138
BDMAP_00000139
BDMAP_00000140
BDMAP_00000141
BDMAP_00000142
BDMAP_00000143
BDMAP_00000144
BDMAP_00000145
BDMAP_00000146
BDMAP_00000147
BDMAP_00000148
BDMAP_00000149
BDMAP_00000150
BDMAP_00000151
BDMAP_00000152
BDMAP_00000153
BDMAP_00000154
BDMAP_00000155
BDMAP_00000156
BDMAP_00000157
BDMAP_00000158
BDMAP_00000159
BDMAP_00000160
BDMAP_00000161
BDMAP_00000162
BDMAP_00000163
BDMAP_00000164
BDMAP_00000165
BDMAP_00000166
BDMAP_00000167
BDMAP_00000168
BDMAP_00000169
BDMAP_00000170
BDMAP_00000171
BDMAP_00000172
BDMAP_00000173
BDMAP_00000174
BDMAP_00000175
BDMAP_00000176
BDMAP_00000177
BDMAP_00000178
BDMAP_00000179
BDMAP_00000180
BDMAP_00000181
BDMAP_00000182
BDMAP_00000183
BDMAP_00000184
BDMAP_00000185
BDMAP_00000186
BDMAP_00000187
BDMAP_00000188
BDMAP_00000189
BDMAP_00000190
BDMAP_00000191
BDMAP_00000192
BDMAP_00000193
BDMAP_00000194
BDMAP_00000195
BDMAP_00000196
BDMAP_00000197
BDMAP_00000198
BDMAP_00000199
BDMAP_00000200
BDMAP_00000201
BDMAP_00000202
BDMAP_00000203
BDMAP_00000204
BDMAP_00000205
BDMAP_00000206
BDMAP_00000207
BDMAP_00000208
BDMAP_00000209
BDMAP_00000210
BDMAP_00000211
BDMAP_00000212
BDMAP_00000213
BDMAP_00000214
BDMAP_00000215
BDMAP_00000216
BDMAP_00000217
BDMAP_00000218
BDMAP_00000219
BDMAP_00000220
BDMAP_00000221
BDMAP_00000222
BDMAP_00000223
BDMAP_00000224
BDMAP_00000225
BDMAP_00000226
BDMAP_00000227
BDMAP_00000228
BDMAP_00000229
BDMAP_00000230
BDMAP_00000231
BDMAP_00000232
BDMAP_00000233
BDMAP_00000234
BDMAP_00000235
BDMAP_00000236
BDMAP_00000237
BDMAP_00000238
BDMAP_00000239
BDMAP_00000240
BDMAP_00000241
BDMAP_00000242
BDMAP_00000243
BDMAP_00000244
BDMAP_00000245
BDMAP_00000246
BDMAP_00000247
BDMAP_00000248
BDMAP_00000249
BDMAP_00000250
BDMAP_00000251
BDMAP_00000252
BDMAP_00000253
BDMAP_00000254
BDMAP_00000255
BDMAP_00000256
BDMAP_00000257
BDMAP_00000258
BDMAP_00000259
BDMAP_00000260
BDMAP_00000261
BDMAP_00000262
BDMAP_00000263
BDMAP_00000264
BDMAP_00000265
BDMAP_00000266
BDMAP_00000267
BDMAP_00000268
BDMAP_00000269
BDMAP_00000270
BDMAP_00000271
BDMAP_00000272
BDMAP_00000273
BDMAP_00000274
BDMAP_00000275
BDMAP_00000276
BDMAP_00000277
BDMAP_00000278
BDMAP_00000279
BDMAP_00000280
BDMAP_00000281
BDMAP_00000282
BDMAP_00000283
BDMAP_00000284
BDMAP_00000285
BDMAP_00000286
BDMAP_00000287
BDMAP_00000288
BDMAP_00000289
BDMAP_00000290
BDMAP_00000291
BDMAP_00000292
BDMAP_00000293
BDMAP_00000294
BDMAP_00000295
BDMAP_00000296
BDMAP_00000297
BDMAP_00000298
BDMAP_00000299
BDMAP_00000300
BDMAP_00000301
BDMAP_00000302
BDMAP_00000303
BDMAP_00000304
BDMAP_00000305
BDMAP_00000306
BDMAP_00000307
BDMAP_00000308
BDMAP_00000309
BDMAP_00000310
BDMAP_00000311
BDMAP_00000312
BDMAP_00000313
BDMAP_00000314
BDMAP_00000315
BDMAP_00000316
BDMAP_00000317
BDMAP_00000318
BDMAP_00000319
BDMAP_00000320
BDMAP_00000321
BDMAP_00000322
BDMAP_00000323
BDMAP_00000324
BDMAP_00000325
BDMAP_00000326
BDMAP_00000327
BDMAP_00000328
BDMAP_00000329
BDMAP_00000330
BDMAP_00000331
BDMAP_00000332
BDMAP_00000333
BDMAP_00000334
BDMAP_00000335
BDMAP_00000336
BDMAP_00000337
BDMAP_00000338
BDMAP_00000339
BDMAP_00000340
BDMAP_00000341
BDMAP_00000342
BDMAP_00000343
BDMAP_00000344
BDMAP_00000345
BDMAP_00000346
BDMAP_00000347
BDMAP_00000348
BDMAP_00000349
BDMAP_00000350
BDMAP_00000351
BDMAP_00000352
BDMAP_00000353
BDMAP_00000354
BDMAP_00000355
BDMAP_00000356
BDMAP_00000357
BDMAP_00000358
BDMAP_00000359
BDMAP_00000360
BDMAP_00000361
BDMAP_00000362
BDMAP_00000363
BDMAP_00000364
BDMAP_00000365
BDMAP_00000366
BDMAP_00000367
BDMAP_00000368
BDMAP_00000369
BDMAP_00000370
BDMAP_00000371
BDMAP_00000372
BDMAP_00000373
BDMAP_00000374
BDMAP_00000375
BDMAP_00000376
BDMAP_00000377
BDMAP_00000378
BDMAP_00000379
BDMAP_00000380
BDMAP_00000381
BDMAP_00000382
BDMAP_00000383
BDMAP_00000384
BDMAP_00000385
BDMAP_00000386
BDMAP_00000387
BDMAP_00000388
BDMAP_00000389
BDMAP_00000390
BDMAP_00000391
BDMAP_00000392
BDMAP_00000393
BDMAP_00000394
BDMAP_00000395
BDMAP_00000396
BDMAP_00000397
BDMAP_00000398
BDMAP_00000399
BDMAP_00000400
BDMAP_00000401
BDMAP_00000402
BDMAP_00000403
BDMAP_00000404
BDMAP_00000405
BDMAP_00000406
BDMAP_00000407
BDMAP_00000408
BDMAP_00000409
BDMAP_00000410
BDMAP_00000411
BDMAP_00000412
BDMAP_00000413
BDMAP_00000414
BDMAP_00000415
BDMAP_00000416
BDMAP_00000417
BDMAP_00000418
BDMAP_00000419
BDMAP_00000420
BDMAP_00000421
BDMAP_00000422
BDMAP_00000423
BDMAP_00000424
BDMAP_00000425
BDMAP_00000426
BDMAP_00000427
BDMAP_00000428
BDMAP_00000429
BDMAP_00000430
BDMAP_00000431
BDMAP_00000432
BDMAP_00000433
BDMAP_00000434
BDMAP_00000435
BDMAP_00000436
BDMAP_00000437
BDMAP_00000438
BDMAP_00000439
BDMAP_00000440
BDMAP_00000441
BDMAP_00000442
BDMAP_00000443
BDMAP_00000444
BDMAP_00000445
BDMAP_00000446
BDMAP_00000447
BDMAP_00000448
BDMAP_00000449
BDMAP_00000450
BDMAP_00000451
BDMAP_00000452
BDMAP_00000453
BDMAP_00000454
BDMAP_00000455
BDMAP_00000456
BDMAP_00000457
BDMAP_00000458
BDMAP_00000459
BDMAP_00000460
BDMAP_00000461
BDMAP_00000462
BDMAP_00000463
BDMAP_00000464
BDMAP_00000465
BDMAP_00000466
BDMAP_00000467
BDMAP_00000468
BDMAP_00000469
BDMAP_00000470
BDMAP_00000471
BDMAP_00000472
BDMAP_00000473
BDMAP_00000474
BDMAP_00000475
BDMAP_00000476
BDMAP_00000477
BDMAP_00000478
BDMAP_00000479
BDMAP_00000480
BDMAP_00000481
BDMAP_00000482
BDMAP_00000483
BDMAP_00000484
BDMAP_00000485
BDMAP_00000486
BDMAP_00000487
BDMAP_00000488
BDMAP_00000489
BDMAP_00000490
BDMAP_00000491
BDMAP_00000492
BDMAP_00000493
BDMAP_00000494
BDMAP_00000495
BDMAP_00000496
BDMAP_00000497
BDMAP_00000498
BDMAP_00000499
BDMAP_00000500
BDMAP_00000501
BDMAP_00000502
BDMAP_00000503
BDMAP_00000504
BDMAP_00000505
BDMAP_00000506
BDMAP_00000507
BDMAP_00000508
BDMAP_00000509
BDMAP_00000510
BDMAP_00000511
BDMAP_00000512
BDMAP_00000513
BDMAP_00000514
BDMAP_00000515
BDMAP_00000516
BDMAP_00000517
BDMAP_00000518
BDMAP_00000519
BDMAP_00000520
BDMAP_00000521
BDMAP_00000522
BDMAP_00000523
BDMAP_00000524
BDMAP_00000525
BDMAP_00000526
BDMAP_00000527
BDMAP_00000528
BDMAP_00000529
BDMAP_00000530
BDMAP_00000531
BDMAP_00000532
BDMAP_00000533
BDMAP_00000534
BDMAP_00000535
BDMAP_00000536
BDMAP_00000537
BDMAP_00000538
BDMAP_00000539
BDMAP_00000540
BDMAP_00000541
BDMAP_00000542
BDMAP_00000543
BDMAP_00000544
BDMAP_00000545
BDMAP_00000546
BDMAP_00000547
BDMAP_00000548
BDMAP_00000549
BDMAP_00000550
BDMAP_00000551
BDMAP_00000552
BDMAP_00000553
BDMAP_00000554
BDMAP_00000555
BDMAP_00000556
BDMAP_00000557
BDMAP_00000558
BDMAP_00000559
BDMAP_00000560
BDMAP_00000561
BDMAP_00000562
BDMAP_00000563
BDMAP_00000564
BDMAP_00000565
BDMAP_00000566
BDMAP_00000567
BDMAP_00000568
BDMAP_00000569
BDMAP_00000570
BDMAP_00000571
BDMAP_00000572
BDMAP_00000573
BDMAP_00000574
BDMAP_00000575
BDMAP_00000576
BDMAP_00000577
BDMAP_00000578
BDMAP_00000579
BDMAP_00000580
BDMAP_00000581
BDMAP_00000582
BDMAP_00000583
BDMAP_00000584
BDMAP_00000585
BDMAP_00000586
BDMAP_00000587
BDMAP_00000588
BDMAP_00000589
BDMAP_00000590
BDMAP_00000591
BDMAP_00000592
BDMAP_00000593
BDMAP_00000594
BDMAP_00000595
BDMAP_00000596
BDMAP_00000597
BDMAP_00000598
BDMAP_00000599
BDMAP_00000600
BDMAP_00000601
BDMAP_00000602
BDMAP_00000603
BDMAP_00000604
BDMAP_00000605
BDMAP_00000606
BDMAP_00000607
BDMAP_00000608
BDMAP_00000609
BDMAP_00000610
BDMAP_00000611
BDMAP_00000612
BDMAP_00000613
BDMAP_00000614
BDMAP_00000615
BDMAP_00000616
BDMAP_00000617
BDMAP_00000618
BDMAP_00000619
BDMAP_00000620
BDMAP_00000621
BDMAP_00000622
BDMAP_00000623
BDMAP_00000624
BDMAP_00000625
BDMAP_00000626
BDMAP_00000627
BDMAP_00000628
BDMAP_00000629
BDMAP_00000630
BDMAP_00000631
BDMAP_00000632
BDMAP_00000633
BDMAP_00000634
BDMAP_00000635
BDMAP_00000636
BDMAP_00000637
BDMAP_00000638
BDMAP_00000639
BDMAP_00000640
BDMAP_00000641
BDMAP_00000642
BDMAP_00000643
BDMAP_00000644
BDMAP_00000645
BDMAP_00000646
BDMAP_00000647
BDMAP_00000648
BDMAP_00000649
BDMAP_00000650
BDMAP_00000651
BDMAP_00000652
BDMAP_00000653
BDMAP_00000654
BDMAP_00000655
BDMAP_00000656
BDMAP_00000657
BDMAP_00000658
BDMAP_00000659
BDMAP_00000660
BDMAP_00000661
BDMAP_00000662
BDMAP_00000663
BDMAP_00000664
BDMAP_00000665
BDMAP_00000666
BDMAP_00000667
BDMAP_00000668
BDMAP_00000669
BDMAP_00000670
BDMAP_00000671
BDMAP_00000672
BDMAP_00000673
BDMAP_00000674
BDMAP_00000675
BDMAP_00000676
BDMAP_00000677
BDMAP_00000678
BDMAP_00000679
BDMAP_00000680
BDMAP_00000681
BDMAP_00000682
BDMAP_00000683
BDMAP_00000684
BDMAP_00000685
BDMAP_00000686
BDMAP_00000687
BDMAP_00000688
BDMAP_00000689
BDMAP_00000690
BDMAP_00000691
BDMAP_00000692
BDMAP_00000693
BDMAP_00000694
BDMAP_00000695
BDMAP_00000696
BDMAP_00000697
BDMAP_00000698
BDMAP_00000699
BDMAP_00000700
BDMAP_00000701
BDMAP_00000702
BDMAP_00000703
BDMAP_00000704
BDMAP_00000705
BDMAP_00000706
BDMAP_00000707
BDMAP_00000708
BDMAP_00000709
BDMAP_00000710
BDMAP_00000711
BDMAP_00000712
BDMAP_00000713
BDMAP_00000714
BDMAP_00000715
BDMAP_00000716
BDMAP_00000717
BDMAP_00000718
BDMAP_00000719
BDMAP_00000720
BDMAP_00000721
BDMAP_00000722
BDMAP_00000723
BDMAP_00000724
BDMAP_00000725
BDMAP_00000726
BDMAP_00000727
BDMAP_00000728
BDMAP_00000729
BDMAP_00000730
BDMAP_00000731
BDMAP_00000732
BDMAP_00000733
BDMAP_00000734
BDMAP_00000735
BDMAP_00000736
BDMAP_00000737
BDMAP_00000738
BDMAP_00000739
BDMAP_00000740
BDMAP_00000741
BDMAP_00000742
BDMAP_00000743
BDMAP_00000744
BDMAP_00000745
BDMAP_00000746
BDMAP_00000747
BDMAP_00000748
BDMAP_00000749
BDMAP_00000750
BDMAP_00000751
BDMAP_00000752
BDMAP_00000753
BDMAP_00000754
BDMAP_00000755
BDMAP_00000756
BDMAP_00000757
BDMAP_00000758
BDMAP_00000759
BDMAP_00000760
BDMAP_00000761
BDMAP_00000762
BDMAP_00000763
BDMAP_00000764
BDMAP_00000765
BDMAP_00000766
BDMAP_00000767
BDMAP_00000768
BDMAP_00000769
BDMAP_00000770
BDMAP_00000771
BDMAP_00000772
BDMAP_00000773
BDMAP_00000774
BDMAP_00000775
BDMAP_00000776
BDMAP_00000777
BDMAP_00000778
BDMAP_00000779
BDMAP_00000780
BDMAP_00000781
BDMAP_00000782
BDMAP_00000783
BDMAP_00000784
BDMAP_00000785
BDMAP_00000786
BDMAP_00000787
BDMAP_00000788
BDMAP_00000789
BDMAP_00000790
BDMAP_00000791
BDMAP_00000792
BDMAP_00000793
BDMAP_00000794
BDMAP_00000795
BDMAP_00000796
BDMAP_00000797
BDMAP_00000798
BDMAP_00000799
BDMAP_00000800
BDMAP_00000801
BDMAP_00000802
BDMAP_00000803
BDMAP_00000804
BDMAP_00000805
BDMAP_00000806
BDMAP_00000807
BDMAP_00000808
BDMAP_00000809
BDMAP_00000810
BDMAP_00000811
BDMAP_00000812
BDMAP_00000813
BDMAP_00000814
BDMAP_00000815
BDMAP_00000816
BDMAP_00000817
BDMAP_00000818
BDMAP_00000819
BDMAP_00000820
BDMAP_00000821
BDMAP_00000822
BDMAP_00000823
BDMAP_00000824
BDMAP_00000825
BDMAP_00000826
BDMAP_00000827
BDMAP_00000828
BDMAP_00000829
BDMAP_00000830
BDMAP_00000831
BDMAP_00000832
BDMAP_00000833
BDMAP_00000834
BDMAP_00000835
BDMAP_00000836
BDMAP_00000837
BDMAP_00000838
BDMAP_00000839
BDMAP_00000840
BDMAP_00000841
BDMAP_00000842
BDMAP_00000843
BDMAP_00000844
BDMAP_00000845
BDMAP_00000846
BDMAP_00000847
BDMAP_00000848
BDMAP_00000849
BDMAP_00000850
BDMAP_00000851
BDMAP_00000852
BDMAP_00000853
BDMAP_00000854
BDMAP_00000855
BDMAP_00000856
BDMAP_00000857
BDMAP_00000858
BDMAP_00000859
BDMAP_00000860
BDMAP_00000861
BDMAP_00000862
BDMAP_00000863
BDMAP_00000864
BDMAP_00000865
BDMAP_00000866
BDMAP_00000867
BDMAP_00000868
BDMAP_00000869
BDMAP_00000870
BDMAP_00000871
BDMAP_00000872
BDMAP_00000873
BDMAP_00000874
BDMAP_00000875
BDMAP_00000876
BDMAP_00000877
BDMAP_00000878
BDMAP_00000879
BDMAP_00000880
BDMAP_00000881
BDMAP_00000882
BDMAP_00000883
BDMAP_00000884
BDMAP_00000885
BDMAP_00000886
BDMAP_00000887
BDMAP_00000888
BDMAP_00000889
BDMAP_00000890
BDMAP_00000891
BDMAP_00000892
BDMAP_00000893
BDMAP_00000894
BDMAP_00000895
BDMAP_00000896
BDMAP_00000897
BDMAP_00000898
BDMAP_00000899
BDMAP_00000900
BDMAP_00000901
BDMAP_00000902
BDMAP_00000903
BDMAP_00000904
BDMAP_00000905
BDMAP_00000906
BDMAP_00000907
BDMAP_00000908
BDMAP_00000909
BDMAP_00000910
BDMAP_00000911
BDMAP_00000912
BDMAP_00000913
BDMAP_00000914
BDMAP_00000915
BDMAP_00000916
BDMAP_00000917
BDMAP_00000918
BDMAP_00000919
BDMAP_00000920
BDMAP_00000921
BDMAP_00000922
BDMAP_00000923
BDMAP_00000924
BDMAP_00000925
BDMAP_00000926
BDMAP_00000927
BDMAP_00000928
BDMAP_00000929
BDMAP_00000930
BDMAP_00000931
BDMAP_00000932
BDMAP_00000933
BDMAP_00000934
BDMAP_00000935
BDMAP_00000936
BDMAP_00000937
BDMAP_00000938
BDMAP_00000939
BDMAP_00000940
BDMAP_00000941
BDMAP_00000942
BDMAP_00000943
BDMAP_00000944
BDMAP_00000945
BDMAP_00000946
BDMAP_00000947
BDMAP_00000948
BDMAP_00000949
BDMAP_00000950
BDMAP_00000951
BDMAP_00000952
BDMAP_00000953
BDMAP_00000954
BDMAP_00000955
BDMAP_00000956
BDMAP_00000957
BDMAP_00000958
BDMAP_00000959
BDMAP_00000960
BDMAP_00000961
BDMAP_00000962
BDMAP_00000963
BDMAP_00000964
BDMAP_00000965
BDMAP_00000966
BDMAP_00000967
BDMAP_00000968
BDMAP_00000969
BDMAP_00000970
BDMAP_00000971
BDMAP_00000972
BDMAP_00000973
BDMAP_00000974
BDMAP_00000975
BDMAP_00000976
BDMAP_00000977
BDMAP_00000978
BDMAP_00000979
BDMAP_00000980
BDMAP_00000981
BDMAP_00000982
BDMAP_00000983
BDMAP_00000984
BDMAP_00000985
BDMAP_00000986
BDMAP_00000987
BDMAP_00000988
BDMAP_00000989
BDMAP_00000990
BDMAP_00000991
BDMAP_00000992
BDMAP_00000993
BDMAP_00000994
BDMAP_00000995
BDMAP_00000996
BDMAP_00000997
BDMAP_00000998
BDMAP_00000999
BDMAP_00001000
BDMAP_00001001
BDMAP_00001002
BDMAP_00001003
BDMAP_00001004
BDMAP_00001005
BDMAP_00001006
BDMAP_00001007
BDMAP_00001008
BDMAP_00001009
BDMAP_00001010
BDMAP_00001011
BDMAP_00001012
BDMAP_00001013
BDMAP_00001014
BDMAP_00001015
BDMAP_00001016
BDMAP_00001017
BDMAP_00001018
BDMAP_00001019
BDMAP_00001020
BDMAP_00001021
BDMAP_00001022
BDMAP_00001023
BDMAP_00001024
BDMAP_00001025
BDMAP_00001026
BDMAP_00001027
BDMAP_00001028
BDMAP_00001029
BDMAP_00001030
BDMAP_00001031
BDMAP_00001032
BDMAP_00001033
BDMAP_00001034
BDMAP_00001035
BDMAP_00001036
BDMAP_00001037
BDMAP_00001038
BDMAP_00001039
BDMAP_00001040
BDMAP_00001041
BDMAP_00001042
BDMAP_00001043
BDMAP_00001044
BDMAP_00001045
BDMAP_00001046
BDMAP_00001047
BDMAP_00001048
BDMAP_00001049
BDMAP_00001050
BDMAP_00001051
BDMAP_00001052
BDMAP_00001053
BDMAP_00001054
BDMAP_00001055
BDMAP_00001056
BDMAP_00001057
BDMAP_00001058
BDMAP_00001059
BDMAP_00001060
BDMAP_00001061
BDMAP_00001062
BDMAP_00001063
BDMAP_00001064
BDMAP_00001065
BDMAP_00001066
BDMAP_00001067
BDMAP_00001068
BDMAP_00001069
BDMAP_00001070
BDMAP_00001071
BDMAP_00001072
BDMAP_00001073
BDMAP_00001074
BDMAP_00001075
BDMAP_00001076
BDMAP_00001077
BDMAP_00001078
BDMAP_00001079
BDMAP_00001080
BDMAP_00001081
BDMAP_00001082
BDMAP_00001083
BDMAP_00001084
BDMAP_00001085
BDMAP_00001086
BDMAP_00001087
BDMAP_00001088
BDMAP_00001089
BDMAP_00001090
BDMAP_00001091
BDMAP_00001092
BDMAP_00001093
BDMAP_00001094
BDMAP_00001095
BDMAP_00001096
BDMAP_00001097
BDMAP_00001098
BDMAP_00001099
BDMAP_00001100
BDMAP_00001101
BDMAP_00001102
BDMAP_00001103
BDMAP_00001104
BDMAP_00001105
BDMAP_00001106
BDMAP_00001107
BDMAP_00001108
BDMAP_00001109
BDMAP_00001110
BDMAP_00001111
BDMAP_00001112
BDMAP_00001113
BDMAP_00001114
BDMAP_00001115
BDMAP_00001116
BDMAP_00001117
BDMAP_00001118
BDMAP_00001119
BDMAP_00001120
BDMAP_00001121
BDMAP_00001122
BDMAP_00001123
BDMAP_00001124
BDMAP_00001125
BDMAP_00001126
BDMAP_00001127
BDMAP_00001128
BDMAP_00001129
BDMAP_00001130
BDMAP_00001131
BDMAP_00001132
BDMAP_00001133
BDMAP_00001134
BDMAP_00001135
BDMAP_00001136
BDMAP_00001137
BDMAP_00001138
BDMAP_00001139
BDMAP_00001140
BDMAP_00001141
BDMAP_00001142
BDMAP_00001143
BDMAP_00001144
BDMAP_00001145
BDMAP_00001146
BDMAP_00001147
BDMAP_00001148
BDMAP_00001149
BDMAP_00001150
BDMAP_00001151
BDMAP_00001152
BDMAP_00001153
BDMAP_00001154
BDMAP_00001155
BDMAP_00001156
BDMAP_00001157
BDMAP_00001158
BDMAP_00001159
BDMAP_00001160
BDMAP_00001161
BDMAP_00001162
BDMAP_00001163
BDMAP_00001164
BDMAP_00001165
BDMAP_00001166
BDMAP_00001167
BDMAP_00001168
BDMAP_00001169
BDMAP_00001170
BDMAP_00001171
BDMAP_00001172
BDMAP_00001173
BDMAP_00001174
BDMAP_00001175
BDMAP_00001176
BDMAP_00001177
BDMAP_00001178
BDMAP_00001179
BDMAP_00001180
BDMAP_00001181
BDMAP_00001182
BDMAP_00001183
BDMAP_00001184
BDMAP_00001185
BDMAP_00001186
BDMAP_00001187
BDMAP_00001188
BDMAP_00001189
BDMAP_00001190
BDMAP_00001191
BDMAP_00001192
BDMAP_00001193
BDMAP_00001194
BDMAP_00001195
BDMAP_00001196
BDMAP_00001197
BDMAP_00001198
BDMAP_00001199
BDMAP_00001200
BDMAP_00001201
BDMAP_00001202
BDMAP_00001203
BDMAP_00001204
BDMAP_00001205
BDMAP_00001206
BDMAP_00001207
BDMAP_00001208
BDMAP_00001209
BDMAP_00001210
BDMAP_00001211
BDMAP_00001212
BDMAP_00001213
BDMAP_00001214
BDMAP_00001215
BDMAP_00001216
BDMAP_00001217
BDMAP_00001218
BDMAP_00001219
BDMAP_00001220
BDMAP_00001221
BDMAP_00001222
BDMAP_00001223
BDMAP_00001224
BDMAP_00001225
BDMAP_00001226
BDMAP_00001227
BDMAP_00001228
BDMAP_00001229
BDMAP_00001230
BDMAP_00001231
BDMAP_00001232
BDMAP_00001233
BDMAP_00001234
BDMAP_00001235
BDMAP_00001236
BDMAP_00001237
BDMAP_00001238
BDMAP_00001239
BDMAP_00001240
BDMAP_00001241
BDMAP_00001242
BDMAP_00001243
BDMAP_00001244
BDMAP_00001245
BDMAP_00001246
BDMAP_00001247
BDMAP_00001248
BDMAP_00001249
BDMAP_00001250
BDMAP_00001251
BDMAP_00001252
BDMAP_00001253
BDMAP_00001254
BDMAP_00001255
BDMAP_00001256
BDMAP_00001257
BDMAP_00001258
BDMAP_00001259
BDMAP_00001260
BDMAP_00001261
BDMAP_00001262
BDMAP_00001263
BDMAP_00001264
BDMAP_00001265
BDMAP_00001266
BDMAP_00001267
BDMAP_00001268
BDMAP_00001269
BDMAP_00001270
BDMAP_00001271
BDMAP_00001272
BDMAP_00001273
BDMAP_00001274
BDMAP_00001275
BDMAP_00001276
BDMAP_00001277
BDMAP_00001278
BDMAP_00001279
BDMAP_00001280
BDMAP_00001281
BDMAP_00001282
BDMAP_00001283
BDMAP_00001284
BDMAP_00001285
BDMAP_00001286
BDMAP_00001287
BDMAP_00001288
BDMAP_00001289
BDMAP_00001290
BDMAP_00001291
BDMAP_00001292
BDMAP_00001293
BDMAP_00001294
BDMAP_00001295
BDMAP_00001296
BDMAP_00001297
BDMAP_00001298
BDMAP_00001299
BDMAP_00001300
BDMAP_00001301
BDMAP_00001302
BDMAP_00001303
BDMAP_00001304
BDMAP_00001305
BDMAP_00001306
BDMAP_00001307
BDMAP_00001308
BDMAP_00001309
BDMAP_00001310
BDMAP_00001311
BDMAP_00001312
BDMAP_00001313
BDMAP_00001314
BDMAP_00001315
BDMAP_00001316
BDMAP_00001317
BDMAP_00001318
BDMAP_00001319
BDMAP_00001320
BDMAP_00001321
BDMAP_00001322
BDMAP_00001323
BDMAP_00001324
BDMAP_00001325
BDMAP_00001326
BDMAP_00001327
BDMAP_00001328
BDMAP_00001329
BDMAP_00001330
BDMAP_00001331
BDMAP_00001332
BDMAP_00001333
BDMAP_00001334
BDMAP_00001335
BDMAP_00001336
BDMAP_00001337
BDMAP_00001338
BDMAP_00001339
BDMAP_00001340
BDMAP_00001341
BDMAP_00001342
BDMAP_00001343
BDMAP_00001344
BDMAP_00001345
BDMAP_00001346
BDMAP_00001347
BDMAP_00001348
BDMAP_00001349
BDMAP_00001350
BDMAP_00001351
BDMAP_00001352
BDMAP_00001353
BDMAP_00001354
BDMAP_00001355
BDMAP_00001356
BDMAP_00001357
BDMAP_00001358
BDMAP_00001359
BDMAP_00001360
BDMAP_00001361
BDMAP_00001362
BDMAP_00001363
BDMAP_00001364
BDMAP_00001365
BDMAP_00001366
BDMAP_00001367
BDMAP_00001368
BDMAP_00001369
BDMAP_00001370
BDMAP_00001371
BDMAP_00001372
BDMAP_00001373
BDMAP_00001374
BDMAP_00001375
BDMAP_00001376
BDMAP_00001377
BDMAP_00001378
BDMAP_00001379
BDMAP_00001380
BDMAP_00001381
BDMAP_00001382
BDMAP_00001383
BDMAP_00001384
BDMAP_00001385
BDMAP_00001386
BDMAP_00001387
BDMAP_00001388
BDMAP_00001389
BDMAP_00001390
BDMAP_00001391
BDMAP_00001392
BDMAP_00001393
BDMAP_00001394
BDMAP_00001395
BDMAP_00001396
BDMAP_00001397
BDMAP_00001398
BDMAP_00001399
BDMAP_00001400
BDMAP_00001401
BDMAP_00001402
BDMAP_00001403
BDMAP_00001404
BDMAP_00001405
BDMAP_00001406
BDMAP_00001407
BDMAP_00001408
BDMAP_00001409
BDMAP_00001410
BDMAP_00001411
BDMAP_00001412
BDMAP_00001413
BDMAP_00001414
BDMAP_00001415
BDMAP_00001416
BDMAP_00001417
BDMAP_00001418
BDMAP_00001419
BDMAP_00001420
BDMAP_00001421
BDMAP_00001422
BDMAP_00001423
BDMAP_00001424
BDMAP_00001425
BDMAP_00001426
BDMAP_00001427
BDMAP_00001428
BDMAP_00001429
BDMAP_00001430
BDMAP_00001431
BDMAP_00001432
BDMAP_00001433
BDMAP_00001434
BDMAP_00001435
BDMAP_00001436
BDMAP_00001437
BDMAP_00001438
BDMAP_00001439
BDMAP_00001440
BDMAP_00001441
BDMAP_00001442
BDMAP_00001443
BDMAP_00001444
BDMAP_00001445
BDMAP_00001446
BDMAP_00001447
BDMAP_00001448
BDMAP_00001449
BDMAP_00001450
BDMAP_00001451
BDMAP_00001452
BDMAP_00001453
BDMAP_00001454
BDMAP_00001455
BDMAP_00001456
BDMAP_00001457
BDMAP_00001458
BDMAP_00001459
BDMAP_00001460
BDMAP_00001461
BDMAP_00001462
BDMAP_00001463
BDMAP_00001464
BDMAP_00001465
BDMAP_00001466
BDMAP_00001467
BDMAP_00001468
BDMAP_00001469
BDMAP_00001470
BDMAP_00001471
BDMAP_00001472
BDMAP_00001473
BDMAP_00001474
BDMAP_00001475
BDMAP_00001476
BDMAP_00001477
BDMAP_00001478
BDMAP_00001479
BDMAP_00001480
BDMAP_00001481
BDMAP_00001482
BDMAP_00001483
BDMAP_00001484
BDMAP_00001485
BDMAP_00001486
BDMAP_00001487
BDMAP_00001488
BDMAP_00001489
BDMAP_00001490
BDMAP_00001491
BDMAP_00001492
BDMAP_00001493
BDMAP_00001494
BDMAP_00001495
BDMAP_00001496
BDMAP_00001497
BDMAP_00001498
BDMAP_00001499
BDMAP_00001500
BDMAP_00001501
BDMAP_00001502
BDMAP_00001503
BDMAP_00001504
BDMAP_00001505
BDMAP_00001506
BDMAP_00001507
BDMAP_00001508
BDMAP_00001509
BDMAP_00001510
BDMAP_00001511
BDMAP_00001512
BDMAP_00001513
BDMAP_00001514
BDMAP_00001515
BDMAP_00001516
BDMAP_00001517
BDMAP_00001518
BDMAP_00001519
BDMAP_00001520
BDMAP_00001521
BDMAP_00001522
BDMAP_00001523
BDMAP_00001524
BDMAP_00001525
BDMAP_00001526
BDMAP_00001527
BDMAP_00001528
BDMAP_00001529
BDMAP_00001530
BDMAP_00001531
BDMAP_00001532
BDMAP_00001533
BDMAP_00001534
BDMAP_00001535
BDMAP_00001536
BDMAP_00001537
BDMAP_00001538
BDMAP_00001539
BDMAP_00001540
BDMAP_00001541
BDMAP_00001542
BDMAP_00001543
BDMAP_00001544
BDMAP_00001545
BDMAP_00001546
BDMAP_00001547
BDMAP_00001548
BDMAP_00001549
BDMAP_00001550
BDMAP_00001551
BDMAP_00001552
BDMAP_00001553
BDMAP_00001554
BDMAP_00001555
BDMAP_00001556
BDMAP_00001557
BDMAP_00001558
BDMAP_00001559
BDMAP_00001560
BDMAP_00001561
BDMAP_00001562
BDMAP_00001563
BDMAP_00001564
BDMAP_00001565
BDMAP_00001566
BDMAP_00001567
BDMAP_00001568
BDMAP_00001569
BDMAP_00001570
BDMAP_00001571
BDMAP_00001572
BDMAP_00001573
BDMAP_00001574
BDMAP_00001575
BDMAP_00001576
BDMAP_00001577
BDMAP_00001578
BDMAP_00001579
BDMAP_00001580
BDMAP_00001581
BDMAP_00001582
BDMAP_00001583
BDMAP_00001584
BDMAP_00001585
BDMAP_00001586
BDMAP_00001587
BDMAP_00001588
BDMAP_00001589
BDMAP_00001590
BDMAP_00001591
BDMAP_00001592
BDMAP_00001593
BDMAP_00001594
BDMAP_00001595
BDMAP_00001596
BDMAP_00001597
BDMAP_00001598
BDMAP_00001599
BDMAP_00001600
BDMAP_00001601
BDMAP_00001602
BDMAP_00001603
BDMAP_00001604
BDMAP_00001605
BDMAP_00001606
BDMAP_00001607
BDMAP_00001608
BDMAP_00001609
BDMAP_00001610
BDMAP_00001611
BDMAP_00001612
BDMAP_00001613
BDMAP_00001614
BDMAP_00001615
BDMAP_00001616
BDMAP_00001617
BDMAP_00001618
BDMAP_00001619
BDMAP_00001620
BDMAP_00001621
BDMAP_00001622
BDMAP_00001623
BDMAP_00001624
BDMAP_00001625
BDMAP_00001626
BDMAP_00001627
BDMAP_00001628
BDMAP_00001629
BDMAP_00001630
BDMAP_00001631
BDMAP_00001632
BDMAP_00001633
BDMAP_00001634
BDMAP_00001635
BDMAP_00001636
BDMAP_00001637
BDMAP_00001638
BDMAP_00001639
BDMAP_00001640
BDMAP_00001641
BDMAP_00001642
BDMAP_00001643
BDMAP_00001644
BDMAP_00001645
BDMAP_00001646
BDMAP_00001647
BDMAP_00001648
BDMAP_00001649
BDMAP_00001650
BDMAP_00001651
BDMAP_00001652
BDMAP_00001653
BDMAP_00001654
BDMAP_00001655
BDMAP_00001656
BDMAP_00001657
BDMAP_00001658
BDMAP_00001659
BDMAP_00001660
BDMAP_00001661
BDMAP_00001662
BDMAP_00001663
BDMAP_00001664
BDMAP_00001665
BDMAP_00001666
BDMAP_00001667
BDMAP_00001668
BDMAP_00001669
BDMAP_00001670
BDMAP_00001671
BDMAP_00001672
BDMAP_00001673
BDMAP_00001674
BDMAP_00001675
BDMAP_00001676
BDMAP_00001677
BDMAP_00001678
BDMAP_00001679
BDMAP_00001680
BDMAP_00001681
BDMAP_00001682
BDMAP_00001683
BDMAP_00001684
BDMAP_00001685
BDMAP_00001686
BDMAP_00001687
BDMAP_00001688
BDMAP_00001689
BDMAP_00001690
BDMAP_00001691
BDMAP_00001692
BDMAP_00001693
BDMAP_00001694
BDMAP_00001695
BDMAP_00001696
BDMAP_00001697
BDMAP_00001698
BDMAP_00001699
BDMAP_00001700
BDMAP_00001701
BDMAP_00001702
BDMAP_00001703
BDMAP_00001704
BDMAP_00001705
BDMAP_00001706
BDMAP_00001707
BDMAP_00001708
BDMAP_00001709
BDMAP_00001710
BDMAP_00001711
BDMAP_00001712
BDMAP_00001713
BDMAP_00001714
BDMAP_00001715
BDMAP_00001716
BDMAP_00001717
BDMAP_00001718
BDMAP_00001719
BDMAP_00001720
BDMAP_00001721
BDMAP_00001722
BDMAP_00001723
BDMAP_00001724
BDMAP_00001725
BDMAP_00001726
BDMAP_00001727
BDMAP_00001728
BDMAP_00001729
BDMAP_00001730
BDMAP_00001731
BDMAP_00001732
BDMAP_00001733
BDMAP_00001734
BDMAP_00001735
BDMAP_00001736
BDMAP_00001737
BDMAP_00001738
BDMAP_00001739
BDMAP_00001740
BDMAP_00001741
BDMAP_00001742
BDMAP_00001743
BDMAP_00001744
BDMAP_00001745
BDMAP_00001746
BDMAP_00001747
BDMAP_00001748
BDMAP_00001749
BDMAP_00001750
BDMAP_00001751
BDMAP_00001752
BDMAP_00001753
BDMAP_00001754
BDMAP_00001755
BDMAP_00001756
BDMAP_00001757
BDMAP_00001758
BDMAP_00001759
BDMAP_00001760
BDMAP_00001761
BDMAP_00001762
BDMAP_00001763
BDMAP_00001764
BDMAP_00001765
BDMAP_00001766
BDMAP_00001767
BDMAP_00001768
BDMAP_00001769
BDMAP_00001770
BDMAP_00001771
BDMAP_00001772
BDMAP_00001773
BDMAP_00001774
BDMAP_00001775
BDMAP_00001776
BDMAP_00001777
BDMAP_00001778
BDMAP_00001779
BDMAP_00001780
BDMAP_00001781
BDMAP_00001782
BDMAP_00001783
BDMAP_00001784
BDMAP_00001785
BDMAP_00001786
BDMAP_00001787
BDMAP_00001788
BDMAP_00001789
BDMAP_00001790
BDMAP_00001791
BDMAP_00001792
BDMAP_00001793
BDMAP_00001794
BDMAP_00001795
BDMAP_00001796
BDMAP_00001797
BDMAP_00001798
BDMAP_00001799
BDMAP_00001800
BDMAP_00001801
BDMAP_00001802
BDMAP_00001803
BDMAP_00001804
BDMAP_00001805
BDMAP_00001806
BDMAP_00001807
BDMAP_00001808
BDMAP_00001809
BDMAP_00001810
BDMAP_00001811
BDMAP_00001812
BDMAP_00001813
BDMAP_00001814
BDMAP_00001815
BDMAP_00001816
BDMAP_00001817
BDMAP_00001818
BDMAP_00001819
BDMAP_00001820
BDMAP_00001821
BDMAP_00001822
BDMAP_00001823
BDMAP_00001824
BDMAP_00001825
BDMAP_00001826
BDMAP_00001827
BDMAP_00001828
BDMAP_00001829
BDMAP_00001830
BDMAP_00001831
BDMAP_00001832
BDMAP_00001833
BDMAP_00001834
BDMAP_00001835
BDMAP_00001836
BDMAP_00001837
BDMAP_00001838
BDMAP_00001839
BDMAP_00001840
BDMAP_00001841
BDMAP_00001842
BDMAP_00001843
BDMAP_00001844
BDMAP_00001845
BDMAP_00001846
BDMAP_00001847
BDMAP_00001848
BDMAP_00001849
BDMAP_00001850
BDMAP_00001851
BDMAP_00001852
BDMAP_00001853
BDMAP_00001854
BDMAP_00001855
BDMAP_00001856
BDMAP_00001857
BDMAP_00001858
BDMAP_00001859
BDMAP_00001860
BDMAP_00001861
BDMAP_00001862
BDMAP_00001863
BDMAP_00001864
BDMAP_00001865
BDMAP_00001866
BDMAP_00001867
BDMAP_00001868
BDMAP_00001869
BDMAP_00001870
BDMAP_00001871
BDMAP_00001872
BDMAP_00001873
BDMAP_00001874
BDMAP_00001875
BDMAP_00001876
BDMAP_00001877
BDMAP_00001878
BDMAP_00001879
BDMAP_00001880
BDMAP_00001881
BDMAP_00001882
BDMAP_00001883
BDMAP_00001884
BDMAP_00001885
BDMAP_00001886
BDMAP_00001887
BDMAP_00001888
BDMAP_00001889
BDMAP_00001890
BDMAP_00001891
BDMAP_00001892
BDMAP_00001893
BDMAP_00001894
BDMAP_00001895
BDMAP_00001896
BDMAP_00001897
BDMAP_00001898
BDMAP_00001899
BDMAP_00001900
BDMAP_00001901
BDMAP_00001902
BDMAP_00001903
BDMAP_00001904
BDMAP_00001905
BDMAP_00001906
BDMAP_00001907
BDMAP_00001908
BDMAP_00001909
BDMAP_00001910
BDMAP_00001911
BDMAP_00001912
BDMAP_00001913
BDMAP_00001914
BDMAP_00001915
BDMAP_00001916
BDMAP_00001917
BDMAP_00001918
BDMAP_00001919
BDMAP_00001920
BDMAP_00001921
BDMAP_00001922
BDMAP_00001923
BDMAP_00001924
BDMAP_00001925
BDMAP_00001926
BDMAP_00001927
BDMAP_00001928
BDMAP_00001929
BDMAP_00001930
BDMAP_00001931
BDMAP_00001932
BDMAP_00001933
BDMAP_00001934
BDMAP_00001935
BDMAP_00001936
BDMAP_00001937
BDMAP_00001938
BDMAP_00001939
BDMAP_00001940
BDMAP_00001941
BDMAP_00001942
BDMAP_00001943
BDMAP_00001944
BDMAP_00001945
BDMAP_00001946
BDMAP_00001947
BDMAP_00001948
BDMAP_00001949
BDMAP_00001950
BDMAP_00001951
BDMAP_00001952
BDMAP_00001953
BDMAP_00001954
BDMAP_00001955
BDMAP_00001956
BDMAP_00001957
BDMAP_00001958
BDMAP_00001959
BDMAP_00001960
BDMAP_00001961
BDMAP_00001962
BDMAP_00001963
BDMAP_00001964
BDMAP_00001965
BDMAP_00001966
BDMAP_00001967
BDMAP_00001968
BDMAP_00001969
BDMAP_00001970
BDMAP_00001971
BDMAP_00001972
BDMAP_00001973
BDMAP_00001974
BDMAP_00001975
BDMAP_00001976
BDMAP_00001977
BDMAP_00001978
BDMAP_00001979
BDMAP_00001980
BDMAP_00001981
BDMAP_00001982
BDMAP_00001983
BDMAP_00001984
BDMAP_00001985
BDMAP_00001986
BDMAP_00001987
BDMAP_00001988
BDMAP_00001989
BDMAP_00001990
BDMAP_00001991
BDMAP_00001992
BDMAP_00001993
BDMAP_00001994
BDMAP_00001995
BDMAP_00001996
BDMAP_00001997
BDMAP_00001998
BDMAP_00001999
BDMAP_00002000
BDMAP_00002001
BDMAP_00002002
BDMAP_00002003
BDMAP_00002004
BDMAP_00002005
BDMAP_00002006
BDMAP_00002007
BDMAP_00002008
BDMAP_00002009
BDMAP_00002010
BDMAP_00002011
BDMAP_00002012
BDMAP_00002013
BDMAP_00002014
BDMAP_00002015
BDMAP_00002016
BDMAP_00002017
BDMAP_00002018
BDMAP_00002019
BDMAP_00002020
BDMAP_00002021
BDMAP_00002022
BDMAP_00002023
BDMAP_00002024
BDMAP_00002025
BDMAP_00002026
BDMAP_00002027
BDMAP_00002028
BDMAP_00002029
BDMAP_00002030
BDMAP_00002031
BDMAP_00002032
BDMAP_00002033
BDMAP_00002034
BDMAP_00002035
BDMAP_00002036
BDMAP_00002037
BDMAP_00002038
BDMAP_00002039
BDMAP_00002040
BDMAP_00002041
BDMAP_00002042
BDMAP_00002043
BDMAP_00002044
BDMAP_00002045
BDMAP_00002046
BDMAP_00002047
BDMAP_00002048
BDMAP_00002049
BDMAP_00002050
BDMAP_00002051
BDMAP_00002052
BDMAP_00002053
BDMAP_00002054
BDMAP_00002055
BDMAP_00002056
BDMAP_00002057
BDMAP_00002058
BDMAP_00002059
BDMAP_00002060
BDMAP_00002061
BDMAP_00002062
BDMAP_00002063
BDMAP_00002064
BDMAP_00002065
BDMAP_00002066
BDMAP_00002067
BDMAP_00002068
BDMAP_00002069
BDMAP_00002070
BDMAP_00002071
BDMAP_00002072
BDMAP_00002073
BDMAP_00002074
BDMAP_00002075
BDMAP_00002076
BDMAP_00002077
BDMAP_00002078
BDMAP_00002079
BDMAP_00002080
BDMAP_00002081
BDMAP_00002082
BDMAP_00002083
BDMAP_00002084
BDMAP_00002085
BDMAP_00002086
BDMAP_00002087
BDMAP_00002088
BDMAP_00002089
BDMAP_00002090
BDMAP_00002091
BDMAP_00002092
BDMAP_00002093
BDMAP_00002094
BDMAP_00002095
BDMAP_00002096
BDMAP_00002097
BDMAP_00002098
BDMAP_00002099
BDMAP_00002100
BDMAP_00002101
BDMAP_00002102
BDMAP_00002103
BDMAP_00002104
BDMAP_00002105
BDMAP_00002106
BDMAP_00002107
BDMAP_00002108
BDMAP_00002109
BDMAP_00002110
BDMAP_00002111
BDMAP_00002112
BDMAP_00002113
BDMAP_00002114
BDMAP_00002115
BDMAP_00002116
BDMAP_00002117
BDMAP_00002118
BDMAP_00002119
BDMAP_00002120
BDMAP_00002121
BDMAP_00002122
BDMAP_00002123
BDMAP_00002124
BDMAP_00002125
BDMAP_00002126
BDMAP_00002127
BDMAP_00002128
BDMAP_00002129
BDMAP_00002130
BDMAP_00002131
BDMAP_00002132
BDMAP_00002133
BDMAP_00002134
BDMAP_00002135
BDMAP_00002136
BDMAP_00002137
BDMAP_00002138
BDMAP_00002139
BDMAP_00002140
BDMAP_00002141
BDMAP_00002142
BDMAP_00002143
BDMAP_00002144
BDMAP_00002145
BDMAP_00002146
BDMAP_00002147
BDMAP_00002148
BDMAP_00002149
BDMAP_00002150
BDMAP_00002151
BDMAP_00002152
BDMAP_00002153
BDMAP_00002154
BDMAP_00002155
BDMAP_00002156
BDMAP_00002157
BDMAP_00002158
BDMAP_00002159
BDMAP_00002160
BDMAP_00002161
BDMAP_00002162
BDMAP_00002163
BDMAP_00002164
BDMAP_00002165
BDMAP_00002166
BDMAP_00002167
BDMAP_00002168
BDMAP_00002169
BDMAP_00002170
BDMAP_00002171
BDMAP_00002172
BDMAP_00002173
BDMAP_00002174
BDMAP_00002175
BDMAP_00002176
BDMAP_00002177
BDMAP_00002178
BDMAP_00002179
BDMAP_00002180
BDMAP_00002181
BDMAP_00002182
BDMAP_00002183
BDMAP_00002184
BDMAP_00002185
BDMAP_00002186
BDMAP_00002187
BDMAP_00002188
BDMAP_00002189
BDMAP_00002190
BDMAP_00002191
BDMAP_00002192
BDMAP_00002193
BDMAP_00002194
BDMAP_00002195
BDMAP_00002196
BDMAP_00002197
BDMAP_00002198
BDMAP_00002199
BDMAP_00002200
BDMAP_00002201
BDMAP_00002202
BDMAP_00002203
BDMAP_00002204
BDMAP_00002205
BDMAP_00002206
BDMAP_00002207
BDMAP_00002208
BDMAP_00002209
BDMAP_00002210
BDMAP_00002211
BDMAP_00002212
BDMAP_00002213
BDMAP_00002214
BDMAP_00002215
BDMAP_00002216
BDMAP_00002217
BDMAP_00002218
BDMAP_00002219
BDMAP_00002220
BDMAP_00002221
BDMAP_00002222
BDMAP_00002223
BDMAP_00002224
BDMAP_00002225
BDMAP_00002226
BDMAP_00002227
BDMAP_00002228
BDMAP_00002229
BDMAP_00002230
BDMAP_00002231
BDMAP_00002232
BDMAP_00002233
BDMAP_00002234
BDMAP_00002235
BDMAP_00002236
BDMAP_00002237
BDMAP_00002238
BDMAP_00002239
BDMAP_00002240
BDMAP_00002241
BDMAP_00002242
BDMAP_00002243
BDMAP_00002244
BDMAP_00002245
BDMAP_00002246
BDMAP_00002247
BDMAP_00002248
BDMAP_00002249
BDMAP_00002250
BDMAP_00002251
BDMAP_00002252
BDMAP_00002253
BDMAP_00002254
BDMAP_00002255
BDMAP_00002256
BDMAP_00002257
BDMAP_00002258
BDMAP_00002259
BDMAP_00002260
BDMAP_00002261
BDMAP_00002262
BDMAP_00002263
BDMAP_00002264
BDMAP_00002265
BDMAP_00002266
BDMAP_00002267
BDMAP_00002268
BDMAP_00002269
BDMAP_00002270
BDMAP_00002271
BDMAP_00002272
BDMAP_00002273
BDMAP_00002274
BDMAP_00002275
BDMAP_00002276
BDMAP_00002277
BDMAP_00002278
BDMAP_00002279
BDMAP_00002280
BDMAP_00002281
BDMAP_00002282
BDMAP_00002283
BDMAP_00002284
BDMAP_00002285
BDMAP_00002286
BDMAP_00002287
BDMAP_00002288
BDMAP_00002289
BDMAP_00002290
BDMAP_00002291
BDMAP_00002292
BDMAP_00002293
BDMAP_00002294
BDMAP_00002295
BDMAP_00002296
BDMAP_00002297
BDMAP_00002298
BDMAP_00002299
BDMAP_00002300
BDMAP_00002301
BDMAP_00002302
BDMAP_00002303
BDMAP_00002304
BDMAP_00002305
BDMAP_00002306
BDMAP_00002307
BDMAP_00002308
BDMAP_00002309
BDMAP_00002310
BDMAP_00002311
BDMAP_00002312
BDMAP_00002313
BDMAP_00002314
BDMAP_00002315
BDMAP_00002316
BDMAP_00002317
BDMAP_00002318
BDMAP_00002319
BDMAP_00002320
BDMAP_00002321
BDMAP_00002322
BDMAP_00002323
BDMAP_00002324
BDMAP_00002325
BDMAP_00002326
BDMAP_00002327
BDMAP_00002328
BDMAP_00002329
BDMAP_00002330
BDMAP_00002331
BDMAP_00002332
BDMAP_00002333
BDMAP_00002334
BDMAP_00002335
BDMAP_00002336
BDMAP_00002337
BDMAP_00002338
BDMAP_00002339
BDMAP_00002340
BDMAP_00002341
BDMAP_00002342
BDMAP_00002343
BDMAP_00002344
BDMAP_00002345
BDMAP_00002346
BDMAP_00002347
BDMAP_00002348
BDMAP_00002349
BDMAP_00002350
BDMAP_00002351
BDMAP_00002352
BDMAP_00002353
BDMAP_00002354
BDMAP_00002355
BDMAP_00002356
BDMAP_00002357
BDMAP_00002358
BDMAP_00002359
BDMAP_00002360
BDMAP_00002361
BDMAP_00002362
BDMAP_00002363
BDMAP_00002364
BDMAP_00002365
BDMAP_00002366
BDMAP_00002367
BDMAP_00002368
BDMAP_00002369
BDMAP_00002370
BDMAP_00002371
BDMAP_00002372
BDMAP_00002373
BDMAP_00002374
BDMAP_00002375
BDMAP_00002376
BDMAP_00002377
BDMAP_00002378
BDMAP_00002379
BDMAP_00002380
BDMAP_00002381
BDMAP_00002382
BDMAP_00002383
BDMAP_00002384
BDMAP_00002385
BDMAP_00002386
BDMAP_00002387
BDMAP_00002388
BDMAP_00002389
BDMAP_00002390
BDMAP_00002391
BDMAP_00002392
BDMAP_00002393
BDMAP_00002394
BDMAP_00002395
BDMAP_00002396
BDMAP_00002397
BDMAP_00002398
BDMAP_00002399
BDMAP_00002400
BDMAP_00002401
BDMAP_00002402
BDMAP_00002403
BDMAP_00002404
BDMAP_00002405
BDMAP_00002406
BDMAP_00002407
BDMAP_00002408
BDMAP_00002409
BDMAP_00002410
BDMAP_00002411
BDMAP_00002412
BDMAP_00002413
BDMAP_00002414
BDMAP_00002415
BDMAP_00002416
BDMAP_00002417
BDMAP_00002418
BDMAP_00002419
BDMAP_00002420
BDMAP_00002421
BDMAP_00002422
BDMAP_00002423
BDMAP_00002424
BDMAP_00002425
BDMAP_00002426
BDMAP_00002427
BDMAP_00002428
BDMAP_00002429
BDMAP_00002430
BDMAP_00002431
BDMAP_00002432
BDMAP_00002433
BDMAP_00002434
BDMAP_00002435
BDMAP_00002436
BDMAP_00002437
BDMAP_00002438
BDMAP_00002439
BDMAP_00002440
BDMAP_00002441
BDMAP_00002442
BDMAP_00002443
BDMAP_00002444
BDMAP_00002445
BDMAP_00002446
BDMAP_00002447
BDMAP_00002448
BDMAP_00002449
BDMAP_00002450
BDMAP_00002451
BDMAP_00002452
BDMAP_00002453
BDMAP_00002454
BDMAP_00002455
BDMAP_00002456
BDMAP_00002457
BDMAP_00002458
BDMAP_00002459
BDMAP_00002460
BDMAP_00002461
BDMAP_00002462
BDMAP_00002463
BDMAP_00002464
BDMAP_00002465
BDMAP_00002466
BDMAP_00002467
BDMAP_00002468
BDMAP_00002469
BDMAP_00002470
BDMAP_00002471
BDMAP_00002472
BDMAP_00002473
BDMAP_00002474
BDMAP_00002475
BDMAP_00002476
BDMAP_00002477
BDMAP_00002478
BDMAP_00002479
BDMAP_00002480
BDMAP_00002481
BDMAP_00002482
BDMAP_00002483
BDMAP_00002484
BDMAP_00002485
BDMAP_00002486
BDMAP_00002487
BDMAP_00002488
BDMAP_00002489
BDMAP_00002490
BDMAP_00002491
BDMAP_00002492
BDMAP_00002493
BDMAP_00002494
BDMAP_00002495
BDMAP_00002496
BDMAP_00002497
BDMAP_00002498
BDMAP_00002499
BDMAP_00002500
BDMAP_00002501
BDMAP_00002502
BDMAP_00002503
BDMAP_00002504
BDMAP_00002505
BDMAP_00002506
BDMAP_00002507
BDMAP_00002508
BDMAP_00002509
BDMAP_00002510
BDMAP_00002511
BDMAP_00002512
BDMAP_00002513
BDMAP_00002514
BDMAP_00002515
BDMAP_00002516
BDMAP_00002517
BDMAP_00002518
BDMAP_00002519
BDMAP_00002520
BDMAP_00002521
BDMAP_00002522
BDMAP_00002523
BDMAP_00002524
BDMAP_00002525
BDMAP_00002526
BDMAP_00002527
BDMAP_00002528
BDMAP_00002529
BDMAP_00002530
BDMAP_00002531
BDMAP_00002532
BDMAP_00002533
BDMAP_00002534
BDMAP_00002535
BDMAP_00002536
BDMAP_00002537
BDMAP_00002538
BDMAP_00002539
BDMAP_00002540
BDMAP_00002541
BDMAP_00002542
BDMAP_00002543
BDMAP_00002544
BDMAP_00002545
BDMAP_00002546
BDMAP_00002547
BDMAP_00002548
BDMAP_00002549
BDMAP_00002550
BDMAP_00002551
BDMAP_00002552
BDMAP_00002553
BDMAP_00002554
BDMAP_00002555
BDMAP_00002556
BDMAP_00002557
BDMAP_00002558
BDMAP_00002559
BDMAP_00002560
BDMAP_00002561
BDMAP_00002562
BDMAP_00002563
BDMAP_00002564
BDMAP_00002565
BDMAP_00002566
BDMAP_00002567
BDMAP_00002568
BDMAP_00002569
BDMAP_00002570
BDMAP_00002571
BDMAP_00002572
BDMAP_00002573
BDMAP_00002574
BDMAP_00002575
BDMAP_00002576
BDMAP_00002577
BDMAP_00002578
BDMAP_00002579
BDMAP_00002580
BDMAP_00002581
BDMAP_00002582
BDMAP_00002583
BDMAP_00002584
BDMAP_00002585
BDMAP_00002586
BDMAP_00002587
BDMAP_00002588
BDMAP_00002589
BDMAP_00002590
BDMAP_00002591
BDMAP_00002592
BDMAP_00002593
BDMAP_00002594
BDMAP_00002595
BDMAP_00002596
BDMAP_00002597
BDMAP_00002598
BDMAP_00002599
BDMAP_00002600
BDMAP_00002601
BDMAP_00002602
BDMAP_00002603
BDMAP_00002604
BDMAP_00002605
BDMAP_00002606
BDMAP_00002607
BDMAP_00002608
BDMAP_00002609
BDMAP_00002610
BDMAP_00002611
BDMAP_00002612
BDMAP_00002613
BDMAP_00002614
BDMAP_00002615
BDMAP_00002616
BDMAP_00002617
BDMAP_00002618
BDMAP_00002619
BDMAP_00002620
BDMAP_00002621
BDMAP_00002622
BDMAP_00002623
BDMAP_00002624
BDMAP_00002625
BDMAP_00002626
BDMAP_00002627
BDMAP_00002628
BDMAP_00002629
BDMAP_00002630
BDMAP_00002631
BDMAP_00002632
BDMAP_00002633
BDMAP_00002634
BDMAP_00002635
BDMAP_00002636
BDMAP_00002637
BDMAP_00002638
BDMAP_00002639
BDMAP_00002640
BDMAP_00002641
BDMAP_00002642
BDMAP_00002643
BDMAP_00002644
BDMAP_00002645
BDMAP_00002646
BDMAP_00002647
BDMAP_00002648
BDMAP_00002649
BDMAP_00002650
BDMAP_00002651
BDMAP_00002652
BDMAP_00002653
BDMAP_00002654
BDMAP_00002655
BDMAP_00002656
BDMAP_00002657
BDMAP_00002658
BDMAP_00002659
BDMAP_00002660
BDMAP_00002661
BDMAP_00002662
BDMAP_00002663
BDMAP_00002664
BDMAP_00002665
BDMAP_00002666
BDMAP_00002667
BDMAP_00002668
BDMAP_00002669
BDMAP_00002670
BDMAP_00002671
BDMAP_00002672
BDMAP_00002673
BDMAP_00002674
BDMAP_00002675
BDMAP_00002676
BDMAP_00002677
BDMAP_00002678
BDMAP_00002679
BDMAP_00002680
BDMAP_00002681
BDMAP_00002682
BDMAP_00002683
BDMAP_00002684
BDMAP_00002685
BDMAP_00002686
BDMAP_00002687
BDMAP_00002688
BDMAP_00002689
BDMAP_00002690
BDMAP_00002691
BDMAP_00002692
BDMAP_00002693
BDMAP_00002694
BDMAP_00002695
BDMAP_00002696
BDMAP_00002697
BDMAP_00002698
BDMAP_00002699
BDMAP_00002700
BDMAP_00002701
BDMAP_00002702
BDMAP_00002703
BDMAP_00002704
BDMAP_00002705
BDMAP_00002706
BDMAP_00002707
BDMAP_00002708
BDMAP_00002709
BDMAP_00002710
BDMAP_00002711
BDMAP_00002712
BDMAP_00002713
BDMAP_00002714
BDMAP_00002715
BDMAP_00002716
BDMAP_00002717
BDMAP_00002718
BDMAP_00002719
BDMAP_00002720
BDMAP_00002721
BDMAP_00002722
BDMAP_00002723
BDMAP_00002724
BDMAP_00002725
BDMAP_00002726
BDMAP_00002727
BDMAP_00002728
BDMAP_00002729
BDMAP_00002730
BDMAP_00002731
BDMAP_00002732
BDMAP_00002733
BDMAP_00002734
BDMAP_00002735
BDMAP_00002736
BDMAP_00002737
BDMAP_00002738
BDMAP_00002739
BDMAP_00002740
BDMAP_00002741
BDMAP_00002742
BDMAP_00002743
BDMAP_00002744
BDMAP_00002745
BDMAP_00002746
BDMAP_00002747
BDMAP_00002748
BDMAP_00002749
BDMAP_00002750
BDMAP_00002751
BDMAP_00002752
BDMAP_00002753
BDMAP_00002754
BDMAP_00002755
BDMAP_00002756
BDMAP_00002757
BDMAP_00002758
BDMAP_00002759
BDMAP_00002760
BDMAP_00002761
BDMAP_00002762
BDMAP_00002763
BDMAP_00002764
BDMAP_00002765
BDMAP_00002766
BDMAP_00002767
BDMAP_00002768
BDMAP_00002769
BDMAP_00002770
BDMAP_00002771
BDMAP_00002772
BDMAP_00002773
BDMAP_00002774
BDMAP_00002775
BDMAP_00002776
BDMAP_00002777
BDMAP_00002778
BDMAP_00002779
BDMAP_00002780
BDMAP_00002781
BDMAP_00002782
BDMAP_00002783
BDMAP_00002784
BDMAP_00002785
BDMAP_00002786
BDMAP_00002787
BDMAP_00002788
BDMAP_00002789
BDMAP_00002790
BDMAP_00002791
BDMAP_00002792
BDMAP_00002793
BDMAP_00002794
BDMAP_00002795
BDMAP_00002796
BDMAP_00002797
BDMAP_00002798
BDMAP_00002799
BDMAP_00002800
BDMAP_00002801
BDMAP_00002802
BDMAP_00002803
BDMAP_00002804
BDMAP_00002805
BDMAP_00002806
BDMAP_00002807
BDMAP_00002808
BDMAP_00002809
BDMAP_00002810
BDMAP_00002811
BDMAP_00002812
BDMAP_00002813
BDMAP_00002814
BDMAP_00002815
BDMAP_00002816
BDMAP_00002817
BDMAP_00002818
BDMAP_00002819
BDMAP_00002820
BDMAP_00002821
BDMAP_00002822
BDMAP_00002823
BDMAP_00002824
BDMAP_00002825
BDMAP_00002826
BDMAP_00002827
BDMAP_00002828
BDMAP_00002829
BDMAP_00002830
BDMAP_00002831
BDMAP_00002832
BDMAP_00002833
BDMAP_00002834
BDMAP_00002835
BDMAP_00002836
BDMAP_00002837
BDMAP_00002838
BDMAP_00002839
BDMAP_00002840
BDMAP_00002841
BDMAP_00002842
BDMAP_00002843
BDMAP_00002844
BDMAP_00002845
BDMAP_00002846
BDMAP_00002847
BDMAP_00002848
BDMAP_00002849
BDMAP_00002850
BDMAP_00002851
BDMAP_00002852
BDMAP_00002853
BDMAP_00002854
BDMAP_00002855
BDMAP_00002856
BDMAP_00002857
BDMAP_00002858
BDMAP_00002859
BDMAP_00002860
BDMAP_00002861
BDMAP_00002862
BDMAP_00002863
BDMAP_00002864
BDMAP_00002865
BDMAP_00002866
BDMAP_00002867
BDMAP_00002868
BDMAP_00002869
BDMAP_00002870
BDMAP_00002871
BDMAP_00002872
BDMAP_00002873
BDMAP_00002874
BDMAP_00002875
BDMAP_00002876
BDMAP_00002877
BDMAP_00002878
BDMAP_00002879
BDMAP_00002880
BDMAP_00002881
BDMAP_00002882
BDMAP_00002883
BDMAP_00002884
BDMAP_00002885
BDMAP_00002886
BDMAP_00002887
BDMAP_00002888
BDMAP_00002889
BDMAP_00002890
BDMAP_00002891
BDMAP_00002892
BDMAP_00002893
BDMAP_00002894
BDMAP_00002895
BDMAP_00002896
BDMAP_00002897
BDMAP_00002898
BDMAP_00002899
BDMAP_00002900
BDMAP_00002901
BDMAP_00002902
BDMAP_00002903
BDMAP_00002904
BDMAP_00002905
BDMAP_00002906
BDMAP_00002907
BDMAP_00002908
BDMAP_00002909
BDMAP_00002910
BDMAP_00002911
BDMAP_00002912
BDMAP_00002913
BDMAP_00002914
BDMAP_00002915
BDMAP_00002916
BDMAP_00002917
BDMAP_00002918
BDMAP_00002919
BDMAP_00002920
BDMAP_00002921
BDMAP_00002922
BDMAP_00002923
BDMAP_00002924
BDMAP_00002925
BDMAP_00002926
BDMAP_00002927
BDMAP_00002928
BDMAP_00002929
BDMAP_00002930
BDMAP_00002931
BDMAP_00002932
BDMAP_00002933
BDMAP_00002934
BDMAP_00002935
BDMAP_00002936
BDMAP_00002937
BDMAP_00002938
BDMAP_00002939
BDMAP_00002940
BDMAP_00002941
BDMAP_00002942
BDMAP_00002943
BDMAP_00002944
BDMAP_00002945
BDMAP_00002946
BDMAP_00002947
BDMAP_00002948
BDMAP_00002949
BDMAP_00002950
BDMAP_00002951
BDMAP_00002952
BDMAP_00002953
BDMAP_00002954
BDMAP_00002955
BDMAP_00002956
BDMAP_00002957
BDMAP_00002958
BDMAP_00002959
BDMAP_00002960
BDMAP_00002961
BDMAP_00002962
BDMAP_00002963
BDMAP_00002964
BDMAP_00002965
BDMAP_00002966
BDMAP_00002967
BDMAP_00002968
BDMAP_00002969
BDMAP_00002970
BDMAP_00002971
BDMAP_00002972
BDMAP_00002973
BDMAP_00002974
BDMAP_00002975
BDMAP_00002976
BDMAP_00002977
BDMAP_00002978
BDMAP_00002979
BDMAP_00002980
BDMAP_00002981
BDMAP_00002982
BDMAP_00002983
BDMAP_00002984
BDMAP_00002985
BDMAP_00002986
BDMAP_00002987
BDMAP_00002988
BDMAP_00002989
BDMAP_00002990
BDMAP_00002991
BDMAP_00002992
BDMAP_00002993
BDMAP_00002994
BDMAP_00002995
BDMAP_00002996
BDMAP_00002997
BDMAP_00002998
BDMAP_00002999
BDMAP_00003000
BDMAP_00003001
BDMAP_00003002
BDMAP_00003003
BDMAP_00003004
BDMAP_00003005
BDMAP_00003006
BDMAP_00003007
BDMAP_00003008
BDMAP_00003009
BDMAP_00003010
BDMAP_00003011
BDMAP_00003012
BDMAP_00003013
BDMAP_00003014
BDMAP_00003015
BDMAP_00003016
BDMAP_00003017
BDMAP_00003018
BDMAP_00003019
BDMAP_00003020
BDMAP_00003021
BDMAP_00003022
BDMAP_00003023
BDMAP_00003024
BDMAP_00003025
BDMAP_00003026
BDMAP_00003027
BDMAP_00003028
BDMAP_00003029
BDMAP_00003030
BDMAP_00003031
BDMAP_00003032
BDMAP_00003033
BDMAP_00003034
BDMAP_00003035
BDMAP_00003036
BDMAP_00003037
BDMAP_00003038
BDMAP_00003039
BDMAP_00003040
BDMAP_00003041
BDMAP_00003042
BDMAP_00003043
BDMAP_00003044
BDMAP_00003045
BDMAP_00003046
BDMAP_00003047
BDMAP_00003048
BDMAP_00003049
BDMAP_00003050
BDMAP_00003051
BDMAP_00003052
BDMAP_00003053
BDMAP_00003054
BDMAP_00003055
BDMAP_00003056
BDMAP_00003057
BDMAP_00003058
BDMAP_00003059
BDMAP_00003060
BDMAP_00003061
BDMAP_00003062
BDMAP_00003063
BDMAP_00003064
BDMAP_00003065
BDMAP_00003066
BDMAP_00003067
BDMAP_00003068
BDMAP_00003069
BDMAP_00003070
BDMAP_00003071
BDMAP_00003072
BDMAP_00003073
BDMAP_00003074
BDMAP_00003075
BDMAP_00003076
BDMAP_00003077
BDMAP_00003078
BDMAP_00003079
BDMAP_00003080
BDMAP_00003081
BDMAP_00003082
BDMAP_00003083
BDMAP_00003084
BDMAP_00003085
BDMAP_00003086
BDMAP_00003087
BDMAP_00003088
BDMAP_00003089
BDMAP_00003090
BDMAP_00003091
BDMAP_00003092
BDMAP_00003093
BDMAP_00003094
BDMAP_00003095
BDMAP_00003096
BDMAP_00003097
BDMAP_00003098
BDMAP_00003099
BDMAP_00003100
BDMAP_00003101
BDMAP_00003102
BDMAP_00003103
BDMAP_00003104
BDMAP_00003105
BDMAP_00003106
BDMAP_00003107
BDMAP_00003108
BDMAP_00003109
BDMAP_00003110
BDMAP_00003111
BDMAP_00003112
BDMAP_00003113
BDMAP_00003114
BDMAP_00003115
BDMAP_00003116
BDMAP_00003117
BDMAP_00003118
BDMAP_00003119
BDMAP_00003120
BDMAP_00003121
BDMAP_00003122
BDMAP_00003123
BDMAP_00003124
BDMAP_00003125
BDMAP_00003126
BDMAP_00003127
BDMAP_00003128
BDMAP_00003129
BDMAP_00003130
BDMAP_00003131
BDMAP_00003132
BDMAP_00003133
BDMAP_00003134
BDMAP_00003135
BDMAP_00003136
BDMAP_00003137
BDMAP_00003138
BDMAP_00003139
BDMAP_00003140
BDMAP_00003141
BDMAP_00003142
BDMAP_00003143
BDMAP_00003144
BDMAP_00003145
BDMAP_00003146
BDMAP_00003147
BDMAP_00003148
BDMAP_00003149
BDMAP_00003150
BDMAP_00003151
BDMAP_00003152
BDMAP_00003153
BDMAP_00003154
BDMAP_00003155
BDMAP_00003156
BDMAP_00003157
BDMAP_00003158
BDMAP_00003159
BDMAP_00003160
BDMAP_00003161
BDMAP_00003162
BDMAP_00003163
BDMAP_00003164
BDMAP_00003165
BDMAP_00003166
BDMAP_00003167
BDMAP_00003168
BDMAP_00003169
BDMAP_00003170
BDMAP_00003171
BDMAP_00003172
BDMAP_00003173
BDMAP_00003174
BDMAP_00003175
BDMAP_00003176
BDMAP_00003177
BDMAP_00003178
BDMAP_00003179
BDMAP_00003180
BDMAP_00003181
BDMAP_00003182
BDMAP_00003183
BDMAP_00003184
BDMAP_00003185
BDMAP_00003186
BDMAP_00003187
BDMAP_00003188
BDMAP_00003189
BDMAP_00003190
BDMAP_00003191
BDMAP_00003192
BDMAP_00003193
BDMAP_00003194
BDMAP_00003195
BDMAP_00003196
BDMAP_00003197
BDMAP_00003198
BDMAP_00003199
BDMAP_00003200
BDMAP_00003201
BDMAP_00003202
BDMAP_00003203
BDMAP_00003204
BDMAP_00003205
BDMAP_00003206
BDMAP_00003207
BDMAP_00003208
BDMAP_00003209
BDMAP_00003210
BDMAP_00003211
BDMAP_00003212
BDMAP_00003213
BDMAP_00003214
BDMAP_00003215
BDMAP_00003216
BDMAP_00003217
BDMAP_00003218
BDMAP_00003219
BDMAP_00003220
BDMAP_00003221
BDMAP_00003222
BDMAP_00003223
BDMAP_00003224
BDMAP_00003225
BDMAP_00003226
BDMAP_00003227
BDMAP_00003228
BDMAP_00003229
BDMAP_00003230
BDMAP_00003231
BDMAP_00003232
BDMAP_00003233
BDMAP_00003234
BDMAP_00003235
BDMAP_00003236
BDMAP_00003237
BDMAP_00003238
BDMAP_00003239
BDMAP_00003240
BDMAP_00003241
BDMAP_00003242
BDMAP_00003243
BDMAP_00003244
BDMAP_00003245
BDMAP_00003246
BDMAP_00003247
BDMAP_00003248
BDMAP_00003249
BDMAP_00003250
BDMAP_00003251
BDMAP_00003252
BDMAP_00003253
BDMAP_00003254
BDMAP_00003255
BDMAP_00003256
BDMAP_00003257
BDMAP_00003258
BDMAP_00003259
BDMAP_00003260
BDMAP_00003261
BDMAP_00003262
BDMAP_00003263
BDMAP_00003264
BDMAP_00003265
BDMAP_00003266
BDMAP_00003267
BDMAP_00003268
BDMAP_00003269
BDMAP_00003270
BDMAP_00003271
BDMAP_00003272
BDMAP_00003273
BDMAP_00003274
BDMAP_00003275
BDMAP_00003276
BDMAP_00003277
BDMAP_00003278
BDMAP_00003279
BDMAP_00003280
BDMAP_00003281
BDMAP_00003282
BDMAP_00003283
BDMAP_00003284
BDMAP_00003285
BDMAP_00003286
BDMAP_00003287
BDMAP_00003288
BDMAP_00003289
BDMAP_00003290
BDMAP_00003291
BDMAP_00003292
BDMAP_00003293
BDMAP_00003294
BDMAP_00003295
BDMAP_00003296
BDMAP_00003297
BDMAP_00003298
BDMAP_00003299
BDMAP_00003300
BDMAP_00003301
BDMAP_00003302
BDMAP_00003303
BDMAP_00003304
BDMAP_00003305
BDMAP_00003306
BDMAP_00003307
BDMAP_00003308
BDMAP_00003309
BDMAP_00003310
BDMAP_00003311
BDMAP_00003312
BDMAP_00003313
BDMAP_00003314
BDMAP_00003315
BDMAP_00003316
BDMAP_00003317
BDMAP_00003318
BDMAP_00003319
BDMAP_00003320
BDMAP_00003321
BDMAP_00003322
BDMAP_00003323
BDMAP_00003324
BDMAP_00003325
BDMAP_00003326
BDMAP_00003327
BDMAP_00003328
BDMAP_00003329
BDMAP_00003330
BDMAP_00003331
BDMAP_00003332
BDMAP_00003333
BDMAP_00003334
BDMAP_00003335
BDMAP_00003336
BDMAP_00003337
BDMAP_00003338
BDMAP_00003339
BDMAP_00003340
BDMAP_00003341
BDMAP_00003342
BDMAP_00003343
BDMAP_00003344
BDMAP_00003345
BDMAP_00003346
BDMAP_00003347
BDMAP_00003348
BDMAP_00003349
BDMAP_00003350
BDMAP_00003351
BDMAP_00003352
BDMAP_00003353
BDMAP_00003354
BDMAP_00003355
BDMAP_00003356
BDMAP_00003357
BDMAP_00003358
BDMAP_00003359
BDMAP_00003360
BDMAP_00003361
BDMAP_00003362
BDMAP_00003363
BDMAP_00003364
BDMAP_00003365
BDMAP_00003366
BDMAP_00003367
BDMAP_00003368
BDMAP_00003369
BDMAP_00003370
BDMAP_00003371
BDMAP_00003372
BDMAP_00003373
BDMAP_00003374
BDMAP_00003375
BDMAP_00003376
BDMAP_00003377
BDMAP_00003378
BDMAP_00003379
BDMAP_00003380
BDMAP_00003381
BDMAP_00003382
BDMAP_00003383
BDMAP_00003384
BDMAP_00003385
BDMAP_00003386
BDMAP_00003387
BDMAP_00003388
BDMAP_00003389
BDMAP_00003390
BDMAP_00003391
BDMAP_00003392
BDMAP_00003393
BDMAP_00003394
BDMAP_00003395
BDMAP_00003396
BDMAP_00003397
BDMAP_00003398
BDMAP_00003399
BDMAP_00003400
BDMAP_00003401
BDMAP_00003402
BDMAP_00003403
BDMAP_00003404
BDMAP_00003405
BDMAP_00003406
BDMAP_00003407
BDMAP_00003408
BDMAP_00003409
BDMAP_00003410
BDMAP_00003411
BDMAP_00003412
BDMAP_00003413
BDMAP_00003414
BDMAP_00003415
BDMAP_00003416
BDMAP_00003417
BDMAP_00003418
BDMAP_00003419
BDMAP_00003420
BDMAP_00003421
BDMAP_00003422
BDMAP_00003423
BDMAP_00003424
BDMAP_00003425
BDMAP_00003426
BDMAP_00003427
BDMAP_00003428
BDMAP_00003429
BDMAP_00003430
BDMAP_00003431
BDMAP_00003432
BDMAP_00003433
BDMAP_00003434
BDMAP_00003435
BDMAP_00003436
BDMAP_00003437
BDMAP_00003438
BDMAP_00003439
BDMAP_00003440
BDMAP_00003441
BDMAP_00003442
BDMAP_00003443
BDMAP_00003444
BDMAP_00003445
BDMAP_00003446
BDMAP_00003447
BDMAP_00003448
BDMAP_00003449
BDMAP_00003450
BDMAP_00003451
BDMAP_00003452
BDMAP_00003453
BDMAP_00003454
BDMAP_00003455
BDMAP_00003456
BDMAP_00003457
BDMAP_00003458
BDMAP_00003459
BDMAP_00003460
BDMAP_00003461
BDMAP_00003462
BDMAP_00003463
BDMAP_00003464
BDMAP_00003465
BDMAP_00003466
BDMAP_00003467
BDMAP_00003468
BDMAP_00003469
BDMAP_00003470
BDMAP_00003471
BDMAP_00003472
BDMAP_00003473
BDMAP_00003474
BDMAP_00003475
BDMAP_00003476
BDMAP_00003477
BDMAP_00003478
BDMAP_00003479
BDMAP_00003480
BDMAP_00003481
BDMAP_00003482
BDMAP_00003483
BDMAP_00003484
BDMAP_00003485
BDMAP_00003486
BDMAP_00003487
BDMAP_00003488
BDMAP_00003489
BDMAP_00003490
BDMAP_00003491
BDMAP_00003492
BDMAP_00003493
BDMAP_00003494
BDMAP_00003495
BDMAP_00003496
BDMAP_00003497
BDMAP_00003498
BDMAP_00003499
BDMAP_00003500
BDMAP_00003501
BDMAP_00003502
BDMAP_00003503
BDMAP_00003504
BDMAP_00003505
BDMAP_00003506
BDMAP_00003507
BDMAP_00003508
BDMAP_00003509
BDMAP_00003510
BDMAP_00003511
BDMAP_00003512
BDMAP_00003513
BDMAP_00003514
BDMAP_00003515
BDMAP_00003516
BDMAP_00003517
BDMAP_00003518
BDMAP_00003519
BDMAP_00003520
BDMAP_00003521
BDMAP_00003522
BDMAP_00003523
BDMAP_00003524
BDMAP_00003525
BDMAP_00003526
BDMAP_00003527
BDMAP_00003528
BDMAP_00003529
BDMAP_00003530
BDMAP_00003531
BDMAP_00003532
BDMAP_00003533
BDMAP_00003534
BDMAP_00003535
BDMAP_00003536
BDMAP_00003537
BDMAP_00003538
BDMAP_00003539
BDMAP_00003540
BDMAP_00003541
BDMAP_00003542
BDMAP_00003543
BDMAP_00003544
BDMAP_00003545
BDMAP_00003546
BDMAP_00003547
BDMAP_00003548
BDMAP_00003549
BDMAP_00003550
BDMAP_00003551
BDMAP_00003552
BDMAP_00003553
BDMAP_00003554
BDMAP_00003555
BDMAP_00003556
BDMAP_00003557
BDMAP_00003558
BDMAP_00003559
BDMAP_00003560
BDMAP_00003561
BDMAP_00003562
BDMAP_00003563
BDMAP_00003564
BDMAP_00003565
BDMAP_00003566
BDMAP_00003567
BDMAP_00003568
BDMAP_00003569
BDMAP_00003570
BDMAP_00003571
BDMAP_00003572
BDMAP_00003573
BDMAP_00003574
BDMAP_00003575
BDMAP_00003576
BDMAP_00003577
BDMAP_00003578
BDMAP_00003579
BDMAP_00003580
BDMAP_00003581
BDMAP_00003582
BDMAP_00003583
BDMAP_00003584
BDMAP_00003585
BDMAP_00003586
BDMAP_00003587
BDMAP_00003588
BDMAP_00003589
BDMAP_00003590
BDMAP_00003591
BDMAP_00003592
BDMAP_00003593
BDMAP_00003594
BDMAP_00003595
BDMAP_00003596
BDMAP_00003597
BDMAP_00003598
BDMAP_00003599
BDMAP_00003600
BDMAP_00003601
BDMAP_00003602
BDMAP_00003603
BDMAP_00003604
BDMAP_00003605
BDMAP_00003606
BDMAP_00003607
BDMAP_00003608
BDMAP_00003609
BDMAP_00003610
BDMAP_00003611
BDMAP_00003612
BDMAP_00003613
BDMAP_00003614
BDMAP_00003615
BDMAP_00003616
BDMAP_00003617
BDMAP_00003618
BDMAP_00003619
BDMAP_00003620
BDMAP_00003621
BDMAP_00003622
BDMAP_00003623
BDMAP_00003624
BDMAP_00003625
BDMAP_00003626
BDMAP_00003627
BDMAP_00003628
BDMAP_00003629
BDMAP_00003630
BDMAP_00003631
BDMAP_00003632
BDMAP_00003633
BDMAP_00003634
BDMAP_00003635
BDMAP_00003636
BDMAP_00003637
BDMAP_00003638
BDMAP_00003639
BDMAP_00003640
BDMAP_00003641
BDMAP_00003642
BDMAP_00003643
BDMAP_00003644
BDMAP_00003645
BDMAP_00003646
BDMAP_00003647
BDMAP_00003648
BDMAP_00003649
BDMAP_00003650
BDMAP_00003651
BDMAP_00003652
BDMAP_00003653
BDMAP_00003654
BDMAP_00003655
BDMAP_00003656
BDMAP_00003657
BDMAP_00003658
BDMAP_00003659
BDMAP_00003660
BDMAP_00003661
BDMAP_00003662
BDMAP_00003663
BDMAP_00003664
BDMAP_00003665
BDMAP_00003666
BDMAP_00003667
BDMAP_00003668
BDMAP_00003669
BDMAP_00003670
BDMAP_00003671
BDMAP_00003672
BDMAP_00003673
BDMAP_00003674
BDMAP_00003675
BDMAP_00003676
BDMAP_00003677
BDMAP_00003678
BDMAP_00003679
BDMAP_00003680
BDMAP_00003681
BDMAP_00003682
BDMAP_00003683
BDMAP_00003684
BDMAP_00003685
BDMAP_00003686
BDMAP_00003687
BDMAP_00003688
BDMAP_00003689
BDMAP_00003690
BDMAP_00003691
BDMAP_00003692
BDMAP_00003693
BDMAP_00003694
BDMAP_00003695
BDMAP_00003696
BDMAP_00003697
BDMAP_00003698
BDMAP_00003699
BDMAP_00003700
BDMAP_00003701
BDMAP_00003702
BDMAP_00003703
BDMAP_00003704
BDMAP_00003705
BDMAP_00003706
BDMAP_00003707
BDMAP_00003708
BDMAP_00003709
BDMAP_00003710
BDMAP_00003711
BDMAP_00003712
BDMAP_00003713
BDMAP_00003714
BDMAP_00003715
BDMAP_00003716
BDMAP_00003717
BDMAP_00003718
BDMAP_00003719
BDMAP_00003720
BDMAP_00003721
BDMAP_00003722
BDMAP_00003723
BDMAP_00003724
BDMAP_00003725
BDMAP_00003726
BDMAP_00003727
BDMAP_00003728
BDMAP_00003729
BDMAP_00003730
BDMAP_00003731
BDMAP_00003732
BDMAP_00003733
BDMAP_00003734
BDMAP_00003735
BDMAP_00003736
BDMAP_00003737
BDMAP_00003738
BDMAP_00003739
BDMAP_00003740
BDMAP_00003741
BDMAP_00003742
BDMAP_00003743
BDMAP_00003744
BDMAP_00003745
BDMAP_00003746
BDMAP_00003747
BDMAP_00003748
BDMAP_00003749
BDMAP_00003750
BDMAP_00003751
BDMAP_00003752
BDMAP_00003753
BDMAP_00003754
BDMAP_00003755
BDMAP_00003756
BDMAP_00003757
BDMAP_00003758
BDMAP_00003759
BDMAP_00003760
BDMAP_00003761
BDMAP_00003762
BDMAP_00003763
BDMAP_00003764
BDMAP_00003765
BDMAP_00003766
BDMAP_00003767
BDMAP_00003768
BDMAP_00003769
BDMAP_00003770
BDMAP_00003771
BDMAP_00003772
BDMAP_00003773
BDMAP_00003774
BDMAP_00003775
BDMAP_00003776
BDMAP_00003777
BDMAP_00003778
BDMAP_00003779
BDMAP_00003780
BDMAP_00003781
BDMAP_00003782
BDMAP_00003783
BDMAP_00003784
BDMAP_00003785
BDMAP_00003786
BDMAP_00003787
BDMAP_00003788
BDMAP_00003789
BDMAP_00003790
BDMAP_00003791
BDMAP_00003792
BDMAP_00003793
BDMAP_00003794
BDMAP_00003795
BDMAP_00003796
BDMAP_00003797
BDMAP_00003798
BDMAP_00003799
BDMAP_00003800
BDMAP_00003801
BDMAP_00003802
BDMAP_00003803
BDMAP_00003804
BDMAP_00003805
BDMAP_00003806
BDMAP_00003807
BDMAP_00003808
BDMAP_00003809
BDMAP_00003810
BDMAP_00003811
BDMAP_00003812
BDMAP_00003813
BDMAP_00003814
BDMAP_00003815
BDMAP_00003816
BDMAP_00003817
BDMAP_00003818
BDMAP_00003819
BDMAP_00003820
BDMAP_00003821
BDMAP_00003822
BDMAP_00003823
BDMAP_00003824
BDMAP_00003825
BDMAP_00003826
BDMAP_00003827
BDMAP_00003828
BDMAP_00003829
BDMAP_00003830
BDMAP_00003831
BDMAP_00003832
BDMAP_00003833
BDMAP_00003834
BDMAP_00003835
BDMAP_00003836
BDMAP_00003837
BDMAP_00003838
BDMAP_00003839
BDMAP_00003840
BDMAP_00003841
BDMAP_00003842
BDMAP_00003843
BDMAP_00003844
BDMAP_00003845
BDMAP_00003846
BDMAP_00003847
BDMAP_00003848
BDMAP_00003849
BDMAP_00003850
BDMAP_00003851
BDMAP_00003852
BDMAP_00003853
BDMAP_00003854
BDMAP_00003855
BDMAP_00003856
BDMAP_00003857
BDMAP_00003858
BDMAP_00003859
BDMAP_00003860
BDMAP_00003861
BDMAP_00003862
BDMAP_00003863
BDMAP_00003864
BDMAP_00003865
BDMAP_00003866
BDMAP_00003867
BDMAP_00003868
BDMAP_00003869
BDMAP_00003870
BDMAP_00003871
BDMAP_00003872
BDMAP_00003873
BDMAP_00003874
BDMAP_00003875
BDMAP_00003876
BDMAP_00003877
BDMAP_00003878
BDMAP_00003879
BDMAP_00003880
BDMAP_00003881
BDMAP_00003882
BDMAP_00003883
BDMAP_00003884
BDMAP_00003885
BDMAP_00003886
BDMAP_00003887
BDMAP_00003888
BDMAP_00003889
BDMAP_00003890
BDMAP_00003891
BDMAP_00003892
BDMAP_00003893
BDMAP_00003894
BDMAP_00003895
BDMAP_00003896
BDMAP_00003897
BDMAP_00003898
BDMAP_00003899
BDMAP_00003900
BDMAP_00003901
BDMAP_00003902
BDMAP_00003903
BDMAP_00003904
BDMAP_00003905
BDMAP_00003906
BDMAP_00003907
BDMAP_00003908
BDMAP_00003909
BDMAP_00003910
BDMAP_00003911
BDMAP_00003912
BDMAP_00003913
BDMAP_00003914
BDMAP_00003915
BDMAP_00003916
BDMAP_00003917
BDMAP_00003918
BDMAP_00003919
BDMAP_00003920
BDMAP_00003921
BDMAP_00003922
BDMAP_00003923
BDMAP_00003924
BDMAP_00003925
BDMAP_00003926
BDMAP_00003927
BDMAP_00003928
BDMAP_00003929
BDMAP_00003930
BDMAP_00003931
BDMAP_00003932
BDMAP_00003933
BDMAP_00003934
BDMAP_00003935
BDMAP_00003936
BDMAP_00003937
BDMAP_00003938
BDMAP_00003939
BDMAP_00003940
BDMAP_00003941
BDMAP_00003942
BDMAP_00003943
BDMAP_00003944
BDMAP_00003945
BDMAP_00003946
BDMAP_00003947
BDMAP_00003948
BDMAP_00003949
BDMAP_00003950
BDMAP_00003951
BDMAP_00003952
BDMAP_00003953
BDMAP_00003954
BDMAP_00003955
BDMAP_00003956
BDMAP_00003957
BDMAP_00003958
BDMAP_00003959
BDMAP_00003960
BDMAP_00003961
BDMAP_00003962
BDMAP_00003963
BDMAP_00003964
BDMAP_00003965
BDMAP_00003966
BDMAP_00003967
BDMAP_00003968
BDMAP_00003969
BDMAP_00003970
BDMAP_00003971
BDMAP_00003972
BDMAP_00003973
BDMAP_00003974
BDMAP_00003975
BDMAP_00003976
BDMAP_00003977
BDMAP_00003978
BDMAP_00003979
BDMAP_00003980
BDMAP_00003981
BDMAP_00003982
BDMAP_00003983
BDMAP_00003984
BDMAP_00003985
BDMAP_00003986
BDMAP_00003987
BDMAP_00003988
BDMAP_00003989
BDMAP_00003990
BDMAP_00003991
BDMAP_00003992
BDMAP_00003993
BDMAP_00003994
BDMAP_00003995
BDMAP_00003996
BDMAP_00003997
BDMAP_00003998
BDMAP_00003999
BDMAP_00004000
BDMAP_00004001
BDMAP_00004002
BDMAP_00004003
BDMAP_00004004
BDMAP_00004005
BDMAP_00004006
BDMAP_00004007
BDMAP_00004008
BDMAP_00004009
BDMAP_00004010
BDMAP_00004011
BDMAP_00004012
BDMAP_00004013
BDMAP_00004014
BDMAP_00004015
BDMAP_00004016
BDMAP_00004017
BDMAP_00004018
BDMAP_00004019
BDMAP_00004020
BDMAP_00004021
BDMAP_00004022
BDMAP_00004023
BDMAP_00004024
BDMAP_00004025
BDMAP_00004026
BDMAP_00004027
BDMAP_00004028
BDMAP_00004029
BDMAP_00004030
BDMAP_00004031
BDMAP_00004032
BDMAP_00004033
BDMAP_00004034
BDMAP_00004035
BDMAP_00004036
BDMAP_00004037
BDMAP_00004038
BDMAP_00004039
BDMAP_00004040
BDMAP_00004041
BDMAP_00004042
BDMAP_00004043
BDMAP_00004044
BDMAP_00004045
BDMAP_00004046
BDMAP_00004047
BDMAP_00004048
BDMAP_00004049
BDMAP_00004050
BDMAP_00004051
BDMAP_00004052
BDMAP_00004053
BDMAP_00004054
BDMAP_00004055
BDMAP_00004056
BDMAP_00004057
BDMAP_00004058
BDMAP_00004059
BDMAP_00004060
BDMAP_00004061
BDMAP_00004062
BDMAP_00004063
BDMAP_00004064
BDMAP_00004065
BDMAP_00004066
BDMAP_00004067
BDMAP_00004068
BDMAP_00004069
BDMAP_00004070
BDMAP_00004071
BDMAP_00004072
BDMAP_00004073
BDMAP_00004074
BDMAP_00004075
BDMAP_00004076
BDMAP_00004077
BDMAP_00004078
BDMAP_00004079
BDMAP_00004080
BDMAP_00004081
BDMAP_00004082
BDMAP_00004083
BDMAP_00004084
BDMAP_00004085
BDMAP_00004086
BDMAP_00004087
BDMAP_00004088
BDMAP_00004089
BDMAP_00004090
BDMAP_00004091
BDMAP_00004092
BDMAP_00004093
BDMAP_00004094
BDMAP_00004095
BDMAP_00004096
BDMAP_00004097
BDMAP_00004098
BDMAP_00004099
BDMAP_00004100
BDMAP_00004101
BDMAP_00004102
BDMAP_00004103
BDMAP_00004104
BDMAP_00004105
BDMAP_00004106
BDMAP_00004107
BDMAP_00004108
BDMAP_00004109
BDMAP_00004110
BDMAP_00004111
BDMAP_00004112
BDMAP_00004113
BDMAP_00004114
BDMAP_00004115
BDMAP_00004116
BDMAP_00004117
BDMAP_00004118
BDMAP_00004119
BDMAP_00004120
BDMAP_00004121
BDMAP_00004122
BDMAP_00004123
BDMAP_00004124
BDMAP_00004125
BDMAP_00004126
BDMAP_00004127
BDMAP_00004128
BDMAP_00004129
BDMAP_00004130
BDMAP_00004131
BDMAP_00004132
BDMAP_00004133
BDMAP_00004134
BDMAP_00004135
BDMAP_00004136
BDMAP_00004137
BDMAP_00004138
BDMAP_00004139
BDMAP_00004140
BDMAP_00004141
BDMAP_00004142
BDMAP_00004143
BDMAP_00004144
BDMAP_00004145
BDMAP_00004146
BDMAP_00004147
BDMAP_00004148
BDMAP_00004149
BDMAP_00004150
BDMAP_00004151
BDMAP_00004152
BDMAP_00004153
BDMAP_00004154
BDMAP_00004155
BDMAP_00004156
BDMAP_00004157
BDMAP_00004158
BDMAP_00004159
BDMAP_00004160
BDMAP_00004161
BDMAP_00004162
BDMAP_00004163
BDMAP_00004164
BDMAP_00004165
BDMAP_00004166
BDMAP_00004167
BDMAP_00004168
BDMAP_00004169
BDMAP_00004170
BDMAP_00004171
BDMAP_00004172
BDMAP_00004173
BDMAP_00004174
BDMAP_00004175
BDMAP_00004176
BDMAP_00004177
BDMAP_00004178
BDMAP_00004179
BDMAP_00004180
BDMAP_00004181
BDMAP_00004182
BDMAP_00004183
BDMAP_00004184
BDMAP_00004185
BDMAP_00004186
BDMAP_00004187
BDMAP_00004188
BDMAP_00004189
BDMAP_00004190
BDMAP_00004191
BDMAP_00004192
BDMAP_00004193
BDMAP_00004194
BDMAP_00004195
BDMAP_00004196
BDMAP_00004197
BDMAP_00004198
BDMAP_00004199
BDMAP_00004200
BDMAP_00004201
BDMAP_00004202
BDMAP_00004203
BDMAP_00004204
BDMAP_00004205
BDMAP_00004206
BDMAP_00004207
BDMAP_00004208
BDMAP_00004209
BDMAP_00004210
BDMAP_00004211
BDMAP_00004212
BDMAP_00004213
BDMAP_00004214
BDMAP_00004215
BDMAP_00004216
BDMAP_00004217
BDMAP_00004218
BDMAP_00004219
BDMAP_00004220
BDMAP_00004221
BDMAP_00004222
BDMAP_00004223
BDMAP_00004224
BDMAP_00004225
BDMAP_00004226
BDMAP_00004227
BDMAP_00004228
BDMAP_00004229
BDMAP_00004230
BDMAP_00004231
BDMAP_00004232
BDMAP_00004233
BDMAP_00004234
BDMAP_00004235
BDMAP_00004236
BDMAP_00004237
BDMAP_00004238
BDMAP_00004239
BDMAP_00004240
BDMAP_00004241
BDMAP_00004242
BDMAP_00004243
BDMAP_00004244
BDMAP_00004245
BDMAP_00004246
BDMAP_00004247
BDMAP_00004248
BDMAP_00004249
BDMAP_00004250
BDMAP_00004251
BDMAP_00004252
BDMAP_00004253
BDMAP_00004254
BDMAP_00004255
BDMAP_00004256
BDMAP_00004257
BDMAP_00004258
BDMAP_00004259
BDMAP_00004260
BDMAP_00004261
BDMAP_00004262
BDMAP_00004263
BDMAP_00004264
BDMAP_00004265
BDMAP_00004266
BDMAP_00004267
BDMAP_00004268
BDMAP_00004269
BDMAP_00004270
BDMAP_00004271
BDMAP_00004272
BDMAP_00004273
BDMAP_00004274
BDMAP_00004275
BDMAP_00004276
BDMAP_00004277
BDMAP_00004278
BDMAP_00004279
BDMAP_00004280
BDMAP_00004281
BDMAP_00004282
BDMAP_00004283
BDMAP_00004284
BDMAP_00004285
BDMAP_00004286
BDMAP_00004287
BDMAP_00004288
BDMAP_00004289
BDMAP_00004290
BDMAP_00004291
BDMAP_00004292
BDMAP_00004293
BDMAP_00004294
BDMAP_00004295
BDMAP_00004296
BDMAP_00004297
BDMAP_00004298
BDMAP_00004299
BDMAP_00004300
BDMAP_00004301
BDMAP_00004302
BDMAP_00004303
BDMAP_00004304
BDMAP_00004305
BDMAP_00004306
BDMAP_00004307
BDMAP_00004308
BDMAP_00004309
BDMAP_00004310
BDMAP_00004311
BDMAP_00004312
BDMAP_00004313
BDMAP_00004314
BDMAP_00004315
BDMAP_00004316
BDMAP_00004317
BDMAP_00004318
BDMAP_00004319
BDMAP_00004320
BDMAP_00004321
BDMAP_00004322
BDMAP_00004323
BDMAP_00004324
BDMAP_00004325
BDMAP_00004326
BDMAP_00004327
BDMAP_00004328
BDMAP_00004329
BDMAP_00004330
BDMAP_00004331
BDMAP_00004332
BDMAP_00004333
BDMAP_00004334
BDMAP_00004335
BDMAP_00004336
BDMAP_00004337
BDMAP_00004338
BDMAP_00004339
BDMAP_00004340
BDMAP_00004341
BDMAP_00004342
BDMAP_00004343
BDMAP_00004344
BDMAP_00004345
BDMAP_00004346
BDMAP_00004347
BDMAP_00004348
BDMAP_00004349
BDMAP_00004350
BDMAP_00004351
BDMAP_00004352
BDMAP_00004353
BDMAP_00004354
BDMAP_00004355
BDMAP_00004356
BDMAP_00004357
BDMAP_00004358
BDMAP_00004359
BDMAP_00004360
BDMAP_00004361
BDMAP_00004362
BDMAP_00004363
BDMAP_00004364
BDMAP_00004365
BDMAP_00004366
BDMAP_00004367
BDMAP_00004368
BDMAP_00004369
BDMAP_00004370
BDMAP_00004371
BDMAP_00004372
BDMAP_00004373
BDMAP_00004374
BDMAP_00004375
BDMAP_00004376
BDMAP_00004377
BDMAP_00004378
BDMAP_00004379
BDMAP_00004380
BDMAP_00004381
BDMAP_00004382
BDMAP_00004383
BDMAP_00004384
BDMAP_00004385
BDMAP_00004386
BDMAP_00004387
BDMAP_00004388
BDMAP_00004389
BDMAP_00004390
BDMAP_00004391
BDMAP_00004392
BDMAP_00004393
BDMAP_00004394
BDMAP_00004395
BDMAP_00004396
BDMAP_00004397
BDMAP_00004398
BDMAP_00004399
BDMAP_00004400
BDMAP_00004401
BDMAP_00004402
BDMAP_00004403
BDMAP_00004404
BDMAP_00004405
BDMAP_00004406
BDMAP_00004407
BDMAP_00004408
BDMAP_00004409
BDMAP_00004410
BDMAP_00004411
BDMAP_00004412
BDMAP_00004413
BDMAP_00004414
BDMAP_00004415
BDMAP_00004416
BDMAP_00004417
BDMAP_00004418
BDMAP_00004419
BDMAP_00004420
BDMAP_00004421
BDMAP_00004422
BDMAP_00004423
BDMAP_00004424
BDMAP_00004425
BDMAP_00004426
BDMAP_00004427
BDMAP_00004428
BDMAP_00004429
BDMAP_00004430
BDMAP_00004431
BDMAP_00004432
BDMAP_00004433
BDMAP_00004434
BDMAP_00004435
BDMAP_00004436
BDMAP_00004437
BDMAP_00004438
BDMAP_00004439
BDMAP_00004440
BDMAP_00004441
BDMAP_00004442
BDMAP_00004443
BDMAP_00004444
BDMAP_00004445
BDMAP_00004446
BDMAP_00004447
BDMAP_00004448
BDMAP_00004449
BDMAP_00004450
BDMAP_00004451
BDMAP_00004452
BDMAP_00004453
BDMAP_00004454
BDMAP_00004455
BDMAP_00004456
BDMAP_00004457
BDMAP_00004458
BDMAP_00004459
BDMAP_00004460
BDMAP_00004461
BDMAP_00004462
BDMAP_00004463
BDMAP_00004464
BDMAP_00004465
BDMAP_00004466
BDMAP_00004467
BDMAP_00004468
BDMAP_00004469
BDMAP_00004470
BDMAP_00004471
BDMAP_00004472
BDMAP_00004473
BDMAP_00004474
BDMAP_00004475
BDMAP_00004476
BDMAP_00004477
BDMAP_00004478
BDMAP_00004479
BDMAP_00004480
BDMAP_00004481
BDMAP_00004482
BDMAP_00004483
BDMAP_00004484
BDMAP_00004485
BDMAP_00004486
BDMAP_00004487
BDMAP_00004488
BDMAP_00004489
BDMAP_00004490
BDMAP_00004491
BDMAP_00004492
BDMAP_00004493
BDMAP_00004494
BDMAP_00004495
BDMAP_00004496
BDMAP_00004497
BDMAP_00004498
BDMAP_00004499
BDMAP_00004500
BDMAP_00004501
BDMAP_00004502
BDMAP_00004503
BDMAP_00004504
BDMAP_00004505
BDMAP_00004506
BDMAP_00004507
BDMAP_00004508
BDMAP_00004509
BDMAP_00004510
BDMAP_00004511
BDMAP_00004512
BDMAP_00004513
BDMAP_00004514
BDMAP_00004515
BDMAP_00004516
BDMAP_00004517
BDMAP_00004518
BDMAP_00004519
BDMAP_00004520
BDMAP_00004521
BDMAP_00004522
BDMAP_00004523
BDMAP_00004524
BDMAP_00004525
BDMAP_00004526
BDMAP_00004527
BDMAP_00004528
BDMAP_00004529
BDMAP_00004530
BDMAP_00004531
BDMAP_00004532
BDMAP_00004533
BDMAP_00004534
BDMAP_00004535
BDMAP_00004536
BDMAP_00004537
BDMAP_00004538
BDMAP_00004539
BDMAP_00004540
BDMAP_00004541
BDMAP_00004542
BDMAP_00004543
BDMAP_00004544
BDMAP_00004545
BDMAP_00004546
BDMAP_00004547
BDMAP_00004548
BDMAP_00004549
BDMAP_00004550
BDMAP_00004551
BDMAP_00004552
BDMAP_00004553
BDMAP_00004554
BDMAP_00004555
BDMAP_00004556
BDMAP_00004557
BDMAP_00004558
BDMAP_00004559
BDMAP_00004560
BDMAP_00004561
BDMAP_00004562
BDMAP_00004563
BDMAP_00004564
BDMAP_00004565
BDMAP_00004566
BDMAP_00004567
BDMAP_00004568
BDMAP_00004569
BDMAP_00004570
BDMAP_00004571
BDMAP_00004572
BDMAP_00004573
BDMAP_00004574
BDMAP_00004575
BDMAP_00004576
BDMAP_00004577
BDMAP_00004578
BDMAP_00004579
BDMAP_00004580
BDMAP_00004581
BDMAP_00004582
BDMAP_00004583
BDMAP_00004584
BDMAP_00004585
BDMAP_00004586
BDMAP_00004587
BDMAP_00004588
BDMAP_00004589
BDMAP_00004590
BDMAP_00004591
BDMAP_00004592
BDMAP_00004593
BDMAP_00004594
BDMAP_00004595
BDMAP_00004596
BDMAP_00004597
BDMAP_00004598
BDMAP_00004599
BDMAP_00004600
BDMAP_00004601
BDMAP_00004602
BDMAP_00004603
BDMAP_00004604
BDMAP_00004605
BDMAP_00004606
BDMAP_00004607
BDMAP_00004608
BDMAP_00004609
BDMAP_00004610
BDMAP_00004611
BDMAP_00004612
BDMAP_00004613
BDMAP_00004614
BDMAP_00004615
BDMAP_00004616
BDMAP_00004617
BDMAP_00004618
BDMAP_00004619
BDMAP_00004620
BDMAP_00004621
BDMAP_00004622
BDMAP_00004623
BDMAP_00004624
BDMAP_00004625
BDMAP_00004626
BDMAP_00004627
BDMAP_00004628
BDMAP_00004629
BDMAP_00004630
BDMAP_00004631
BDMAP_00004632
BDMAP_00004633
BDMAP_00004634
BDMAP_00004635
BDMAP_00004636
BDMAP_00004637
BDMAP_00004638
BDMAP_00004639
BDMAP_00004640
BDMAP_00004641
BDMAP_00004642
BDMAP_00004643
BDMAP_00004644
BDMAP_00004645
BDMAP_00004646
BDMAP_00004647
BDMAP_00004648
BDMAP_00004649
BDMAP_00004650
BDMAP_00004651
BDMAP_00004652
BDMAP_00004653
BDMAP_00004654
BDMAP_00004655
BDMAP_00004656
BDMAP_00004657
BDMAP_00004658
BDMAP_00004659
BDMAP_00004660
BDMAP_00004661
BDMAP_00004662
BDMAP_00004663
BDMAP_00004664
BDMAP_00004665
BDMAP_00004666
BDMAP_00004667
BDMAP_00004668
BDMAP_00004669
BDMAP_00004670
BDMAP_00004671
BDMAP_00004672
BDMAP_00004673
BDMAP_00004674
BDMAP_00004675
BDMAP_00004676
BDMAP_00004677
BDMAP_00004678
BDMAP_00004679
BDMAP_00004680
BDMAP_00004681
BDMAP_00004682
BDMAP_00004683
BDMAP_00004684
BDMAP_00004685
BDMAP_00004686
BDMAP_00004687
BDMAP_00004688
BDMAP_00004689
BDMAP_00004690
BDMAP_00004691
BDMAP_00004692
BDMAP_00004693
BDMAP_00004694
BDMAP_00004695
BDMAP_00004696
BDMAP_00004697
BDMAP_00004698
BDMAP_00004699
BDMAP_00004700
BDMAP_00004701
BDMAP_00004702
BDMAP_00004703
BDMAP_00004704
BDMAP_00004705
BDMAP_00004706
BDMAP_00004707
BDMAP_00004708
BDMAP_00004709
BDMAP_00004710
BDMAP_00004711
BDMAP_00004712
BDMAP_00004713
BDMAP_00004714
BDMAP_00004715
BDMAP_00004716
BDMAP_00004717
BDMAP_00004718
BDMAP_00004719
BDMAP_00004720
BDMAP_00004721
BDMAP_00004722
BDMAP_00004723
BDMAP_00004724
BDMAP_00004725
BDMAP_00004726
BDMAP_00004727
BDMAP_00004728
BDMAP_00004729
BDMAP_00004730
BDMAP_00004731
BDMAP_00004732
BDMAP_00004733
BDMAP_00004734
BDMAP_00004735
BDMAP_00004736
BDMAP_00004737
BDMAP_00004738
BDMAP_00004739
BDMAP_00004740
BDMAP_00004741
BDMAP_00004742
BDMAP_00004743
BDMAP_00004744
BDMAP_00004745
BDMAP_00004746
BDMAP_00004747
BDMAP_00004748
BDMAP_00004749
BDMAP_00004750
BDMAP_00004751
BDMAP_00004752
BDMAP_00004753
BDMAP_00004754
BDMAP_00004755
BDMAP_00004756
BDMAP_00004757
BDMAP_00004758
BDMAP_00004759
BDMAP_00004760
BDMAP_00004761
BDMAP_00004762
BDMAP_00004763
BDMAP_00004764
BDMAP_00004765
BDMAP_00004766
BDMAP_00004767
BDMAP_00004768
BDMAP_00004769
BDMAP_00004770
BDMAP_00004771
BDMAP_00004772
BDMAP_00004773
BDMAP_00004774
BDMAP_00004775
BDMAP_00004776
BDMAP_00004777
BDMAP_00004778
BDMAP_00004779
BDMAP_00004780
BDMAP_00004781
BDMAP_00004782
BDMAP_00004783
BDMAP_00004784
BDMAP_00004785
BDMAP_00004786
BDMAP_00004787
BDMAP_00004788
BDMAP_00004789
BDMAP_00004790
BDMAP_00004791
BDMAP_00004792
BDMAP_00004793
BDMAP_00004794
BDMAP_00004795
BDMAP_00004796
BDMAP_00004797
BDMAP_00004798
BDMAP_00004799
BDMAP_00004800
BDMAP_00004801
BDMAP_00004802
BDMAP_00004803
BDMAP_00004804
BDMAP_00004805
BDMAP_00004806
BDMAP_00004807
BDMAP_00004808
BDMAP_00004809
BDMAP_00004810
BDMAP_00004811
BDMAP_00004812
BDMAP_00004813
BDMAP_00004814
BDMAP_00004815
BDMAP_00004816
BDMAP_00004817
BDMAP_00004818
BDMAP_00004819
BDMAP_00004820
BDMAP_00004821
BDMAP_00004822
BDMAP_00004823
BDMAP_00004824
BDMAP_00004825
BDMAP_00004826
BDMAP_00004827
BDMAP_00004828
BDMAP_00004829
BDMAP_00004830
BDMAP_00004831
BDMAP_00004832
BDMAP_00004833
BDMAP_00004834
BDMAP_00004835
BDMAP_00004836
BDMAP_00004837
BDMAP_00004838
BDMAP_00004839
BDMAP_00004840
BDMAP_00004841
BDMAP_00004842
BDMAP_00004843
BDMAP_00004844
BDMAP_00004845
BDMAP_00004846
BDMAP_00004847
BDMAP_00004848
BDMAP_00004849
BDMAP_00004850
BDMAP_00004851
BDMAP_00004852
BDMAP_00004853
BDMAP_00004854
BDMAP_00004855
BDMAP_00004856
BDMAP_00004857
BDMAP_00004858
BDMAP_00004859
BDMAP_00004860
BDMAP_00004861
BDMAP_00004862
BDMAP_00004863
BDMAP_00004864
BDMAP_00004865
BDMAP_00004866
BDMAP_00004867
BDMAP_00004868
BDMAP_00004869
BDMAP_00004870
BDMAP_00004871
BDMAP_00004872
BDMAP_00004873
BDMAP_00004874
BDMAP_00004875
BDMAP_00004876
BDMAP_00004877
BDMAP_00004878
BDMAP_00004879
BDMAP_00004880
BDMAP_00004881
BDMAP_00004882
BDMAP_00004883
BDMAP_00004884
BDMAP_00004885
BDMAP_00004886
BDMAP_00004887
BDMAP_00004888
BDMAP_00004889
BDMAP_00004890
BDMAP_00004891
BDMAP_00004892
BDMAP_00004893
BDMAP_00004894
BDMAP_00004895
BDMAP_00004896
BDMAP_00004897
BDMAP_00004898
BDMAP_00004899
BDMAP_00004900
BDMAP_00004901
BDMAP_00004902
BDMAP_00004903
BDMAP_00004904
BDMAP_00004905
BDMAP_00004906
BDMAP_00004907
BDMAP_00004908
BDMAP_00004909
BDMAP_00004910
BDMAP_00004911
BDMAP_00004912
BDMAP_00004913
BDMAP_00004914
BDMAP_00004915
BDMAP_00004916
BDMAP_00004917
BDMAP_00004918
BDMAP_00004919
BDMAP_00004920
BDMAP_00004921
BDMAP_00004922
BDMAP_00004923
BDMAP_00004924
BDMAP_00004925
BDMAP_00004926
BDMAP_00004927
BDMAP_00004928
BDMAP_00004929
BDMAP_00004930
BDMAP_00004931
BDMAP_00004932
BDMAP_00004933
BDMAP_00004934
BDMAP_00004935
BDMAP_00004936
BDMAP_00004937
BDMAP_00004938
BDMAP_00004939
BDMAP_00004940
BDMAP_00004941
BDMAP_00004942
BDMAP_00004943
BDMAP_00004944
BDMAP_00004945
BDMAP_00004946
BDMAP_00004947
BDMAP_00004948
BDMAP_00004949
BDMAP_00004950
BDMAP_00004951
BDMAP_00004952
BDMAP_00004953
BDMAP_00004954
BDMAP_00004955
BDMAP_00004956
BDMAP_00004957
BDMAP_00004958
BDMAP_00004959
BDMAP_00004960
BDMAP_00004961
BDMAP_00004962
BDMAP_00004963
BDMAP_00004964
BDMAP_00004965
BDMAP_00004966
BDMAP_00004967
BDMAP_00004968
BDMAP_00004969
BDMAP_00004970
BDMAP_00004971
BDMAP_00004972
BDMAP_00004973
BDMAP_00004974
BDMAP_00004975
BDMAP_00004976
BDMAP_00004977
BDMAP_00004978
BDMAP_00004979
BDMAP_00004980
BDMAP_00004981
BDMAP_00004982
BDMAP_00004983
BDMAP_00004984
BDMAP_00004985
BDMAP_00004986
BDMAP_00004987
BDMAP_00004988
BDMAP_00004989
BDMAP_00004990
BDMAP_00004991
BDMAP_00004992
BDMAP_00004993
BDMAP_00004994
BDMAP_00004995
BDMAP_00004996
BDMAP_00004997
BDMAP_00004998
BDMAP_00004999
BDMAP_00005000
BDMAP_00005001
BDMAP_00005002
BDMAP_00005003
BDMAP_00005004
BDMAP_00005005
BDMAP_00005006
BDMAP_00005007
BDMAP_00005008
BDMAP_00005009
BDMAP_00005010
BDMAP_00005011
BDMAP_00005012
BDMAP_00005013
BDMAP_00005014
BDMAP_00005015
BDMAP_00005016
BDMAP_00005017
BDMAP_00005018
BDMAP_00005019
BDMAP_00005020
BDMAP_00005021
BDMAP_00005022
BDMAP_00005023
BDMAP_00005024
BDMAP_00005025
BDMAP_00005026
BDMAP_00005027
BDMAP_00005028
BDMAP_00005029
BDMAP_00005030
BDMAP_00005031
BDMAP_00005032
BDMAP_00005033
BDMAP_00005034
BDMAP_00005035
BDMAP_00005036
BDMAP_00005037
BDMAP_00005038
BDMAP_00005039
BDMAP_00005040
BDMAP_00005041
BDMAP_00005042
BDMAP_00005043
BDMAP_00005044
BDMAP_00005045
BDMAP_00005046
BDMAP_00005047
BDMAP_00005048
BDMAP_00005049
BDMAP_00005050
BDMAP_00005051
BDMAP_00005052
BDMAP_00005053
BDMAP_00005054
BDMAP_00005055
BDMAP_00005056
BDMAP_00005057
BDMAP_00005058
BDMAP_00005059
BDMAP_00005060
BDMAP_00005061
BDMAP_00005062
BDMAP_00005063
BDMAP_00005064
BDMAP_00005065
BDMAP_00005066
BDMAP_00005067
BDMAP_00005068
BDMAP_00005069
BDMAP_00005070
BDMAP_00005071
BDMAP_00005072
BDMAP_00005073
BDMAP_00005074
BDMAP_00005075
BDMAP_00005076
BDMAP_00005077
BDMAP_00005078
BDMAP_00005079
BDMAP_00005080
BDMAP_00005081
BDMAP_00005082
BDMAP_00005083
BDMAP_00005084
BDMAP_00005085
BDMAP_00005086
BDMAP_00005087
BDMAP_00005088
BDMAP_00005089
BDMAP_00005090
BDMAP_00005091
BDMAP_00005092
BDMAP_00005093
BDMAP_00005094
BDMAP_00005095
BDMAP_00005096
BDMAP_00005097
BDMAP_00005098
BDMAP_00005099
BDMAP_00005100
BDMAP_00005101
BDMAP_00005102
BDMAP_00005103
BDMAP_00005104
BDMAP_00005105
BDMAP_00005106
BDMAP_00005107
BDMAP_00005108
BDMAP_00005109
BDMAP_00005110
BDMAP_00005111
BDMAP_00005112
BDMAP_00005113
BDMAP_00005114
BDMAP_00005115
BDMAP_00005116
BDMAP_00005117
BDMAP_00005118
BDMAP_00005119
BDMAP_00005120
BDMAP_00005121
BDMAP_00005122
BDMAP_00005123
BDMAP_00005124
BDMAP_00005125
BDMAP_00005126
BDMAP_00005127
BDMAP_00005128
BDMAP_00005129
BDMAP_00005130
BDMAP_00005131
BDMAP_00005132
BDMAP_00005133
BDMAP_00005134
BDMAP_00005135
BDMAP_00005136
BDMAP_00005137
BDMAP_00005138
BDMAP_00005139
BDMAP_00005140
BDMAP_00005141
BDMAP_00005142
BDMAP_00005143
BDMAP_00005144
BDMAP_00005145
BDMAP_00005146
BDMAP_00005147
BDMAP_00005148
BDMAP_00005149
BDMAP_00005150
BDMAP_00005151
BDMAP_00005152
BDMAP_00005153
BDMAP_00005154
BDMAP_00005155
BDMAP_00005156
BDMAP_00005157
BDMAP_00005158
BDMAP_00005159
BDMAP_00005160
BDMAP_00005161
BDMAP_00005162
BDMAP_00005163
BDMAP_00005164
BDMAP_00005165
BDMAP_00005166
BDMAP_00005167
BDMAP_00005168
BDMAP_00005169
BDMAP_00005170
BDMAP_00005171
BDMAP_00005172
BDMAP_00005173
BDMAP_00005174
BDMAP_00005175
BDMAP_00005176
BDMAP_00005177
BDMAP_00005178
BDMAP_00005179
BDMAP_00005180
BDMAP_00005181
BDMAP_00005182
BDMAP_00005183
BDMAP_00005184
BDMAP_00005185
BDMAP_00005186
BDMAP_00005187
BDMAP_00005188
BDMAP_00005189
BDMAP_00005190
BDMAP_00005191
BDMAP_00005192
BDMAP_00005193
BDMAP_00005194
BDMAP_00005195
================================================
FILE: Finetune/AbdomenAtlas/main.py
================================================
# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import partial
import logging
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.parallel
import torch.utils.data.distributed
from optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from trainer import run_training
from dataset.dataloader_bdmap import get_loader_Atlas
import torch.nn as nn
from monai.inferers import sliding_window_inference
from monai.losses import DiceCELoss
from monai.metrics import DiceMetric
from monai.networks.nets import SwinUNETR
from monai.transforms import Activations, AsDiscrete, Compose
from monai.utils.enums import MetricReduction
from monai.networks.blocks import PatchEmbed, UnetOutBlock, UnetrBasicBlock, UnetrUpBlock
from monai.networks.nets.swin_unetr import SwinTransformer as SwinViT
from monai.utils import ensure_tuple_rep
import warnings
warnings.filterwarnings('ignore')
# --- Distributed-training rendezvous: fixed single-node address/port. ---
# os.environ['CUDA_VISIBLE_DEVICES'] = "7"
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '28890'
# --- Raise the open-file soft limit (many workers + cached dataset files can
# exhaust the default); the hard limit (rlimit[1]) is left unchanged. ---
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
print('Setting resource limit:', str(resource.getrlimit(resource.RLIMIT_NOFILE)))
# --- Command-line interface for the AbdomenAtlas Swin-UNETR fine-tuning run. ---
# NOTE(review): several flags use `default=True`/`default=False` with no
# `type=` or `action=`; any value passed on the CLI arrives as a non-empty
# string and is therefore truthy — these are effectively constants unless
# edited here. Confirm before relying on e.g. `--noamp False`.
parser = argparse.ArgumentParser(description="Swin UNETR segmentation pipeline")
# Checkpointing / logging.
parser.add_argument("--checkpoint", default=None, help="start training from saved checkpoint")
parser.add_argument("--logdir", default="logs", type=str, help="directory to save the tensorboard logs")
parser.add_argument(
"--pretrained_dir", default="./pretrained_models/", type=str, help="pretrained checkpoint directory"
)
parser.add_argument("--out_channels", default=10, type=int, help="number of output channels")
parser.add_argument(
"--pretrained_model_name",
default="model.pt",
type=str,
help="pretrained model name",
)
# Shared ROI edge length (voxels) reused for roi_x/roi_y/roi_z below.
roi = 96
# Dataset locations and caching.
parser.add_argument("--data_dir", default="/project/medimgfmod/CT/AbdomenAtlasMini1.0/", type=str,
help="dataset directory")
parser.add_argument("--data_txt_path", default='./dataset/dataset_list', help="dataset json file")
parser.add_argument("--dataset_list", default=['AbdomenAtlas1.0'], help="dataset json file")
parser.add_argument("--cache_dataset", default=True, help="use monai CACHE Dataset class")
parser.add_argument("--cache_dir", default='./cache', help="CACHE dir")
parser.add_argument("--save_checkpoint", default=True, help="save checkpoint during training")
# Optimization schedule.
parser.add_argument("--max_epochs", default=100, type=int, help="max number of training epochs")
parser.add_argument("--warmup_epochs", default=5, type=int, help="number of warmup epochs")
parser.add_argument("--val_every", default=1, type=int, help="validation frequency")
parser.add_argument("--batch_size", default=1, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=4, type=int, help="number of sliding window batch size")
parser.add_argument("--optim_lr", default=1e-3, type=float, help="optimization learning rate")
parser.add_argument("--optim_name", default="adamw", type=str, help="optimization algorithm")
parser.add_argument("--reg_weight", default=1e-5, type=float, help="regularization weight")
parser.add_argument("--momentum", default=0.99, type=float, help="momentum")
parser.add_argument("--noamp", default=False, help="do NOT use amp for training")
# Distributed-training topology.
parser.add_argument("--distributed", action="store_true", help="start distributed training")
parser.add_argument("--world_size", default=1, type=int, help="number of nodes for distributed training")
parser.add_argument("--rank", default=0, type=int, help="node rank for distributed training")
parser.add_argument("--dist-url", default="tcp://127.0.0.1:23456", type=str, help="distributed url")
parser.add_argument("--dist-backend", default="nccl", type=str, help="distributed backend")
# Model architecture.
parser.add_argument("--norm_name", default="instance", type=str, help="normalization name")
parser.add_argument("--workers", default=8, type=int, help="number of workers")
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--in_channels", default=1, type=int, help="number of input channels")
parser.add_argument("--use_normal_dataset", default=True, help="use monai Dataset class")
# CT intensity window ([a_min, a_max] HU mapped to [b_min, b_max]).
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
# Resampled voxel spacing (mm) and training ROI size (voxels).
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=roi, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=roi, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=roi, type=int, help="roi size in z direction")
# Regularization and augmentation probabilities.
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--dropout_path_rate", default=0.0, type=float, help="drop path rate")
parser.add_argument("--RandFlipd_prob", default=0.2, type=float, help="RandFlipd aug probability")
parser.add_argument("--RandRotate90d_prob", default=0.2, type=float, help="RandRotate90d aug probability")
parser.add_argument("--RandScaleIntensityd_prob", default=0.1, type=float, help="RandScaleIntensityd aug probability")
parser.add_argument("--RandShiftIntensityd_prob", default=0.1, type=float, help="RandShiftIntensityd aug probability")
# Inference and loss configuration.
parser.add_argument("--infer_overlap", default=0.75, type=float, help="sliding window inference overlap")
parser.add_argument("--lrschedule", default="warmup_cosine", type=str, help="type of learning rate scheduler")
parser.add_argument("--resume_ckpt", action="store_true", help="resume training from pretrained checkpoint")
parser.add_argument("--smooth_dr", default=1e-6, type=float, help="constant added to dice denominator to avoid nan")
parser.add_argument("--smooth_nr", default=0.0, type=float, help="constant added to dice numerator to avoid zero")
parser.add_argument("--use_checkpoint", default=True, help="use gradient checkpointing to save memory")
parser.add_argument("--use_ssl_pretrained", default=True, help="use self-supervised pretrained weights")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--squared_dice", action="store_true", help="use squared Dice")
def main():
    """Parse CLI arguments and launch training.

    Single-GPU path runs ``main_worker`` directly; with ``--distributed``
    it spawns one worker process per visible GPU via ``mp.spawn`` and
    scales ``world_size`` by the per-node GPU count.
    """
    args = parser.parse_args()
    # AMP is on by default; --noamp disables it.
    args.amp = not args.noamp
    if not args.distributed:
        main_worker(gpu=0, args=args)
        return
    gpus_here = torch.cuda.device_count()
    print("Found total gpus", gpus_here)
    args.ngpus_per_node = gpus_here
    args.world_size = gpus_here * args.world_size
    mp.spawn(main_worker, nprocs=gpus_here, args=(args,))
def main_worker(gpu, args):
if args.distributed:
torch.multiprocessing.set_start_method("fork", force=True)
np.set_printoptions(formatter={"float": "{: 0.3f}".format}, suppress=True)
args.gpu = gpu
if args.distributed:
args.rank = args.rank * args.ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
args.test_mode = False
loader = get_loader_Atlas(args)
print(args.rank, " gpu", args.gpu)
if args.rank == 0:
print("Batch size is:", args.batch_size, "epochs", args.max_epochs)
inf_size = [args.roi_x, args.roi_y, args.roi_z]
if args.rank == 0:
os.makedirs(args.logdir, exist_ok=True)
logger = init_log('global', logging.INFO)
logger.propagate = 0
pretrained_dir = args.pretrained_dir
model = SwinUNETR(
img_size=(args.roi_x, args.roi_y, args.roi_z),
in_channels=args.in_channels,
out_channels=args.out_channels,
feature_size=args.feature_size,
drop_rate=0.0,
attn_drop_rate=0.0,
dropout_path_rate=args.dropout_path_rate,
use_checkpoint=args.use_checkpoint,
use_v2=True
)
if args.resume_ckpt:
model_dict = torch.load(os.path.join(pretrained_dir, args.pretrained_model_name))["state_dict"]
model.load_state_dict(model_dict)
print("Use resume weights")
if args.use_ssl_pretrained:
try:
model_dict = torch.load("./VoCo_10k.pt", map_location=torch.device('cpu'))
state_dict = model_dict
# state_dict = model_dict['net']
# fix potential differences in state dict keys from pre-training to
# fine-tuning
if "module." in list(state_dict.keys())[0]:
print("Tag 'module.' found in state dict - fixing!")
for key in list(state_dict.keys()):
state_dict[key.replace("module.", "")] = state_dict.pop(key)
if "swin_vit" in list(state_dict.keys())[0]:
print("Tag 'swin_vit' found in state dict - fixing!")
for key in list(state_dict.keys()):
state_dict[key.replace("swin_vit", "swinViT")] = state_dict.pop(key)
# We now load model weights, setting param `strict` to False, i.e.:
# this load the encoder weights (Swin-ViT, SSL pre-trained), but leaves
# the decoder weights untouched (CNN UNet decoder).
model.load_state_dict(state_dict, strict=False)
print("Using pretrained voco ema self-supervised Swin UNETR backbone weights !")
except ValueError:
raise ValueError("Self-supervised pre-trained weights not available for" + str(args.model_name))
if args.squared_dice:
dice_loss = DiceCELoss(
to_onehot_y=True, softmax=True, squared_pred=True, smooth_nr=args.smooth_nr, smooth_dr=args.smooth_dr
)
else:
dice_loss = DiceCELoss(include_background=False, to_onehot_y=True, softmax=True)
post_label = AsDiscrete(to_onehot=args.out_channels)
post_pred = AsDiscrete(argmax=True, to_onehot=args.out_channels)
dice_acc = DiceMetric(include_background=False, reduction=MetricReduction.MEAN, get_not_nans=True)
model_inferer = partial(
sliding_window_inference,
roi_size=inf_size,
sw_batch_size=args.sw_batch_size,
predictor=model,
overlap=args.infer_overlap,
)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total parameters count", pytorch_total_params)
best_acc = 0
start_epoch = 0
if args.checkpoint is not None:
checkpoint = torch.load(args.checkpoint, map_location="cpu")
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in checkpoint["state_dict"].items():
new_state_dict[k.replace("backbone.", "")] = v
model.load_state_dict(new_state_dict, strict=False)
if "epoch" in checkpoint:
start_epoch = checkpoint["epoch"]
if "best_acc" in checkpoint:
best_acc = checkpoint["best_acc"]
print("=> loaded checkpoint '{}' (epoch {}) (bestacc {})".format(args.checkpoint, start_epoch, best_acc))
model.cuda()
if args.distributed:
torch.cuda.set_device(args.gpu)
if args.norm_name == "batch":
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda(args.gpu)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], output_device=args.gpu)
if args.optim_name == "adam":
optimizer = torch.optim.Adam(model.parameters(), lr=args.optim_lr, weight_decay=args.reg_weight)
elif args.optim_name == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=args.optim_lr, weight_decay=args.reg_weight)
# optimizer = torch.optim.AdamW(model.parameters(), lr=args.optim_lr, amsgrad=True)
elif args.optim_name == "sgd":
optimizer = torch.optim.SGD(
model.parameters(), lr=args.optim_lr, momentum=args.momentum, nesterov=True, weight_decay=args.reg_weight
)
else:
raise ValueError("Unsupported Optimization Procedure: " + str(args.optim_name))
if args.lrschedule == "warmup_cosine":
print(len(loader[0]))
max_steps = args.max_epochs * len(loader[0])
warmup_steps = args.warmup_epochs * len(loader[0])
scheduler = LinearWarmupCosineAnnealingLR(
optimizer, warmup_epochs=warmup_steps, max_epochs=max_steps
)
elif args.lrschedule == "cosine_anneal":
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.max_epochs)
if args.checkpoint is not None:
scheduler.step(epoch=start_epoch)
else:
scheduler = None
accuracy = run_training(
model=model,
train_loader=loader[0],
val_loader=loader[1],
optimizer=optimizer,
loss_func=dice_loss,
acc_func=dice_acc,
args=args,
model_inferer=model_inferer,
scheduler=scheduler,
start_epoch=start_epoch,
post_label=post_label,
post_pred=post_pred,
)
return accuracy
# Registry of (name, level) pairs already configured, to avoid attaching
# duplicate stream handlers to the same logger.
logs = set()


def init_log(name, level=logging.INFO):
    """Configure and return a stream logger for ``name`` (once per (name, level)).

    Under SLURM, records are filtered out on non-zero ranks so only rank 0
    prints to the console.

    Args:
        name: logger name passed to ``logging.getLogger``.
        level: logging level applied to both the logger and its handler.

    Returns:
        The configured ``logging.Logger``.
    """
    logger = logging.getLogger(name)
    if (name, level) in logs:
        # Bug fix: previously returned None here, so a second call with the
        # same (name, level) handed the caller nothing instead of the logger.
        return logger
    logs.add((name, level))
    logger.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    if "SLURM_PROCID" in os.environ:
        rank = int(os.environ["SLURM_PROCID"])
        # Suppress records on non-zero ranks to avoid duplicated output.
        logger.addFilter(lambda record: rank == 0)
    else:
        rank = 0
    format_str = "[%(asctime)s][%(levelname)8s] %(message)s"
    formatter = logging.Formatter(format_str)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
# Script entry point: run the training pipeline defined in main().
if __name__ == "__main__":
    main()
================================================
FILE: Finetune/AbdomenAtlas/optimizers/__init__.py
================================================
================================================
FILE: Finetune/AbdomenAtlas/optimizers/lr_scheduler.py
================================================
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import List
from torch import nn as nn
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
__all__ = ["LinearLR", "ExponentialLR"]
class _LRSchedulerMONAI(_LRScheduler):
    """Shared base for LR sweep schedulers: records the sweep endpoint and
    length, then defers all bookkeeping to ``_LRScheduler``."""

    def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1) -> None:
        """
        Args:
            optimizer: wrapped optimizer.
            end_lr: learning rate reached at the end of the sweep.
            num_iter: number of iterations the sweep spans.
            last_epoch: index of the last epoch (-1 starts from scratch).

        Returns:
            None
        """
        self.num_iter = num_iter
        self.end_lr = end_lr
        super().__init__(optimizer, last_epoch)
class LinearLR(_LRSchedulerMONAI):
    """Sweep each group's learning rate linearly from its base LR to
    ``end_lr`` over ``num_iter`` iterations."""

    def get_lr(self):
        # Fraction of the sweep completed (reaches 1.0 on the last iteration).
        frac = self.last_epoch / (self.num_iter - 1)
        return [lr0 + frac * (self.end_lr - lr0) for lr0 in self.base_lrs]
class ExponentialLR(_LRSchedulerMONAI):
    """Sweep each group's learning rate geometrically from its base LR to
    ``end_lr`` over ``num_iter`` iterations."""

    def get_lr(self):
        # Exponent in [0, 1]: 0 keeps base_lr, 1 lands exactly on end_lr.
        frac = self.last_epoch / (self.num_iter - 1)
        return [lr0 * (self.end_lr / lr0) ** frac for lr0 in self.base_lrs]
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup followed by cosine decay.

    Based on the https://huggingface.co/ implementation.
    """

    def __init__(
        self, optimizer: Optimizer, warmup_steps: int, t_total: int, cycles: float = 0.5, last_epoch: int = -1
    ) -> None:
        """
        Args:
            optimizer: wrapped optimizer.
            warmup_steps: number of warmup iterations.
            t_total: total number of training iterations.
            cycles: cosine cycles parameter.
            last_epoch: the index of last epoch.

        Returns:
            None
        """
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super().__init__(optimizer, self.lr_lambda, last_epoch)

    def lr_lambda(self, step):
        # Linear ramp 0 -> 1 during warmup.
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        # Cosine decay over the remaining iterations (guard against zero span).
        span = max(1, self.t_total - self.warmup_steps)
        progress = float(step - self.warmup_steps) / float(span)
        cosine = 0.5 * (1.0 + math.cos(2.0 * math.pi * float(self.cycles) * progress))
        return max(0.0, cosine)
class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """Linear warmup from ``warmup_start_lr`` up to the base LR over
    ``warmup_epochs`` steps, then cosine annealing down to ``eta_min`` until
    ``max_epochs``. Despite the parameter names, the caller may step this
    per-iteration (as main.py does, passing step counts as "epochs")."""

    def __init__(
        self,
        optimizer: Optimizer,
        warmup_epochs: int,
        max_epochs: int,
        warmup_start_lr: float = 0.0,
        eta_min: float = 0.0,
        last_epoch: int = -1,
    ) -> None:
        """
        Args:
            optimizer (Optimizer): Wrapped optimizer.
            warmup_epochs (int): Maximum number of iterations for linear warmup
            max_epochs (int): Maximum number of iterations
            warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
            eta_min (float): Minimum learning rate. Default: 0.
            last_epoch (int): The index of last epoch. Default: -1.
        """
        # NOTE(review): get_lr/_get_closed_form_lr divide by (warmup_epochs - 1),
        # so warmup_epochs == 1 raises ZeroDivisionError — confirm callers avoid it.
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min
        super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """
        Compute learning rate using chainable form of the scheduler
        """
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
            )
        if self.last_epoch == 0:
            # First step: start warmup at warmup_start_lr for every group.
            return [self.warmup_start_lr] * len(self.base_lrs)
        elif self.last_epoch < self.warmup_epochs:
            # Warmup: add a constant linear increment to each group per step.
            return [
                group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
            ]
        elif self.last_epoch == self.warmup_epochs:
            # Warmup finished: land exactly on the base learning rates.
            return self.base_lrs
        elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:
            # Start of a new cosine period: apply the first cosine decrement.
            return [
                group["lr"]
                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2
                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
            ]
        # Chainable cosine step: scale the previous LR by the ratio of
        # consecutive cosine factors (mirrors CosineAnnealingLR's chainable form).
        return [
            (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
            / (
                1
                + math.cos(
                    math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs)
                )
            )
            * (group["lr"] - self.eta_min)
            + self.eta_min
            for group in self.optimizer.param_groups
        ]

    def _get_closed_form_lr(self) -> List[float]:
        """
        Called when epoch is passed as a param to the `step` function of the scheduler.
        """
        if self.last_epoch < self.warmup_epochs:
            # Closed-form linear warmup.
            return [
                self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
                for base_lr in self.base_lrs
            ]
        # Closed-form cosine annealing between base_lr and eta_min.
        return [
            self.eta_min
            + 0.5
            * (base_lr - self.eta_min)
            * (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
            for base_lr in self.base_lrs
        ]
================================================
FILE: Finetune/AbdomenAtlas/preprocess/try_load.py
================================================
# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import partial
import nibabel as nib
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from dataset.dataloader_bdmap import get_loader_Atlas
from utils.utils import dice, resample_3d
from utils.utils import AverageMeter, distributed_all_gather
from tqdm import tqdm
from monai.inferers import sliding_window_inference
from monai.data import decollate_batch
from monai.losses import DiceCELoss
from monai.metrics import DiceMetric
from monai.networks.nets import SwinUNETR
from monai.transforms import Activations, AsDiscrete, Compose
from monai.utils.enums import MetricReduction
from utils.utils import *
import cv2
from PIL import Image
# os.environ['CUDA_VISIBLE_DEVICES'] = "0"
# Rendezvous endpoint for (optional) single-node distributed initialization.
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '28890'
import resource

# Raise the soft open-file limit: many dataloader workers each hold file handles.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
print('Setting resource limit:', str(resource.getrlimit(resource.RLIMIT_NOFILE)))

# Command-line options: data paths, ROI size/spacing, CT intensity windowing,
# and augmentation probabilities for the AbdomenAtlas loader.
parser = argparse.ArgumentParser(description="Swin UNETR segmentation pipeline")
parser.add_argument(
    "--pretrained_dir", default="./runs/logs_scratch_v2/", type=str, help="pretrained checkpoint directory"
)
parser.add_argument("--data_dir", default="/project/medimgfmod/CT/AbdomenAtlasMini1.0/", type=str, help="dataset directory")
parser.add_argument("--data_txt_path", default='./dataset/dataset_list', help="dataset json file")
parser.add_argument("--dataset_list", default=['AbdomenAtlas1.0'], help="dataset json file")
parser.add_argument("--pos", default=1, type=int, help="number of positive sample")
parser.add_argument("--neg", default=0, type=int, help="number of negative sample")
# Cubic ROI edge length shared by the three --roi_* arguments below.
roi=96
parser.add_argument("--cache_dataset", default=False, help="use monai CACHE Dataset class")
parser.add_argument("--feature_size", default=48, type=int, help="feature size")
parser.add_argument("--batch_size", default=8, type=int, help="number of batch size")
parser.add_argument("--sw_batch_size", default=1, type=int, help="number of sliding window batch size")
parser.add_argument("--infer_overlap", default=0.75, type=float, help="sliding window inference overlap")
parser.add_argument("--in_channels", default=1, type=int, help="number of input channels")
parser.add_argument("--out_channels", default=14, type=int, help="number of output channels")
parser.add_argument("--a_min", default=-175.0, type=float, help="a_min in ScaleIntensityRanged")
parser.add_argument("--a_max", default=250.0, type=float, help="a_max in ScaleIntensityRanged")
parser.add_argument("--b_min", default=0.0, type=float, help="b_min in ScaleIntensityRanged")
parser.add_argument("--b_max", default=1.0, type=float, help="b_max in ScaleIntensityRanged")
parser.add_argument("--space_x", default=1.5, type=float, help="spacing in x direction")
parser.add_argument("--space_y", default=1.5, type=float, help="spacing in y direction")
parser.add_argument("--space_z", default=2.0, type=float, help="spacing in z direction")
parser.add_argument("--roi_x", default=roi, type=int, help="roi size in x direction")
parser.add_argument("--roi_y", default=roi, type=int, help="roi size in y direction")
parser.add_argument("--roi_z", default=roi, type=int, help="roi size in z direction")
parser.add_argument("--dropout_rate", default=0.0, type=float, help="dropout rate")
parser.add_argument("--distributed", action="store_true", help="start distributed training")
parser.add_argument("--workers", default=16, type=int, help="number of workers")
parser.add_argument("--RandFlipd_prob", default=0.2, type=float, help="RandFlipd aug probability")
parser.add_argument("--RandRotate90d_prob", default=0.2, type=float, help="RandRotate90d aug probability")
parser.add_argument("--RandScaleIntensityd_prob", default=0.1, type=float, help="RandScaleIntensityd aug probability")
parser.add_argument("--RandShiftIntensityd_prob", default=0.1, type=float, help="RandShiftIntensityd aug probability")
parser.add_argument("--spatial_dims", default=3, type=int, help="spatial dimension of input data")
parser.add_argument("--use_checkpoint", default=True, help="use gradient checkpointing to save memory")
import warnings

# Silence noisy third-party warnings (MONAI/nibabel deprecation chatter).
warnings.filterwarnings('ignore')
def main():
    """Iterate the AbdomenAtlas training loader, printing each batch's image
    and label shapes plus the label values present — a quick sanity check that
    images and labels stay aligned after the transform pipeline.

    (Slice-by-slice visualization of image/label pairs can be added in the
    loop body; see the repository readme for context.)
    """
    args = parser.parse_args()
    args.test_mode = True
    loaders = get_loader_Atlas(args)
    with torch.no_grad():
        for batch in tqdm(loaders[0]):
            img, lab = batch["image"], batch["label"]
            print(img.shape, lab.shape, torch.unique(lab))
# Script entry point: run the loader sanity check.
if __name__ == "__main__":
    main()
================================================
FILE: Finetune/AbdomenAtlas/readme.md
================================================
# VoCo for AbdomenAtlas
<a href="https://arxiv.org/abs/2402.17300"><img src='https://img.shields.io/badge/arXiv-VoCo-red' alt='Paper PDF'></a>
<a href='https://huggingface.co/datasets/Luffy503/VoCo-10k/tree/main'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
CVPR 2024 paper, [**"VoCo: A Simple-yet-Effective Volume Contrastive Learning Framework for 3D Medical Image Analysis"**](https://arxiv.org/abs/2402.17300)
Authors: Linshan Wu, <a href="https://scholar.google.com/citations?user=PfM5gucAAAAJ&hl=en">Jiaxin Zhuang</a>, and <a href="https://scholar.google.com/citations?hl=en&user=Z_t5DjwAAAAJ">Hao Chen</a>
Code for AbdomenAtlasMini1.0 Training and Inference.
## Usage
### Pre-training
Please refer to the official [VoCo repo](https://github.com/Luffy03/VoCo)
### Requirement
I have stored all the required checkpoints and running logs in the project.
Our Segmentation Training codes are based on [MONAI](https://github.com/Project-MONAI/research-contributions).
Please also refer to the requirements.txt.
### Training
First edit the data_path of AbdomenAtlasMini1.0 in 'train.sh'
```
data_dir=YOUR AbdomenAtlasMini1.0 PATH
```
Reading 9 label files is not efficient in training, and we also found some bugs in
the original [data_loader](https://github.com/MrGiovanni/SuPreM/blob/d8a948c96e56f2050109c3ce418bc4caa09420a5/supervised_pretraining/dataset/dataloader_bdmap.py#L147)
(the label data is loaded but the meta_keys of the labels are not, so the subsequent transforms can produce images and labels that no longer correspond. We provide '/preprocess/try_load.py' for visualization). Thus, we first merge all 9 label files into one.
```
# preprocess, in exe function of check.py , path=YOUR AbdomenAtlasMini1.0 PATH
python check.py
# merge all 9 organ label files to one label.nii.gz
```
After pre-processing, run the training:
```
# bash
sh train.sh
# Or using slurm
sbatch train.slurm
```
To accelerate training, we use 'PersistentDataset' to pre-cache data.
```
# in train.sh
cache_dataset=False
# Or with adequate space
cache_dataset=True
cache_dir=Your path to save cache
```
### Inference
First edit the test and prediction path of AbdomenAtlasMini1.0 in 'Atlas_test.sh'
```
test_data_path=Your path to AbdomenAtlasTest
save_prediction_path=Your path to save the prediction AbdomenAtlasTest
```
Inference implementation
```
# bash
sh Atlas_test.sh
```
Inference Visualization
```
# We provide check_pred_vis() function in check.py for you to visualize the predictions
python check.py
```
## Acknowledgement
We thank [MONAI](https://github.com/Project-MONAI/research-contributions) and [SuPreM](https://github.com/MrGiovanni/SuPreM) for part of their codes.
## Citation ✏️ 📄
If you find this repo useful for your research, please consider citing the paper as follows:
```
@inproceedings{VoCo,
title={VoCo: A Simple-yet-Effective Volume Contrastive Learning Framework for 3D Medical Image Analysis},
author={Wu, Linshan and Zhuang, Jiaxin and Chen, Hao},
booktitle={IEEE Conf. Comput. Vis. Pattern Recog.},
year={2024}
}
```
================================================
FILE: Finetune/AbdomenAtlas/requirements.txt
================================================
# packages in environment at /home/lwubf/anaconda3/envs/nnunet:
#
# Name Version Build Channel
_libgcc_mutex 0.1 main
absl-py 2.1.0 <pip>
ca-certificates 2023.12.12 h06a4308_0
certifi 2022.12.7 <pip>
charset-normalizer 2.1.1 <pip>
cmake 3.25.0 <pip>
contourpy 1.2.0 <pip>
cycler 0.12.1 <pip>
einops 0.7.0 <pip>
elasticdeform 0.5.0 <pip>
filelock 3.9.0 <pip>
fonttools 4.50.0 <pip>
fsspec 2024.2.0 <pip>
grpcio 1.62.0 <pip>
huggingface-hub 0.21.4 <pip>
idna 3.4 <pip>
importlib-metadata 7.0.1 <pip>
importlib_resources 6.4.0 <pip>
inquirerpy 0.3.4 <pip>
Jinja2 3.1.2 <pip>
kiwisolver 1.4.5 <pip>
ld_impl_linux-64 2.38 h1181459_1
libffi 3.3 he6710b0_2
libgcc-ng 9.1.0 hdf63c60_0
libstdcxx-ng 9.1.0 hdf63c60_0
lit 15.0.7 <pip>
Markdown 3.5.2 <pip>
MarkupSafe 2.1.5 <pip>
matplotlib 3.8.3 <pip>
monai 1.3.0 <pip>
mpmath 1.3.0 <pip>
ncurses 6.3 h7f8727e_2
networkx 3.2.1 <pip>
nibabel 5.2.0 <pip>
numpy 1.26.4 <pip>
opencv-python 4.9.0.80 <pip>
openssl 1.1.1w h7f8727e_0
packaging 23.2 <pip>
pfzy 0.3.4 <pip>
pillow 10.2.0 <pip>
pip 23.3.1 py39h06a4308_0
prompt-toolkit 3.0.43 <pip>
protobuf 4.25.3 <pip>
pyparsing 3.1.2 <pip>
python 3.9.12 h12debd9_1
python-dateutil 2.9.0.post0 <pip>
PyYAML 6.0.1 <pip>
readline 8.1.2 h7f8727e_1
requests 2.28.1 <pip>
scipy 1.12.0 <pip>
setuptools 68.2.2 py39h06a4308_0
SimpleITK 2.0.2 <pip>
six 1.16.0 <pip>
sqlite 3.38.5 hc218d9a_0
sympy 1.12 <pip>
tensorboard 2.16.2 <pip>
tensorboard-data-server 0.7.2 <pip>
tensorboardX 2.6.2.2 <pip>
tk 8.6.12 h1ccaba5_0
torch 2.0.1+cu118 <pip>
torchaudio 2.0.2+cu118 <pip>
torchvision 0.15.2+cu118 <pip>
tqdm 4.66.2 <pip>
triton 2.0.0 <pip>
typing_extensions 4.8.0 <pip>
tzdata 2024a h04d1e81_0
urllib3 1.26.13 <pip>
wcwidth 0.2.13 <pip>
Werkzeug 3.0.1 <pip>
wheel 0.41.2 py39h06a4308_0
xz 5.2.5 h7f8727e_1
zipp 3.17.0 <pip>
zlib 1.2.12 h7f8727e_2
================================================
FILE: Finetune/AbdomenAtlas/train.sh
================================================
# Timestamped log file name for this run.
now=$(date +"%Y%m%d_%H%M%S")
logdir=runs/logs
mkdir -p $logdir
# Path to the AbdomenAtlasMini1.0 dataset (edit before running).
data_dir=/project/medimgfmod/CT/AbdomenAtlasMini1.0/
# Set cache_dataset=True (with cache_dir on fast storage) to pre-cache via PersistentDataset.
cache_dataset=False
cache_dir=/scratch/medimgfmod/CT/cache/Atlas
# Launch training; stdout is mirrored to a timestamped log file.
torchrun --master_port=21472 main.py \
--data_dir $data_dir --cache_dataset $cache_dataset --cache_dir $cache_dir --logdir $logdir | tee $logdir/$now.txt
================================================
FILE: Finetune/AbdomenAtlas/train.slurm
================================================
#!/bin/bash
# NOTE: Lines starting with "#SBATCH" are valid SLURM commands or statements,
# while those starting with "#" and "##SBATCH" are comments.
#SBATCH -J Atlas
#SBATCH -t 72:00:00 #Maximum runtime of 72 hours
# Enable email notifications when job begins and ends
#SBATCH --mail-user=lwubf@connect.ust.hk #Update your email address
#SBATCH --mail-type=begin
#SBATCH --mail-type=end
# Choose partition (queue)
#SBATCH -p project
# To use 16 cpu cores and 1 gpu device in a node
#SBATCH -N 1 -n 16 --gres=gpu:1
# Setup runtime environment if necessary
source ~/.bashrc
source activate nnunet
# Go to the job submission directory and run your application
cd /home/lwubf/AbdomenAtlas/
sh train.sh
================================================
FILE: Finetune/AbdomenAtlas/trainer.py
================================================
# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data.distributed
from tensorboardX import SummaryWriter
from torch.cuda.amp import GradScaler, autocast
from utils.utils import AverageMeter, distributed_all_gather
from monai.data import decollate_batch
from utils.mixup import mixup
def train_epoch(model, loader, optimizer, scheduler, scaler, epoch, loss_func, args):
    """Run one training epoch and return the average loss.

    Applies mixup jointly to image and label, supports AMP via ``scaler``,
    averages the loss across ranks in distributed mode, and steps the LR
    ``scheduler`` once per iteration (not per epoch).
    """
    model.train()
    start_time = time.time()
    run_loss = AverageMeter()
    for idx, batch_data in enumerate(loader):
        data, target = batch_data["image"], batch_data["label"]
        data, target = data.cuda(), target.cuda()
        # Mixup is applied to image and label together so they stay aligned.
        data, target = mixup([data, target])
        # Clear gradients by detaching them (equivalent to zero_grad(set_to_none=True)).
        for param in model.parameters():
            param.grad = None
        with autocast(enabled=args.amp):
            logits = model(data)
            loss = loss_func(logits, target)
        #
        if args.amp:
            # AMP path: scale the loss to avoid fp16 underflow, then step/update.
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()
        if args.distributed:
            # Gather per-rank losses; batches past valid_length are sampler padding.
            loss_list = distributed_all_gather([loss], out_numpy=True, is_valid=idx < loader.sampler.valid_length)
            run_loss.update(
                np.mean(np.mean(np.stack(loss_list, axis=0), axis=0), axis=0), n=args.batch_size * args.world_size
            )
        else:
            run_loss.update(loss.item(), n=args.batch_size)
        lr = optimizer.param_groups[0]["lr"]
        if scheduler is not None:
            # Per-iteration LR schedule.
            scheduler.step()
        if args.rank == 0 and (idx + 1) % 200 == 0:
            print(
                "Epoch {}/{} {}/{}".format(epoch, args.max_epochs, idx, len(loader)),
                "loss: {:.4f}".format(run_loss.avg),
                "lr: {:.8f}".format(lr),
                "time {:.2f}s".format(time.time() - start_time),
            )
        start_time = time.time()
    # Release gradient memory before validation.
    for param in model.parameters():
        param.grad = None
    return run_loss.avg
def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, post_label=None, post_pred=None):
    """Run one validation epoch and return the (per-class) mean Dice.

    Args:
        acc_func: MONAI DiceMetric-style callable with reset/aggregate.
        model_inferer: optional callable (e.g. sliding-window inference) used
            instead of a direct forward pass.
        post_label: post-transform applied to labels (e.g. one-hot).
        post_pred: post-transform applied to logits (e.g. argmax + one-hot).
    """
    model.eval()
    run_acc = AverageMeter()
    start_time = time.time()
    with torch.no_grad():
        for idx, batch_data in enumerate(loader):
            data, target = batch_data["image"], batch_data["label"]
            data, target = data.cuda(), target.cuda()
            with autocast(enabled=args.amp):
                if model_inferer is not None:
                    logits = model_inferer(data)
                else:
                    logits = model(data)
            # Indexing [0] assumes validation batch size 1.
            val_labels_list = [target[0]]
            val_labels_convert = [post_label(val_label_tensor) for val_label_tensor in val_labels_list]
            val_outputs_list = [logits[0]]
            val_output_convert = [post_pred(val_pred_tensor) for val_pred_tensor in val_outputs_list]
            acc_func.reset()
            acc_func(y_pred=val_output_convert, y=val_labels_convert)
            acc, not_nans = acc_func.aggregate()
            acc = acc.cuda(args.rank)
            if args.distributed:
                # Gather per-rank accuracies; entries past valid_length are padding.
                acc_list, not_nans_list = distributed_all_gather(
                    [acc, not_nans], out_numpy=True, is_valid=idx < loader.sampler.valid_length
                )
                for al, nl in zip(acc_list, not_nans_list):
                    run_acc.update(al, n=nl)
            else:
                run_acc.update(acc.cpu().numpy(), n=not_nans.cpu().numpy())
            if args.rank == 0:
                avg_acc = np.mean(run_acc.avg)
                print(
                    "Val {}/{} {}/{}".format(epoch, args.max_epochs, idx, len(loader)),
                    "acc",
                    avg_acc,
                    "time {:.2f}s".format(time.time() - start_time),
                )
            start_time = time.time()
    # Free cached GPU memory accumulated by sliding-window inference.
    torch.cuda.empty_cache()
    return run_acc.avg
def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0, optimizer=None, scheduler=None):
    """Serialize training state to ``args.logdir/filename``.

    Saves the epoch, best accuracy so far, the model weights (unwrapped from
    DDP so keys carry no "module." prefix), and optionally the optimizer and
    scheduler states.
    """
    if args.distributed:
        weights = model.module.state_dict()
    else:
        weights = model.state_dict()
    payload = {"epoch": epoch, "best_acc": best_acc, "state_dict": weights}
    if optimizer is not None:
        payload["optimizer"] = optimizer.state_dict()
    if scheduler is not None:
        payload["scheduler"] = scheduler.state_dict()
    target = os.path.join(args.logdir, filename)
    torch.save(payload, target)
    print("Saving checkpoint", target)
def run_training(
    model,
    train_loader,
    val_loader,
    optimizer,
    loss_func,
    acc_func,
    args,
    model_inferer=None,
    scheduler=None,
    start_epoch=0,
    post_label=None,
    post_pred=None,
):
    """Main train/validate loop.

    Trains for ``args.max_epochs`` epochs, validating every ``args.val_every``
    epochs. Rank 0 saves ``model_final.pt`` after each validation round and
    copies it to ``model.pt`` whenever the mean Dice improves.

    Returns:
        The best validation accuracy observed.
    """
    scaler = None
    if args.amp:
        # One GradScaler shared across all epochs for mixed precision.
        scaler = GradScaler()
    val_acc_max = 0.0
    for epoch in range(start_epoch, args.max_epochs):
        if args.distributed:
            # Re-seed the sampler's shuffle for this epoch and sync all ranks.
            train_loader.sampler.set_epoch(epoch)
            torch.distributed.barrier()
        print(args.rank, time.ctime(), "Epoch:", epoch)
        epoch_time = time.time()
        train_loss = train_epoch(
            model, train_loader, optimizer, scheduler, scaler=scaler, epoch=epoch, loss_func=loss_func, args=args
        )
        if args.rank == 0:
            print(
                "Final training {}/{}".format(epoch, args.max_epochs - 1),
                "loss: {:.4f}".format(train_loss),
                "time {:.2f}s".format(time.time() - epoch_time),
            )
        b_new_best = False
        if (epoch + 1) % args.val_every == 0:
            if args.distributed:
                torch.distributed.barrier()
            epoch_time = time.time()
            val_avg_acc = val_epoch(
                model,
                val_loader,
                epoch=epoch,
                acc_func=acc_func,
                model_inferer=model_inferer,
                args=args,
                post_label=post_label,
                post_pred=post_pred,
            )
            # Collapse per-class Dice to a single scalar.
            val_avg_acc = np.mean(val_avg_acc)
            if args.rank == 0:
                print(
                    "Final validation {}/{}".format(epoch, args.max_epochs - 1),
                    "acc",
                    val_avg_acc,
                    "time {:.2f}s".format(time.time() - epoch_time),
                )
                if val_avg_acc > val_acc_max:
                    print("new best ({:.6f} --> {:.6f}). ".format(val_acc_max, val_avg_acc))
                    val_acc_max = val_avg_acc
                    b_new_best = True
                    if args.rank == 0 and args.logdir is not None and args.save_checkpoint:
                        # Save the full state (incl. optimizer/scheduler) on a new best.
                        save_checkpoint(
                            model, epoch, args, best_acc=val_acc_max, optimizer=optimizer, scheduler=scheduler
                        )
            if args.rank == 0 and args.logdir is not None and args.save_checkpoint:
                # Always keep the latest weights; promote them on a new best.
                save_checkpoint(model, epoch, args, best_acc=val_acc_max, filename="model_final.pt")
                if b_new_best:
                    print("Copying to model.pt new best model!!!!")
                    shutil.copyfile(os.path.join(args.logdir, "model_final.pt"), os.path.join(args.logdir, "model.pt"))
    print("Training Finished !, Best Accuracy: ", val_acc_max)
    return val_acc_max
================================================
FILE: Finetune/AbdomenAtlas/utils/__init__.py
================================================
================================================
FILE: Finetune/AbdomenAtlas/utils/data_trans.py
================================================
import math
import os
from copy import deepcopy
import numpy as np
import torch
import pickle
from monai import data, transforms
from monai.data import *
from monai.transforms import *
from torch.utils.data import DataLoader, ConcatDataset
class Sampler(torch.utils.data.Sampler):
    """Distributed sampler: shards a dataset across ranks with optional
    per-epoch shuffling and padding so every rank draws the same number of
    samples. ``valid_length`` is the count of non-padding samples on this rank."""

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, make_even=True):
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.shuffle = shuffle
        self.make_even = make_even
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Per-rank sample count, rounded up so the padded total covers the dataset.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Number of genuine (non-padded) samples this rank will see.
        self.valid_length = len(range(self.rank, len(self.dataset), self.num_replicas))

    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so every rank shuffles identically per epoch.
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        if self.make_even:
            shortfall = self.total_size - len(order)
            if shortfall > 0:
                if shortfall < len(order):
                    order += order[:shortfall]
                else:
                    picks = np.random.randint(low=0, high=len(order), size=shortfall)
                    order += [order[i] for i in picks]
            assert len(order) == self.total_size
        # Interleaved shard: rank r takes positions r, r+R, r+2R, ...
        shard = order[self.rank : self.total_size : self.num_replicas]
        self.num_samples = len(shard)
        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
def get_trans(args):
    """Build the deterministic base transforms and the random (augmentation)
    transforms for AbdomenAtlas training.

    Returns:
        (base_trans, random_trans): two lists of MONAI dictionary transforms.
        The base list loads, reorients, resamples, intensity-windows, and pads
        image+label; the random list crops foreground-centred patches and
        applies flip/rotate/intensity jitter, then strips metadata keys.
    """
    base_trans = [
        LoadImaged(keys=["image", "label"]),
        EnsureChannelFirstd(keys=["image", "label"]),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        # Resample to a common voxel spacing (bilinear for image, nearest for label).
        Spacingd(keys=["image", "label"], pixdim=(args.space_x, args.space_y, args.space_z),
                 mode=("bilinear", "nearest")),
        # Window CT intensities [a_min, a_max] -> [0, 1].
        ScaleIntensityRanged(
            keys=["image"],
            a_min=args.a_min,
            a_max=args.a_max,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        CropForegroundd(keys=["image", "label"], source_key="image"),
        # Pad so every volume is at least one ROI in each dimension.
        SpatialPadd(keys=["image", "label"], spatial_size=(args.roi_x, args.roi_y, args.roi_z),
                    mode='constant'),
        # prob=0: this shift never fires — presumably kept as a placeholder; TODO confirm.
        transforms.RandShiftIntensityd(keys="image", offsets=0.1, prob=0),
    ]
    random_trans = [
        # Sample sw_batch_size ROI patches per volume, centred on foreground
        # according to the pos/neg ratio.
        RandCropByPosNegLabeld(
            keys=["image", "label"],
            label_key="label",
            spatial_size=(args.roi_x, args.roi_y, args.roi_z),
            pos=args.pos,
            neg=args.neg,
            num_samples=args.sw_batch_size,
            image_key="image",
            image_threshold=0,
        ),
        transforms.RandFlipd(keys=["image", "label"], prob=args.RandFlipd_prob, spatial_axis=0),
        transforms.RandFlipd(keys=["image", "label"], prob=args.RandFlipd_prob, spatial_axis=1),
        transforms.RandFlipd(keys=["image", "label"], prob=args.RandFlipd_prob, spatial_axis=2),
        transforms.RandRotate90d(keys=["image", "label"], prob=args.RandRotate90d_prob, max_k=3),
        transforms.RandScaleIntensityd(keys="image", factors=0.1, prob=args.RandScaleIntensityd_prob),
        transforms.RandShiftIntensityd(keys="image", offsets=0.1, prob=args.RandShiftIntensityd_prob),
        # Drop name/meta keys so default collation doesn't choke on them.
        Delete_keys(keys=["image", "label"]),
    ]
    return base_trans, random_trans
class Delete_keys(MapTransform):
    """Strip bookkeeping entries ('name' and image/label meta dicts) from a
    data dictionary so only the usable keys remain."""

    def __call__(self, data):
        drop = ("name", "image_meta_dict", "label_meta_dict")
        return {key: value for key, value in data.items() if key not in drop}
================================================
FILE: Finetune/AbdomenAtlas/utils/mixup.py
================================================
import torch
import numpy as np
def mixup(inputs):
    """Cut-mix style augmentation: each sample is concatenated with a randomly
    permuted partner along one spatial axis (dim 2 or dim 3, chosen at random).

    Bug fix: the cut axis is now drawn ONCE per call instead of once per
    tensor, so the image and its label are always cut along the same axis and
    stay in correspondence.

    Args:
        inputs: list of 5D tensors (e.g. [image, label]) sharing batch size and
            spatial layout; the same permutation, cut point, and axis are
            applied to every tensor.

    Returns:
        List of mixed tensors with the same shapes as the inputs.
    """
    batch_size = inputs[0].size(0)
    perm = [p.tolist() for p in torch.randperm(batch_size)]
    # Cut point from Beta(0.2, 0.2), scaled by dim 2's size.
    # NOTE(review): the cut is also used on dim 3 below; this matches only for
    # cubic ROIs (as configured here) — confirm if non-cubic ROIs are used.
    lam = int(np.random.beta(0.2, 0.2) * inputs[0].size(2))
    # Draw the axis once so all tensors (image AND label) are mixed identically.
    use_dim3 = np.random.rand() < 0.5
    new_inputs = []
    for tensor in inputs:
        partner = tensor[perm]
        if use_dim3:
            mixed = torch.cat([tensor[:, :, :, 0:lam, :],
                               partner[:, :, :, lam:tensor.size(3), :]], dim=3)
        else:
            mixed = torch.cat([tensor[:, :, 0:lam, :, :],
                               partner[:, :, lam:tensor.size(2), :, :]], dim=2)
        new_inputs.append(mixed)
    return new_inputs
================================================
FILE: Finetune/AbdomenAtlas/utils/utils.py
================================================
# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.ndimage as ndimage
import torch
import os
import SimpleITK as sitk
def resample_3d(img, target_size):
    """Resample a 3-D array to ``target_size`` with nearest-neighbour zoom.

    Args:
        img: 3-D numpy array.
        target_size: desired (x, y, z) output shape.

    Returns:
        The resampled array of shape ``target_size``.
    """
    src_x, src_y, src_z = img.shape
    dst_x, dst_y, dst_z = target_size
    factors = (dst_x / float(src_x), dst_y / float(src_y), dst_z / float(src_z))
    # order=0 (nearest) keeps discrete label values intact; prefilter off
    # avoids the spline prefilter that would blur them.
    return ndimage.zoom(img, factors, order=0, prefilter=False)
def dice(x, y):
    """Dice overlap coefficient between two binary masks.

    Returns 0.0 when the reference mask ``y`` is empty (so absent organs
    score zero rather than dividing by zero).
    """
    reference_total = np.sum(y)
    if reference_total == 0:
        return 0.0
    overlap = np.sum(x * y)
    return 2 * overlap / (np.sum(x) + reference_total)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean.

        Args:
            val: latest measurement.
            n: weight / number of samples this measurement represents.
        """
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        # np.where guards the (never-hit in practice) zero-count case
        self.avg = np.where(self.count > 0, self.sum / self.count, self.sum)
def distributed_all_gather(
    tensor_list, valid_batch_size=None, out_numpy=False, world_size=None, no_barrier=False, is_valid=None
):
    """All-gather each tensor in ``tensor_list`` across distributed ranks.

    Args:
        tensor_list: tensors to gather; each is gathered independently.
        valid_batch_size: if given, keep only the first ``valid_batch_size``
            gathered entries (capped at ``world_size``).
        out_numpy: if True, convert every gathered tensor to a CPU numpy array.
        world_size: number of ranks; queried from torch.distributed when None.
        no_barrier: skip the synchronising barrier before gathering.
        is_valid: local validity flag for this rank; only honoured when
            ``valid_batch_size`` is None.  Entries from ranks whose flag
            gathers as False are dropped from the output.

    Returns:
        A list with one entry per input tensor; each entry is the list of
        gathered tensors (or numpy arrays when ``out_numpy`` is True).
    """
    if world_size is None:
        world_size = torch.distributed.get_world_size()
    if valid_batch_size is not None:
        # Cannot keep more gathered entries than there are ranks.
        valid_batch_size = min(valid_batch_size, world_size)
    elif is_valid is not None:
        # Promote the local flag to a bool tensor so it can be all-gathered.
        is_valid = torch.tensor(bool(is_valid), dtype=torch.bool, device=tensor_list[0].device)
    if not no_barrier:
        torch.distributed.barrier()
    tensor_list_out = []
    with torch.no_grad():
        if is_valid is not None:
            is_valid_list = [torch.zeros_like(is_valid) for _ in range(world_size)]
            torch.distributed.all_gather(is_valid_list, is_valid)
            is_valid = [x.item() for x in is_valid_list]
        for tensor in tensor_list:
            gather_list = [torch.zeros_like(tensor) for _ in range(world_size)]
            torch.distributed.all_gather(gather_list, tensor)
            if valid_batch_size is not None:
                gather_list = gather_list[:valid_batch_size]
            elif is_valid is not None:
                # NOTE(review): filters against the gathered bool *tensors*
                # (is_valid_list), relying on 0-d tensor truthiness; the
                # python list built above is not the one consulted here.
                gather_list = [g for g, v in zip(gather_list, is_valid_list) if v]
            if out_numpy:
                gather_list = [t.cpu().numpy() for t in gather_list]
            tensor_list_out.append(gather_list)
    return tensor_list_out
def color_map(dataset='pascal'):
    """Build a 256-entry RGB palette for segmentation visualisation.

    Args:
        dataset: 'pascal'/'coco' for the VOC bit-spread palette,
            'cityscapes' for the fixed 19-class Cityscapes colours.
            Any other value yields an all-zero palette.

    Returns:
        uint8 array of shape (256, 3).
    """
    cmap = np.zeros((256, 3), dtype='uint8')
    if dataset in ('pascal', 'coco'):
        # VOC scheme: the label index's bits are spread MSB-first across
        # the red, green and blue channels, three bits per round.
        for idx in range(256):
            red = green = blue = 0
            label = idx
            for shift in range(8):
                red |= ((label >> 0) & 1) << (7 - shift)
                green |= ((label >> 1) & 1) << (7 - shift)
                blue |= ((label >> 2) & 1) << (7 - shift)
                label >>= 3
            cmap[idx] = np.array([red, green, blue])
    elif dataset == 'cityscapes':
        palette = [
            (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
            (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
            (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60),
            (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
            (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 0),
        ]
        for idx, rgb in enumerate(palette):
            cmap[idx] = np.array(rgb)
        # index 255 is the ignore label — black
        cmap[255] = np.array([0, 0, 0])
    return cmap
def check_dir(dir):
    """Create directory ``dir`` (and any missing parents) if needed.

    Uses ``exist_ok=True`` instead of the old check-then-create pattern,
    which had a TOCTOU race: a concurrent worker could create the
    directory between the ``os.path.exists`` test and ``os.makedirs``,
    crashing with FileExistsError under multi-process training.

    Args:
        dir: path of the directory to ensure exists.
    """
    os.makedirs(dir, exist_ok=True)
def read(img):
    """Load an image file via SimpleITK and return it as a numpy array
    with the first axis moved last (axes reordered (0,1,2) -> (1,2,0))."""
    volume = sitk.GetArrayFromImage(sitk.ReadImage(img))
    return volume.transpose(1, 2, 0)
================================================
FILE: Finetune/Amos/check_test.py
================================================
import argparse
import os
from functools import partial
import nibabel as nib
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from utils.data_test import get_loader
from utils.utils import dice, resample_3d
from utils.utils import AverageMeter, distributed_all_gather
from monai.inferers import sliding_window_inference
from monai.data import decollate_batch
from monai.losses import DiceCELoss
from monai.metrics import DiceMetric
from monai.networks.nets import SwinUNETR
from monai.transforms import Activations, AsDiscrete, Compose
from monai.utils.enums import MetricReduction
import zipfile
import shutil
import SimpleITK as sitk
from tqdm import tqdm
from utils.utils import *
from PIL import Image
def norm(img):
    """Window CT-like intensities to uint8 for visualisation.

    Values below -175 become 0, values above 250 are clamped to 250, and
    the result is scaled by 255/250 before the uint8 cast.
    NOTE(review): intensities in (-175, 0) stay negative and wrap in the
    uint8 cast — this matches the original behaviour and is kept as-is.
    """
    windowed = np.where(img > 250, 250, img)
    windowed = np.where(img < -175, 0, windowed)
    return (255 * (windowed / 250)).astype(np.uint8)
def check_size():
    """Visual sanity check: dump one case's raw slices and colourised label
    slices as PNGs.

    Reads the first volume/label pair from the hard-coded AMOS training
    folders, prints spacing/direction metadata, and writes one raw image
    and one palette PNG per axial slice into ``./pred/view_tr``.
    Debugging utility with Windows paths — not part of the training flow.
    """
    data_path = "D:\data/amos22\imagesTr"
    pred_path = 'D:\data/amos22\labelsTr'
    view_path = './pred/view_tr'
    # Alternative input set (test-set predictions), kept for quick toggling:
    # data_path = "D:\data/amos22/imagesTs"
    # pred_path = './pred/test'
    # view_path = './pred/view_ts'
    check_dir(view_path)
    cmap = color_map()
    ls = os.listdir(pred_path)
    num = 0  # NOTE(review): unused counter, left as-is
    # for i in tqdm(ls):
    i = ls[0]  # only the first case is inspected
    # i = 'FLARETs_0031_0000.nii'
    img_path = os.path.join(data_path, i)  # i[:-7]+'_0000.nii.gz'
    img_itk = sitk.ReadImage(img_path)
    img = sitk.GetArrayFromImage(img_itk)
    print(img_itk.GetSpacing(), img_itk.GetDirection())
    # img = np.flip(img, 1)
    # img = np.flip(img, 2)
    pred = os.path.join(pred_path, i)
    pred_itk = sitk.ReadImage(pred)
    pred = sitk.GetArrayFromImage(pred_itk)
    print(pred_itk.GetSpacing(), pred_itk.GetDirection())
    # pred = pred.transpose()
    print(img.shape, pred.shape)
    c, h, w = img.shape
    for j in range(c):
        im = img[j, :, :]
        pre = pred[j, :, :].astype(np.uint8)
        # palette-mode image so label ids map through the colour table
        pre = Image.fromarray(pre, mode='P')
        pre.putpalette(cmap)
        im = norm(im)
        import cv2  # local import: cv2 is only needed by this debug helper
        cv2.imwrite(view_path + '/' + str(j) + '_raw.png', im)
        pre.save(view_path + '/' + str(j) + '_pred.png')
def rename():
    """Strip the trailing 13 characters from every file name under
    ./pred/test and re-append the '.nii.gz' extension.

    Intended to drop a modality/prediction suffix so the outputs match
    the challenge's expected naming scheme.
    """
    pred_path = './pred/test'
    for fname in os.listdir(pred_path):
        src = os.path.join(pred_path, fname)
        dst = os.path.join(pred_path, fname[:-13] + '.nii.gz')
        os.rename(src, dst)
def check_direction():
    """Print voxel spacing and direction cosines of every volume in a folder.

    The three consecutive ``data_path`` assignments act as a manual toggle
    — only the last (BTCV) takes effect; the inline comments record the
    direction signs previously observed for each dataset.
    """
    data_path = "D:\data\FLARE22\imagesTr"  # (-1, -1, 1)
    data_path = "D:\data/amos22\imagesTr"  # (1, -1, 1)
    data_path = 'D:\data\BTCV\imagesTr'  # (1, 1, 1)
    ls = os.listdir(data_path)
    for i in tqdm(ls):
        img_path = os.path.join(data_path, i)  # i[:-7]+'_0000.nii.gz'
        img_itk = sitk.ReadImage(img_path)
        print(img_itk.GetSpacing(), img_itk.GetDirection())
if __name__ == "__main__":
    # Script entry point: inspect spacing/direction metadata of the
    # currently selected dataset folder.
    check_direction()
================================================
FILE: Finetune/Amos/dataset/__init__.py
================================================
================================================
FILE: Finetune/Amos/dataset/dataset.json
================================================
{
"description": "0",
"labels": {
"0": "background",
"1": "Liver",
"10": "Esophagus",
"11": "Stomach",
"12": "Duodenum",
"13": "Left Kidney",
"2": "Right kidney",
"3": "Spleen",
"4": "Pancreas",
"5": "Aorta",
"6": "Inferior vena cava",
"7": "Right adrenal gland",
"8": "Left adrenal gland",
"9": "Gallbladder"
},
"licence": "hands off!",
"modality": {
"0": "CT"
},
"name": "FLARE22",
"numTest": 200,
"numTraining": 50,
"reference": "0",
"release": "0.0",
"tensorImageSize": "4D",
"test": [
"./imagesTs/FLARETs_0001_0000.nii.gz",
"./imagesTs/FLARETs_0002_0000.nii.gz",
"./imagesTs/FLARETs_0003_0000.nii.gz",
"./imagesTs/FLARETs_0004_0000.nii.gz",
"./imagesTs/FLARETs_0005_0000.nii.gz",
"./imagesTs/FLARETs_0006_0000.nii.gz",
"./imagesTs/FLARETs_0007_0000.nii.gz",
"./imagesTs/FLARETs_0008_0000.nii.gz",
"./imagesTs/FLARETs_0009_0000.nii.gz",
"./imagesTs/FLARETs_0010_0000.nii.gz",
"./imagesTs/FLARETs_0011_0000.nii.gz",
"./imagesTs/FLARETs_0012_0000.nii.gz",
"./imagesTs/FLARETs_0013_0000.nii.gz",
"./imagesTs/FLARETs_0014_0000.nii.gz",
"./imagesTs/FLARETs_0015_0000.nii.gz",
"./imagesTs/FLARETs_0016_0000.nii.gz",
"./imagesTs/FLARETs_0017_0000.nii.gz",
"./imagesTs/FLARETs_0018_0000.nii.gz",
"./imagesTs/FLARETs_0019_0000.nii.gz",
"./imagesTs/FLARETs_0020_0000.nii.gz",
"./imagesTs/FLARETs_0021_0000.nii.gz",
"./imagesTs/FLARETs_0022_0000.nii.gz",
"./imagesTs/FLARETs_0023_0000.nii.gz",
"./imagesTs/FLARETs_0024_0000.nii.gz",
"./imagesTs/FLARETs_0025_0000.nii.gz",
"./imagesTs/FLARETs_0026_0000.nii.gz",
"./imagesTs/FLARETs_0027_0000.nii.gz",
"./imagesTs/FLARETs_0028_0000.nii.gz",
"./imagesTs/FLARETs_0029_0000.nii.gz",
"./imagesTs/FLARETs_0030_0000.nii.gz",
"./imagesTs/FLARETs_0031_0000.nii.gz",
"./imagesTs/FLARETs_0032_0000.nii.gz",
"./imagesTs/FLARETs_0033_0000.nii.gz",
"./imagesTs/FLARETs_0034_0000.nii.gz",
"./imagesTs/FLARETs_0035_0000.nii.gz",
"./imagesTs/FLARETs_0036_0000.nii.gz",
"./imagesTs/FLARETs_0037_0000.nii.gz",
"./imagesTs/FLARETs_0038_0000.nii.gz",
"./imagesTs/FLARETs_0039_0000.nii.gz",
"./imagesTs/FLARETs_0040_0000.nii.gz",
"./imagesTs/FLARETs_0041_0000.nii.gz",
"./imagesTs/FLARETs_0042_0000.nii.gz",
"./imagesTs/FLARETs_0043_0000.nii.gz",
"./imagesTs/FLARETs_0044_0000.nii.gz",
"./imagesTs/FLARETs_0045_0000.nii.gz",
"./imagesTs/FLARETs_0046_0000.nii.gz",
"./imagesTs/FLARETs_0047_0000.nii.gz",
"./imagesTs/FLARETs_0048_0000.nii.gz",
"./imagesTs/FLARETs_0049_0000.nii.gz",
"./imagesTs/FLARETs_0050_0000.nii.gz",
"./imagesTs/FLARETs_0051_0000.nii.gz",
"./imagesTs/FLARETs_0052_0000.nii.gz",
"./imagesTs/FLARETs_0053_0000.nii.gz",
"./imagesTs/FLARETs_0054_0000.nii.gz",
"./imagesTs/FLARETs_0055_0000.nii.gz",
"./imagesTs/FLARETs_0056_0000.nii.gz",
"./imagesTs/FLARETs_0057_0000.nii.gz",
"./imagesTs/FLARETs_0058_0000.nii.gz",
"./imagesTs/FLARETs_0059_0000.nii.gz",
"./imagesTs/FLARETs_0060_0000.nii.gz",
"./imagesTs/FLARETs_0061_0000.nii.gz",
"./imagesTs/FLARETs_0062_0000.nii.gz",
"./imagesTs/FLARETs_0063_0000.nii.gz",
"./imagesTs/FLARETs_0064_0000.nii.gz",
"./imagesTs/FLARETs_0065_0000.nii.gz",
"./imagesTs/FLARETs_0066_0000.nii.gz",
"./imagesTs/FLARETs_0067_0000.nii.gz",
"./imagesTs/FLARETs_0068_0000.nii.gz",
"./imagesTs/FLARETs_0069_0000.nii.gz",
"./imagesTs/FLARETs_0070_0000.nii.gz",
"./imagesTs/FLARETs_0071_0000.nii.gz",
"./imagesTs/FLARETs_0072_0000.nii.gz",
"./imagesTs/FLARETs_0073_0000.nii.gz",
"./imagesTs/FLARETs_0074_0000.nii.gz",
"./imagesTs/FLARETs_0075_0000.nii.gz",
"./imagesTs/FLARETs_0076_0000.nii.gz",
"./imagesTs/FLARETs_0077_0000.nii.gz",
"./imagesTs/FLARETs_0078_0000.nii.gz",
"./imagesTs/FLARETs_0079_0000.nii.gz",
"./imagesTs/FLARETs_0080_0000.nii.gz",
"./imagesTs/FLARETs_0081_0000.nii.gz",
"./imagesTs/FLARETs_0082_0000.nii.gz",
"./imagesTs/FLARETs_0083_0000.nii.gz",
"./imagesTs/FLARETs_0084_0000.nii.gz",
"./imagesTs/FLARETs_0085_0000.nii.gz",
"./imagesTs/FLARETs_0086_0000.nii.gz",
"./imagesTs/FLARETs_0087_0000.nii.gz",
"./imagesTs/FLARETs_0088_0000.nii.gz",
"./imagesTs/FLARETs_0089_0000.nii.gz",
"./imagesTs/FLARETs_0090_0000.nii.gz",
"./imagesTs/FLARETs_0091_0000.nii.gz",
"./imagesTs/FLARETs_0092_0000.nii.gz",
"./imagesTs/FLARETs_0093_0000.nii.gz",
"./imagesTs/FLARETs_0094_0000.nii.gz",
"./imagesTs/FLARETs_0095_0000.nii.gz",
"./imagesTs/FLARETs_0096_0000.nii.gz",
"./imagesTs/FLARETs_0097_0000.nii.gz",
"./imagesTs/FLARETs_0098_0000.nii.gz",
"./imagesTs/FLARETs_0099_0000.nii.gz",
"./imagesTs/FLARETs_0100_0000.nii.gz",
"./imagesTs/FLARETs_0101_0000.nii.gz",
"./imagesTs/FLARETs_0102_0000.nii.gz",
"./imagesTs/FLARETs_0103_0000.nii.gz",
"./imagesTs/FLARETs_0104_0000.nii.gz",
"./imagesTs/FLARETs_0105_0000.nii.gz",
"./imagesTs/FLARETs_0106_0000.nii.gz",
"./imagesTs/FLARETs_0107_0000.nii.gz",
"./imagesTs/FLARETs_0108_0000.nii.gz",
"./imagesTs/FLARETs_0109_0000.nii.gz",
"./imagesTs/FLARETs_0110_0000.nii.gz",
"./imagesTs/FLARETs_0111_0000.nii.gz",
"./imagesTs/FLARETs_0112_0000.nii.gz",
"./imagesTs/FLARETs_0113_0000.nii.gz",
"./imagesTs/FLARETs_0114_0000.nii.gz",
"./imagesTs/FLARETs_0115_0000.nii.gz",
"./imagesTs/FLARETs_0116_0000.nii.gz",
"./imagesTs/FLARETs_0117_0000.nii.gz",
"./imagesTs/FLARETs_0118_0000.nii.gz",
"./imagesTs/FLARETs_0119_0000.nii.gz",
"./imagesTs/FLARETs_0120_0000.nii.gz",
"./imagesTs/FLARETs_0121_0000.nii.gz",
"./imagesTs/FLARETs_0122_0000.nii.gz",
"./imagesTs/FLARETs_0123_0000.nii.gz",
"./imagesTs/FLARETs_0124_0000.nii.gz",
"./imagesTs/FLARETs_0125_0000.nii.gz",
"./imagesTs/FLARETs_0126_0000.nii.gz",
"./imagesTs/FLARETs_0127_0000.nii.gz",
"./imagesTs/FLARETs_0128_0000.nii.gz",
"./imagesTs/FLARETs_0129_0000.nii.gz",
"./imagesTs/FLARETs_0130_0000.nii.gz",
"./imagesTs/FLARETs_0131_0000.nii.gz",
"./imagesTs/FLARETs_0132_0000.nii.gz",
"./imagesTs/FLARETs_0133_0000.nii.gz",
"./imagesTs/FLARETs_0134_0000.nii.gz",
"./imagesTs/FLARETs_0135_0000.nii.gz",
"./imagesTs/FLARETs_0136_0000.nii.gz",
"./imagesTs/FLARETs_0137_0000.nii.gz",
"./imagesTs/FLARETs_0138_0000.nii.gz",
"./imagesTs/FLARETs_0139_0000.nii.gz",
"./imagesTs/FLARETs_0140_0000.nii.gz",
"./imagesTs/FLARETs_0141_0000.nii.gz",
"./imagesTs/FLARETs_0142_0000.nii.gz",
"./imagesTs/FLARETs_0143_0000.nii.gz",
"./imagesTs/FLARETs_0144_0000.nii.gz",
"./imagesTs/FLARETs_0145_0000.nii.gz",
"./imagesTs/FLARETs_0146_0000.nii.gz",
"./imagesTs/FLARETs_0147_0000.nii.gz",
"./imagesTs/FLARETs_0148_0000.nii.gz",
"./imagesTs/FLARETs_0149_0000.nii.gz",
"./imagesTs/FLARETs_0150_0000.nii.gz",
"./imagesTs/FLARETs_0151_0000.nii.gz",
"./imagesTs/FLARETs_0152_0000.nii.gz",
"./imagesTs/FLARETs_0153_0000.nii.gz",
"./imagesTs/FLARETs_0154_0000.nii.gz",
"./imagesTs/FLARETs_0155_0000.nii.gz",
"./imagesTs/FLARETs_0156_0000.nii.gz",
"./imagesTs/FLARETs_0157_0000.nii.gz",
"./imagesTs/FLARETs_0158_0000.nii.gz",
"./imagesTs/FLARETs_0159_0000.nii.gz",
"./imagesTs/FLARETs_0160_0000.nii.gz",
"./imagesTs/FLARETs_0161_0000.nii.gz",
"./imagesTs/FLARETs_0162_0000.nii.gz",
"./imagesTs/FLARETs_0163_0000.nii.gz",
"./imagesTs/FLARETs_0164_0000.nii.gz",
"./imagesTs/FLARETs_0165_0000.nii.gz",
"./imagesTs/FLARETs_0166_0000.nii.gz",
"./imagesTs/FLARETs_0167_0000.nii.gz",
"./imagesTs/FLARETs_0168_0000.nii.gz",
"./imagesTs/FLARETs_0169_0000.nii.gz",
"./imagesTs/FLARETs_0170_0000.nii.gz",
"./imagesTs/FLARETs_0171_0000.nii.gz",
"./imagesTs/FLARETs_0172_0000.nii.gz",
"./imagesTs/FLARETs_0173_0000.nii.gz",
"./imagesTs/FLARETs_0174_0000.nii.gz",
"./imagesTs/FLARETs_0175_0000.nii.gz",
"./imagesTs/FLARETs_0176_0000.nii.gz",
"./imagesTs/FLARETs_0177_0000.nii.gz",
"./imagesTs/FLARETs_0178_0000.nii.gz",
"./imagesTs/FLARETs_0179_0000.nii.gz",
"./imagesTs/FLARETs_0180_0000.nii.gz",
"./imagesTs/FLARETs_0181_0000.nii.gz",
"./imagesTs/FLARETs_0182_0000.nii.gz",
"./imagesTs/FLARETs_0183_0000.nii.gz",
"./imagesTs/FLARETs_0184_0000.nii.gz",
"./imagesTs/FLARETs_0185_0000.nii.gz",
"./imagesTs/FLARETs_0186_0000.nii.gz",
"./imagesTs/FLARETs_0187_0000.nii.gz",
"./imagesTs/FLARETs_0188_0000.nii.gz",
"./imagesTs/FLARETs_0189_0000.nii.gz",
"./imagesTs/FLARETs_0190_0000.nii.gz",
"./imagesTs/FLARETs_0191_0000.nii.gz",
"./imagesTs/FLARETs_0192_0000.nii.gz",
"./imagesTs/FLARETs_0193_0000.nii.gz",
"./imagesTs/FLARETs_0194_0000.nii.gz",
"./imagesTs/FLARETs_0195_0000.nii.gz",
"./imagesTs/FLARETs_0196_0000.nii.gz",
"./imagesTs/FLARETs_0197_0000.nii.gz",
"./imagesTs/FLARETs_0198_0000.nii.gz",
"./imagesTs/FLARETs_0199_0000.nii.gz",
"./imagesTs/FLARETs_0200_0000.nii.gz"
],
"validation": [{
"image": "./imagesTr/FLARE22_Tr_0001_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0001.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0002_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0002.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0003_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0003.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0004_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0004.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0005_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0005.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0006_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0006.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0007_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0007.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0008_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0008.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0009_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0009.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0010_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0010.nii.gz"
}
],
"training": [
{
"image": "./imagesTr/FLARE22_Tr_0011_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0011.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0012_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0012.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0013_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0013.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0014_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0014.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0015_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0015.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0016_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0016.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0017_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0017.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0018_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0018.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0019_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0019.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0020_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0020.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0021_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0021.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0022_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0022.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0023_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0023.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0024_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0024.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0025_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0025.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0026_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0026.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0027_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0027.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0028_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0028.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0029_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0029.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0030_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0030.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0031_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0031.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0032_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0032.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0033_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0033.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0034_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0034.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0035_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0035.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0036_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0036.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0037_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0037.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0038_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0038.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0039_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0039.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0040_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0040.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0041_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0041.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0042_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0042.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0043_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0043.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0044_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0044.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0045_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0045.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0046_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0046.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0047_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0047.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0048_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0048.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0049_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0049.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0050_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0050.nii.gz"
}
]
}
================================================
FILE: Finetune/Amos/dataset/dataset_test50.json
================================================
{
"description": "0",
"labels": {
"0": "background",
"1": "Liver",
"10": "Esophagus",
"11": "Stomach",
"12": "Duodenum",
"13": "Left Kidney",
"2": "Right kidney",
"3": "Spleen",
"4": "Pancreas",
"5": "Aorta",
"6": "Inferior vena cava",
"7": "Right adrenal gland",
"8": "Left adrenal gland",
"9": "Gallbladder"
},
"licence": "hands off!",
"modality": {
"0": "CT"
},
"name": "FLARE22",
"numTest": 200,
"numTraining": 50,
"reference": "0",
"release": "0.0",
"tensorImageSize": "4D",
"test": [
"./imagesTs/FLARETs_0001_0000.nii.gz",
"./imagesTs/FLARETs_0002_0000.nii.gz",
"./imagesTs/FLARETs_0003_0000.nii.gz",
"./imagesTs/FLARETs_0004_0000.nii.gz",
"./imagesTs/FLARETs_0005_0000.nii.gz",
"./imagesTs/FLARETs_0006_0000.nii.gz",
"./imagesTs/FLARETs_0007_0000.nii.gz",
"./imagesTs/FLARETs_0008_0000.nii.gz",
"./imagesTs/FLARETs_0009_0000.nii.gz",
"./imagesTs/FLARETs_0010_0000.nii.gz",
"./imagesTs/FLARETs_0011_0000.nii.gz",
"./imagesTs/FLARETs_0012_0000.nii.gz",
"./imagesTs/FLARETs_0013_0000.nii.gz",
"./imagesTs/FLARETs_0014_0000.nii.gz",
"./imagesTs/FLARETs_0015_0000.nii.gz",
"./imagesTs/FLARETs_0016_0000.nii.gz",
"./imagesTs/FLARETs_0017_0000.nii.gz",
"./imagesTs/FLARETs_0018_0000.nii.gz",
"./imagesTs/FLARETs_0019_0000.nii.gz",
"./imagesTs/FLARETs_0020_0000.nii.gz",
"./imagesTs/FLARETs_0021_0000.nii.gz",
"./imagesTs/FLARETs_0022_0000.nii.gz",
"./imagesTs/FLARETs_0023_0000.nii.gz",
"./imagesTs/FLARETs_0024_0000.nii.gz",
"./imagesTs/FLARETs_0025_0000.nii.gz",
"./imagesTs/FLARETs_0026_0000.nii.gz",
"./imagesTs/FLARETs_0027_0000.nii.gz",
"./imagesTs/FLARETs_0028_0000.nii.gz",
"./imagesTs/FLARETs_0029_0000.nii.gz",
"./imagesTs/FLARETs_0030_0000.nii.gz",
"./imagesTs/FLARETs_0031_0000.nii.gz",
"./imagesTs/FLARETs_0032_0000.nii.gz",
"./imagesTs/FLARETs_0033_0000.nii.gz",
"./imagesTs/FLARETs_0034_0000.nii.gz",
"./imagesTs/FLARETs_0035_0000.nii.gz",
"./imagesTs/FLARETs_0036_0000.nii.gz",
"./imagesTs/FLARETs_0037_0000.nii.gz",
"./imagesTs/FLARETs_0038_0000.nii.gz",
"./imagesTs/FLARETs_0039_0000.nii.gz",
"./imagesTs/FLARETs_0040_0000.nii.gz",
"./imagesTs/FLARETs_0041_0000.nii.gz",
"./imagesTs/FLARETs_0042_0000.nii.gz",
"./imagesTs/FLARETs_0043_0000.nii.gz",
"./imagesTs/FLARETs_0044_0000.nii.gz",
"./imagesTs/FLARETs_0045_0000.nii.gz",
"./imagesTs/FLARETs_0046_0000.nii.gz",
"./imagesTs/FLARETs_0047_0000.nii.gz",
"./imagesTs/FLARETs_0048_0000.nii.gz",
"./imagesTs/FLARETs_0049_0000.nii.gz",
"./imagesTs/FLARETs_0050_0000.nii.gz"
],
"validation": [{
"image": "./imagesTr/FLARE22_Tr_0001_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0001.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0002_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0002.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0003_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0003.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0004_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0004.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0005_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0005.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0006_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0006.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0007_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0007.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0008_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0008.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0009_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0009.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0010_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0010.nii.gz"
}
],
"training": [
{
"image": "./imagesTr/FLARE22_Tr_0011_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0011.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0012_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0012.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0013_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0013.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0014_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0014.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0015_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0015.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0016_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0016.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0017_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0017.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0018_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0018.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0019_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0019.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0020_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0020.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0021_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0021.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0022_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0022.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0023_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0023.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0024_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0024.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0025_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0025.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0026_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0026.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0027_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0027.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0028_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0028.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0029_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0029.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0030_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0030.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0031_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0031.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0032_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0032.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0033_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0033.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0034_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0034.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0035_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0035.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0036_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0036.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0037_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0037.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0038_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0038.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0039_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0039.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0040_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0040.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0041_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0041.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0042_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0042.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0043_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0043.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0044_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0044.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0045_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0045.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0046_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0046.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0047_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0047.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0048_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0048.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0049_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0049.nii.gz"
},
{
"image": "./imagesTr/FLARE22_Tr_0050_0000.nii.gz",
"label": "./labelsTr/FLARE22_Tr_0050.nii.gz"
}
]
}
================================================
FILE: Finetune/Amos/dataset_CT.json
================================================
{"name": "AMOS", "description": "Amos: A large-scale abdominal multi-organ benchmark for versatile medical image segmentation", "author": "Yuanfeng Ji", "reference": "SRIDB x CUHKSZ x HKU x LGCHSZ x LGPHSZ", "licence": "CC-BY-SA 4.0", "release": "1.0 01/05/2022", "contact": "u3008013@connect.hku.hk", "tensorImageSize": "3D", "modality": {"0": "CT"},
"labels": {"0": "background",
"1": "spleen",
"2": "right kidney",
"3": "left kidney",
"4": "gall bladder",
"5": "esophagus", "6": "liver",
"7": "stomach", "8": "arota",
"9": "postcava", "10": "pancreas",
"11": "right adrenal gland", "12": "left adrenal gland",
"13": "duodenum", "14": "bladder", "15": "prostate/uterus"},
"numTraining": 240, "numValidation": 120, "numTest": 240,
"training": [{"image": "./imagesTr/amos_0001.nii.gz", "label": "./labelsTr/amos_0001.nii.gz"}, {"image": "./imagesTr/amos_0004.nii.gz", "label": "./labelsTr/amos_0004.nii.gz"}, {"image": "./imagesTr/amos_0005.nii.gz", "label": "./labelsTr/amos_0005.nii.gz"}, {"image": "./imagesTr/amos_0006.nii.
gitextract_xwu1bqsv/ ├── Finetune/ │ ├── AbdomenAtlas/ │ │ ├── Atlas_test.py │ │ ├── Atlas_test.sh │ │ ├── check.py │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ ├── dataloader_bdmap.py │ │ │ ├── dataloader_test.py │ │ │ └── dataset_list/ │ │ │ └── AbdomenAtlas1.0.txt │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── preprocess/ │ │ │ └── try_load.py │ │ ├── readme.md │ │ ├── requirements.txt │ │ ├── train.sh │ │ ├── train.slurm │ │ ├── trainer.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── data_trans.py │ │ ├── mixup.py │ │ └── utils.py │ ├── Amos/ │ │ ├── check_test.py │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ ├── dataset.json │ │ │ └── dataset_test50.json │ │ ├── dataset_CT.json │ │ ├── gen_json.py │ │ ├── inferers.py │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── pre_cache.py │ │ ├── test.py │ │ ├── train.sh │ │ ├── trainer.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── data_test.py │ │ │ ├── data_utils.py │ │ │ └── utils.py │ │ └── val.py │ ├── BTCV/ │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ └── dataset_0.json │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── trainer.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── data_test.py │ │ │ ├── data_utils.py │ │ │ └── utils.py │ │ └── val.py │ ├── CC-CCII/ │ │ ├── csv/ │ │ │ ├── CC_CCII_fold0_train.csv │ │ │ ├── CC_CCII_fold0_valid.csv │ │ │ ├── CC_CCII_fold1_train.csv │ │ │ ├── CC_CCII_fold1_valid.csv │ │ │ ├── CC_CCII_fold2_train.csv │ │ │ ├── CC_CCII_fold2_valid.csv │ │ │ └── CC_CCII_metadata.csv │ │ ├── dataset/ │ │ │ └── __init__.py │ │ ├── eval.py │ │ ├── main.py │ │ ├── model.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── train.sh │ │ ├── trainer.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py │ ├── Flare22/ │ │ ├── __init__.py │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ ├── dataset.json │ │ │ └── 
dataset_test50.json │ │ ├── inferers.py │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── train.sh │ │ ├── trainer.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── data_test.py │ │ │ ├── data_utils.py │ │ │ └── utils.py │ │ └── val.py │ ├── MM-WHS/ │ │ ├── dataset.json │ │ ├── inferers.py │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── pretrained_models/ │ │ │ └── __init__.py │ │ ├── test.py │ │ ├── train.sh │ │ ├── trainer.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py │ ├── Word/ │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ └── dataset_word.json │ │ ├── main.py │ │ ├── optimizers/ │ │ │ ├── __init__.py │ │ │ └── lr_scheduler.py │ │ ├── train.sh │ │ ├── train.slurm │ │ ├── trainer.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── utils.py │ └── nnUNet/ │ ├── LICENSE │ ├── documentation/ │ │ ├── __init__.py │ │ ├── benchmarking.md │ │ ├── changelog.md │ │ ├── competitions/ │ │ │ └── AutoPETII.md │ │ ├── convert_msd_dataset.md │ │ ├── dataset_format.md │ │ ├── dataset_format_inference.md │ │ ├── explanation_normalization.md │ │ ├── explanation_plans_files.md │ │ ├── extending_nnunet.md │ │ ├── how_to_use_nnunet.md │ │ ├── installation_instructions.md │ │ ├── manual_data_splits.md │ │ ├── pretraining_and_finetuning.md │ │ ├── region_based_training.md │ │ ├── run_inference_with_pretrained_models.md │ │ ├── set_environment_variables.md │ │ ├── setting_up_paths.md │ │ └── tldr_migration_guide_from_v1.md │ ├── msd.txt │ ├── nnunetv2/ │ │ ├── __init__.py │ │ ├── batch_running/ │ │ │ ├── __init__.py │ │ │ ├── benchmarking/ │ │ │ │ ├── __init__.py │ │ │ │ ├── generate_benchmarking_commands.py │ │ │ │ └── summarize_benchmark_results.py │ │ │ ├── collect_results_custom_Decathlon.py │ │ │ ├── collect_results_custom_Decathlon_2d.py │ │ │ ├── generate_lsf_runs_customDecathlon.py │ │ │ └── release_trainings/ │ │ │ ├── __init__.py │ │ │ └── 
nnunetv2_v1/ │ │ │ ├── __init__.py │ │ │ ├── collect_results.py │ │ │ └── generate_lsf_commands.py │ │ ├── configuration.py │ │ ├── dataset_conversion/ │ │ │ ├── Dataset017_BTCV.py │ │ │ ├── Dataset027_ACDC.py │ │ │ ├── Dataset073_Fluo_C3DH_A549_SIM.py │ │ │ ├── Dataset114_MNMs.py │ │ │ ├── Dataset115_EMIDEC.py │ │ │ ├── Dataset120_RoadSegmentation.py │ │ │ ├── Dataset137_BraTS21.py │ │ │ ├── Dataset218_Amos2022_task1.py │ │ │ ├── Dataset219_Amos2022_task2.py │ │ │ ├── Dataset220_KiTS2023.py │ │ │ ├── Dataset221_AutoPETII_2023.py │ │ │ ├── Dataset988_dummyDataset4.py │ │ │ ├── __init__.py │ │ │ ├── convert_MSD_dataset.py │ │ │ ├── convert_raw_dataset_from_old_nnunet_format.py │ │ │ ├── datasets_for_integration_tests/ │ │ │ │ ├── Dataset996_IntegrationTest_Hippocampus_regions_ignore.py │ │ │ │ ├── Dataset997_IntegrationTest_Hippocampus_regions.py │ │ │ │ ├── Dataset998_IntegrationTest_Hippocampus_ignore.py │ │ │ │ ├── Dataset999_IntegrationTest_Hippocampus.py │ │ │ │ └── __init__.py │ │ │ └── generate_dataset_json.py │ │ ├── ensembling/ │ │ │ ├── __init__.py │ │ │ └── ensemble.py │ │ ├── evaluation/ │ │ │ ├── __init__.py │ │ │ ├── accumulate_cv_results.py │ │ │ ├── evaluate_predictions.py │ │ │ └── find_best_configuration.py │ │ ├── experiment_planning/ │ │ │ ├── __init__.py │ │ │ ├── dataset_fingerprint/ │ │ │ │ ├── __init__.py │ │ │ │ └── fingerprint_extractor.py │ │ │ ├── experiment_planners/ │ │ │ │ ├── __init__.py │ │ │ │ ├── default_experiment_planner.py │ │ │ │ ├── network_topology.py │ │ │ │ ├── readme.md │ │ │ │ └── resencUNet_planner.py │ │ │ ├── plan_and_preprocess_api.py │ │ │ ├── plan_and_preprocess_entrypoints.py │ │ │ ├── plans_for_pretraining/ │ │ │ │ ├── __init__.py │ │ │ │ └── move_plans_between_datasets.py │ │ │ └── verify_dataset_integrity.py │ │ ├── imageio/ │ │ │ ├── __init__.py │ │ │ ├── base_reader_writer.py │ │ │ ├── natural_image_reader_writer.py │ │ │ ├── nibabel_reader_writer.py │ │ │ ├── reader_writer_registry.py │ │ │ ├── readme.md │ │ 
│ ├── simpleitk_reader_writer.py │ │ │ └── tif_reader_writer.py │ │ ├── inference/ │ │ │ ├── __init__.py │ │ │ ├── data_iterators.py │ │ │ ├── examples.py │ │ │ ├── export_prediction.py │ │ │ ├── predict_from_raw_data.py │ │ │ ├── readme.md │ │ │ └── sliding_window_prediction.py │ │ ├── model_sharing/ │ │ │ ├── __init__.py │ │ │ ├── entry_points.py │ │ │ ├── model_download.py │ │ │ ├── model_export.py │ │ │ └── model_import.py │ │ ├── paths.py │ │ ├── postprocessing/ │ │ │ ├── __init__.py │ │ │ └── remove_connected_components.py │ │ ├── preprocessing/ │ │ │ ├── __init__.py │ │ │ ├── cropping/ │ │ │ │ ├── __init__.py │ │ │ │ └── cropping.py │ │ │ ├── normalization/ │ │ │ │ ├── __init__.py │ │ │ │ ├── default_normalization_schemes.py │ │ │ │ ├── map_channel_name_to_normalization.py │ │ │ │ └── readme.md │ │ │ ├── preprocessors/ │ │ │ │ ├── __init__.py │ │ │ │ └── default_preprocessor.py │ │ │ └── resampling/ │ │ │ ├── __init__.py │ │ │ ├── default_resampling.py │ │ │ └── utils.py │ │ ├── run/ │ │ │ ├── __init__.py │ │ │ ├── load_pretrained_weights.py │ │ │ └── run_training.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ └── integration_tests/ │ │ │ ├── __init__.py │ │ │ ├── add_lowres_and_cascade.py │ │ │ ├── cleanup_integration_test.py │ │ │ ├── lsf_commands.sh │ │ │ ├── prepare_integration_tests.sh │ │ │ ├── readme.md │ │ │ ├── run_integration_test.sh │ │ │ ├── run_integration_test_bestconfig_inference.py │ │ │ └── run_integration_test_trainingOnly_DDP.sh │ │ ├── training/ │ │ │ ├── __init__.py │ │ │ ├── data_augmentation/ │ │ │ │ ├── __init__.py │ │ │ │ ├── compute_initial_patch_size.py │ │ │ │ └── custom_transforms/ │ │ │ │ ├── __init__.py │ │ │ │ ├── cascade_transforms.py │ │ │ │ ├── deep_supervision_donwsampling.py │ │ │ │ ├── limited_length_multithreaded_augmenter.py │ │ │ │ ├── manipulating_data_dict.py │ │ │ │ ├── masking.py │ │ │ │ ├── region_based_training.py │ │ │ │ └── transforms_for_dummy_2d.py │ │ │ ├── dataloading/ │ │ │ │ ├── __init__.py │ │ │ │ ├── 
base_data_loader.py │ │ │ │ ├── data_loader_2d.py │ │ │ │ ├── data_loader_3d.py │ │ │ │ ├── nnunet_dataset.py │ │ │ │ └── utils.py │ │ │ ├── logging/ │ │ │ │ ├── __init__.py │ │ │ │ └── nnunet_logger.py │ │ │ ├── loss/ │ │ │ │ ├── __init__.py │ │ │ │ ├── compound_losses.py │ │ │ │ ├── deep_supervision.py │ │ │ │ ├── dice.py │ │ │ │ └── robust_ce_loss.py │ │ │ ├── lr_scheduler/ │ │ │ │ ├── __init__.py │ │ │ │ └── polylr.py │ │ │ └── nnUNetTrainer/ │ │ │ ├── __init__.py │ │ │ ├── nnUNetTrainer.py │ │ │ ├── nnUNetTrainer_swin.py │ │ │ ├── variants/ │ │ │ │ ├── __init__.py │ │ │ │ ├── benchmarking/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nnUNetTrainerBenchmark_5epochs.py │ │ │ │ │ └── nnUNetTrainerBenchmark_5epochs_noDataLoading.py │ │ │ │ ├── data_augmentation/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nnUNetTrainerDA5.py │ │ │ │ │ ├── nnUNetTrainerDAOrd0.py │ │ │ │ │ ├── nnUNetTrainerNoDA.py │ │ │ │ │ └── nnUNetTrainerNoMirroring.py │ │ │ │ ├── loss/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nnUNetTrainerCELoss.py │ │ │ │ │ ├── nnUNetTrainerDiceLoss.py │ │ │ │ │ └── nnUNetTrainerTopkLoss.py │ │ │ │ ├── lr_schedule/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── nnUNetTrainerCosAnneal.py │ │ │ │ ├── network_architecture/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nnUNetTrainerBN.py │ │ │ │ │ └── nnUNetTrainerNoDeepSupervision.py │ │ │ │ ├── optimizer/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nnUNetTrainerAdam.py │ │ │ │ │ └── nnUNetTrainerAdan.py │ │ │ │ ├── sampling/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── nnUNetTrainer_probabilisticOversampling.py │ │ │ │ └── training_length/ │ │ │ │ ├── __init__.py │ │ │ │ ├── nnUNetTrainer_Xepochs.py │ │ │ │ └── nnUNetTrainer_Xepochs_NoMirroring.py │ │ │ └── vit.py │ │ └── utilities/ │ │ ├── __init__.py │ │ ├── collate_outputs.py │ │ ├── dataset_name_id_conversion.py │ │ ├── ddp_allgather.py │ │ ├── default_n_proc_DA.py │ │ ├── file_path_utilities.py │ │ ├── find_class_by_name.py │ │ ├── get_network_from_plans.py │ │ ├── helpers.py │ │ ├── 
json_export.py │ │ ├── label_handling/ │ │ │ ├── __init__.py │ │ │ └── label_handling.py │ │ ├── network_initialization.py │ │ ├── overlay_plots.py │ │ ├── plans_handling/ │ │ │ ├── __init__.py │ │ │ └── plans_handler.py │ │ └── utils.py │ ├── nnunetv2.egg-info/ │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ ├── entry_points.txt │ │ ├── requires.txt │ │ └── top_level.txt │ ├── pyproject.toml │ └── setup.py ├── LICENSE ├── README.md ├── jsons/ │ ├── HNSCC.json │ ├── Totalsegmentator_dataset.json │ ├── __init__.py │ ├── btcv.json │ ├── dataset_LUNA16_0.json │ ├── dataset_TCIAcovid19_0.json │ ├── flare23.json │ └── stoic21.json ├── models/ │ └── voco_head.py ├── optimizers/ │ ├── __init__.py │ └── lr_scheduler.py ├── requirements.txt ├── train.sh ├── utils/ │ ├── __init__.py │ ├── data_utils.py │ ├── ops.py │ └── utils.py └── voco_train.py
SYMBOL INDEX (971 symbols across 169 files)
FILE: Finetune/AbdomenAtlas/Atlas_test.py
function main (line 78) | def main():
FILE: Finetune/AbdomenAtlas/check.py
function read (line 10) | def read(img, transpose=False):
function vis (line 23) | def vis():
function check_original (line 71) | def check_original():
function exe (line 130) | def exe(path):
function trans_lab (line 158) | def trans_lab(path):
function check_pred_vis (line 176) | def check_pred_vis():
function check_pred_acc (line 211) | def check_pred_acc():
FILE: Finetune/AbdomenAtlas/dataset/dataloader_bdmap.py
class Sampler (line 67) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 68) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 88) | def __iter__(self):
method __len__ (line 107) | def __len__(self):
method set_epoch (line 110) | def set_epoch(self, epoch):
class LoadSelectedImaged (line 114) | class LoadSelectedImaged(MapTransform):
method __init__ (line 131) | def __init__(
method register (line 156) | def register(self, reader: ImageReader):
method __call__ (line 159) | def __call__(self, data, reader: Optional[ImageReader] = None):
function get_loader_Atlas (line 179) | def get_loader_Atlas(args):
class Filter_Atlas_Labels (line 240) | class Filter_Atlas_Labels(MapTransform):
method __call__ (line 244) | def __call__(self, data):
FILE: Finetune/AbdomenAtlas/dataset/dataloader_test.py
class Sampler (line 67) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 68) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 88) | def __iter__(self):
method __len__ (line 107) | def __len__(self):
method set_epoch (line 110) | def set_epoch(self, epoch):
class LoadSelectedImaged (line 114) | class LoadSelectedImaged(MapTransform):
method __init__ (line 131) | def __init__(
method register (line 156) | def register(self, reader: ImageReader):
method __call__ (line 159) | def __call__(self, data, reader: Optional[ImageReader] = None):
function get_test_loader_Atlas (line 179) | def get_test_loader_Atlas(args):
FILE: Finetune/AbdomenAtlas/main.py
function main (line 123) | def main():
function main_worker (line 135) | def main_worker(gpu, args):
function init_log (line 295) | def init_log(name, level=logging.INFO):
FILE: Finetune/AbdomenAtlas/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/AbdomenAtlas/preprocess/try_load.py
function main (line 87) | def main():
FILE: Finetune/AbdomenAtlas/trainer.py
function train_epoch (line 28) | def train_epoch(model, loader, optimizer, scheduler, scaler, epoch, loss...
function val_epoch (line 76) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 123) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 135) | def run_training(
FILE: Finetune/AbdomenAtlas/utils/data_trans.py
class Sampler (line 13) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 14) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 34) | def __iter__(self):
method __len__ (line 53) | def __len__(self):
method set_epoch (line 56) | def set_epoch(self, epoch):
function get_trans (line 60) | def get_trans(args):
class Delete_keys (line 104) | class Delete_keys(MapTransform):
method __call__ (line 108) | def __call__(self, data):
FILE: Finetune/AbdomenAtlas/utils/mixup.py
function mixup (line 5) | def mixup(inputs):
FILE: Finetune/AbdomenAtlas/utils/utils.py
function resample_3d (line 19) | def resample_3d(img, target_size):
function dice (line 27) | def dice(x, y):
class AverageMeter (line 36) | class AverageMeter(object):
method __init__ (line 37) | def __init__(self):
method reset (line 40) | def reset(self):
method update (line 46) | def update(self, val, n=1):
function distributed_all_gather (line 53) | def distributed_all_gather(
function color_map (line 83) | def color_map(dataset='pascal'):
function check_dir (line 128) | def check_dir(dir):
function read (line 133) | def read(img):
FILE: Finetune/Amos/check_test.py
function norm (line 30) | def norm(img):
function check_size (line 40) | def check_size():
function rename (line 89) | def rename():
function check_direction (line 100) | def check_direction():
FILE: Finetune/Amos/gen_json.py
function get_identifiers_from_splitted_files (line 6) | def get_identifiers_from_splitted_files(folder: str):
function generate_dataset_json (line 11) | def generate_dataset_json(output_file: str, imagesTr_dir: str, imagesTs_...
FILE: Finetune/Amos/inferers.py
function double_sliding_window_inference (line 28) | def double_sliding_window_inference(
function one_hot (line 294) | def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype =...
class GroupName (line 372) | class GroupName(enum.Enum):
function get_transforms_func (line 413) | def get_transforms_func(views: TransformsType,
function get_permute_transform (line 440) | def get_permute_transform(view_src: PermuteType,
function permute_inverse (line 452) | def permute_inverse(xs: Sequence[torch.Tensor],
function permute_rand (line 458) | def permute_rand(
FILE: Finetune/Amos/main.py
function main (line 112) | def main():
function main_worker (line 124) | def main_worker(gpu, args):
function init_log (line 286) | def init_log(name, level=logging.INFO):
FILE: Finetune/Amos/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/Amos/pre_cache.py
function main (line 87) | def main():
FILE: Finetune/Amos/test.py
function main (line 88) | def main():
FILE: Finetune/Amos/trainer.py
function train_epoch (line 27) | def train_epoch(model, loader, optimizer, scaler, epoch, loss_func, args):
function val_epoch (line 72) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 121) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 133) | def run_training(
FILE: Finetune/Amos/utils/data_test.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/Amos/utils/data_utils.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/Amos/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
function color_map (line 81) | def color_map(dataset='pascal'):
function check_dir (line 126) | def check_dir(dir):
FILE: Finetune/Amos/val.py
function main (line 78) | def main():
FILE: Finetune/BTCV/main.py
function main (line 114) | def main():
function main_worker (line 126) | def main_worker(gpu, args):
function init_log (line 286) | def init_log(name, level=logging.INFO):
FILE: Finetune/BTCV/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/BTCV/trainer.py
function train_epoch (line 27) | def train_epoch(model, loader, optimizer, scheduler, scaler, epoch, loss...
function val_epoch (line 75) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 125) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 137) | def run_training(
FILE: Finetune/BTCV/utils/data_test.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/BTCV/utils/data_utils.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/BTCV/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
FILE: Finetune/BTCV/val.py
function main (line 78) | def main():
FILE: Finetune/CC-CCII/eval.py
function main (line 83) | def main():
FILE: Finetune/CC-CCII/main.py
function main (line 93) | def main():
function main_worker (line 105) | def main_worker(gpu, args):
function init_log (line 228) | def init_log(name, level=logging.INFO):
FILE: Finetune/CC-CCII/model.py
class Swin (line 12) | class Swin(nn.Module):
method __init__ (line 13) | def __init__(self, args):
method forward (line 136) | def forward(self, x_in):
FILE: Finetune/CC-CCII/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/CC-CCII/trainer.py
function resize (line 27) | def resize(img):
function train_epoch (line 39) | def train_epoch(model, loader, optimizer, scheduler, scaler, epoch, args):
function val_epoch (line 84) | def val_epoch(model, loader, epoch, args):
function save_checkpoint (line 119) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 131) | def run_training(
FILE: Finetune/CC-CCII/utils/data_utils.py
function get_loader (line 24) | def get_loader(args):
class CC_CCII (line 84) | class CC_CCII(torch.utils.data.Dataset):
method __init__ (line 91) | def __init__(self, data=None, transforms=None, augmentation=True, args...
method __getitem__ (line 103) | def __getitem__(self, index):
method __len__ (line 139) | def __len__(self):
FILE: Finetune/CC-CCII/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
FILE: Finetune/Flare22/inferers.py
function double_sliding_window_inference (line 28) | def double_sliding_window_inference(
function one_hot (line 294) | def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype =...
class GroupName (line 372) | class GroupName(enum.Enum):
function get_transforms_func (line 413) | def get_transforms_func(views: TransformsType,
function get_permute_transform (line 440) | def get_permute_transform(view_src: PermuteType,
function permute_inverse (line 452) | def permute_inverse(xs: Sequence[torch.Tensor],
function permute_rand (line 458) | def permute_rand(
FILE: Finetune/Flare22/main.py
function main (line 112) | def main():
function main_worker (line 124) | def main_worker(gpu, args):
function init_log (line 286) | def init_log(name, level=logging.INFO):
FILE: Finetune/Flare22/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/Flare22/trainer.py
function train_epoch (line 27) | def train_epoch(model, loader, optimizer, scaler, epoch, loss_func, args):
function val_epoch (line 72) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 121) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 133) | def run_training(
FILE: Finetune/Flare22/utils/data_test.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/Flare22/utils/data_utils.py
class Sampler (line 22) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 23) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 43) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
method set_epoch (line 65) | def set_epoch(self, epoch):
function get_loader (line 69) | def get_loader(args):
FILE: Finetune/Flare22/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
function color_map (line 81) | def color_map(dataset='pascal'):
function check_dir (line 126) | def check_dir(dir):
FILE: Finetune/Flare22/val.py
function main (line 78) | def main():
FILE: Finetune/MM-WHS/inferers.py
function double_sliding_window_inference (line 28) | def double_sliding_window_inference(
function one_hot (line 294) | def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype =...
class GroupName (line 372) | class GroupName(enum.Enum):
function get_transforms_func (line 413) | def get_transforms_func(views: TransformsType,
function get_permute_transform (line 440) | def get_permute_transform(view_src: PermuteType,
function permute_inverse (line 452) | def permute_inverse(xs: Sequence[torch.Tensor],
function permute_rand (line 458) | def permute_rand(
FILE: Finetune/MM-WHS/main.py
function main (line 109) | def main():
function main_worker (line 121) | def main_worker(gpu, args):
function init_log (line 278) | def init_log(name, level=logging.INFO):
FILE: Finetune/MM-WHS/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/MM-WHS/test.py
function get_test_loader (line 88) | def get_test_loader(args):
function main (line 141) | def main():
FILE: Finetune/MM-WHS/trainer.py
function train_epoch (line 27) | def train_epoch(model, loader, optimizer, scaler, epoch, loss_func, args):
function val_epoch (line 72) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 121) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 133) | def run_training(
FILE: Finetune/MM-WHS/utils/data_utils.py
class Sampler (line 23) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 24) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 44) | def __iter__(self):
method __len__ (line 63) | def __len__(self):
method set_epoch (line 66) | def set_epoch(self, epoch):
function get_loader (line 70) | def get_loader(args):
class Convert_WHS_label (line 152) | class Convert_WHS_label(MapTransform):
method __call__ (line 154) | def __call__(self, data):
FILE: Finetune/MM-WHS/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
function color_map (line 81) | def color_map(dataset='pascal'):
function check_dir (line 126) | def check_dir(dir):
function load (line 131) | def load(model, model_dict):
FILE: Finetune/Word/main.py
function main (line 112) | def main():
function main_worker (line 124) | def main_worker(gpu, args):
function init_log (line 286) | def init_log(name, level=logging.INFO):
FILE: Finetune/Word/optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: Finetune/Word/trainer.py
function train_epoch (line 27) | def train_epoch(model, loader, optimizer, scheduler, scaler, epoch, loss...
function val_epoch (line 75) | def val_epoch(model, loader, epoch, acc_func, args, model_inferer=None, ...
function save_checkpoint (line 125) | def save_checkpoint(model, epoch, args, filename="model.pt", best_acc=0,...
function run_training (line 137) | def run_training(
FILE: Finetune/Word/utils/data_utils.py
class Sampler (line 24) | class Sampler(torch.utils.data.Sampler):
method __init__ (line 25) | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True...
method __iter__ (line 45) | def __iter__(self):
method __len__ (line 64) | def __len__(self):
method set_epoch (line 67) | def set_epoch(self, epoch):
function get_loader_word (line 71) | def get_loader_word(args):
FILE: Finetune/Word/utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
function color_map (line 81) | def color_map(dataset='pascal'):
function check_dir (line 126) | def check_dir(dir):
FILE: Finetune/nnUNet/nnunetv2/batch_running/collect_results_custom_Decathlon.py
function collect_results (line 12) | def collect_results(trainers: dict, datasets: List, output_file: str,
function summarize (line 43) | def summarize(input_file, output_file, folds: Tuple[int, ...], configs: ...
FILE: Finetune/nnUNet/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py
function merge (line 5) | def merge(dict1, dict2):
FILE: Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py
function collect_results (line 12) | def collect_results(trainers: dict, datasets: List, output_file: str,
function summarize (line 43) | def summarize(input_file, output_file, folds: Tuple[int, ...], configs: ...
FILE: Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py
function merge (line 5) | def merge(dict1, dict2):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset027_ACDC.py
function make_out_dirs (line 9) | def make_out_dirs(dataset_id: int, task_name="ACDC"):
function copy_files (line 25) | def copy_files(src_data_folder: Path, train_dir: Path, labels_dir: Path,...
function convert_acdc (line 51) | def convert_acdc(src_data_folder: str, dataset_id=27):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset114_MNMs.py
function read_csv (line 14) | def read_csv(csv_file: str):
function convert_mnms (line 38) | def convert_mnms(src_data_folder: Path, csv_file_name: str, dataset_id: ...
function save_cardiac_phases (line 64) | def save_cardiac_phases(
function save_extracted_nifti_slice (line 81) | def save_extracted_nifti_slice(image, ed_frame: int, es_frame: int, out_...
function create_custom_splits (line 96) | def create_custom_splits(src_data_folder: Path, csv_file: str, dataset_i...
function get_vendor_split (line 132) | def get_vendor_split(patients: list[str], num_val_patients: int):
class RawTextArgumentDefaultsHelpFormatter (line 142) | class RawTextArgumentDefaultsHelpFormatter(argparse.ArgumentDefaultsHelp...
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py
function copy_files (line 8) | def copy_files(src_data_dir: Path, src_test_dir: Path, train_dir: Path, ...
function convert_emidec (line 28) | def convert_emidec(src_data_dir: str, src_test_dir: str, dataset_id=27):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py
function load_and_covnert_case (line 14) | def load_and_covnert_case(input_image: str, input_seg: str, output_image...
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset137_BraTS21.py
function copy_BraTS_segmentation_and_convert_labels_to_nnUNet (line 12) | def copy_BraTS_segmentation_and_convert_labels_to_nnUNet(in_file: str, o...
function convert_labels_back_to_BraTS (line 32) | def convert_labels_back_to_BraTS(seg: np.ndarray):
function load_convert_labels_back_to_BraTS (line 40) | def load_convert_labels_back_to_BraTS(filename, input_folder, output_fol...
function convert_folder_with_preds_back_to_BraTS_labeling_convention (line 49) | def convert_folder_with_preds_back_to_BraTS_labeling_convention(input_fo...
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py
function convert_amos_task1 (line 7) | def convert_amos_task1(amos_base_dir: str, nnunet_dataset_id: int = 218):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py
function convert_amos_task2 (line 7) | def convert_amos_task2(amos_base_dir: str, nnunet_dataset_id: int = 219):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py
function convert_kits2023 (line 7) | def convert_kits2023(kits_base_dir: str, nnunet_dataset_id: int = 220):
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py
function convert_autopet (line 7) | def convert_autopet(autopet_base_dir:str = '/media/isensee/My Book1/Auto...
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/convert_MSD_dataset.py
function split_4d_nifti (line 14) | def split_4d_nifti(filename, output_folder):
function convert_msd_dataset (line 41) | def convert_msd_dataset(source_folder: str, overwrite_target_id: Optiona...
function entry_point (line 117) | def entry_point():
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py
function convert (line 8) | def convert(source_folder, target_dataset_name):
function convert_entry_point (line 43) | def convert_entry_point():
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py
function sparsify_segmentation (line 13) | def sparsify_segmentation(seg: np.ndarray, label_manager: LabelManager, ...
FILE: Finetune/nnUNet/nnunetv2/dataset_conversion/generate_dataset_json.py
function generate_dataset_json (line 6) | def generate_dataset_json(output_folder: str,
FILE: Finetune/nnUNet/nnunetv2/ensembling/ensemble.py
function average_probabilities (line 17) | def average_probabilities(list_of_files: List[str]) -> np.ndarray:
function merge_files (line 32) | def merge_files(list_of_files,
function ensemble_folders (line 49) | def ensemble_folders(list_of_input_folders: List[str],
function entry_point_ensemble_folders (line 114) | def entry_point_ensemble_folders():
function ensemble_crossvalidations (line 128) | def ensemble_crossvalidations(list_of_trained_model_folders: List[str],
FILE: Finetune/nnUNet/nnunetv2/evaluation/accumulate_cv_results.py
function accumulate_cv_results (line 12) | def accumulate_cv_results(trained_model_folder,
FILE: Finetune/nnUNet/nnunetv2/evaluation/evaluate_predictions.py
function label_or_region_to_key (line 20) | def label_or_region_to_key(label_or_region: Union[int, Tuple[int]]):
function key_to_label_or_region (line 24) | def key_to_label_or_region(key: str):
function save_summary_json (line 34) | def save_summary_json(results: dict, output_file: str):
function load_summary_json (line 51) | def load_summary_json(filename: str):
function labels_to_list_of_regions (line 63) | def labels_to_list_of_regions(labels: List[int]):
function region_or_label_to_mask (line 67) | def region_or_label_to_mask(segmentation: np.ndarray, region_or_label: U...
function compute_tp_fp_fn_tn (line 77) | def compute_tp_fp_fn_tn(mask_ref: np.ndarray, mask_pred: np.ndarray, ign...
function compute_metrics (line 89) | def compute_metrics(reference_file: str, prediction_file: str, image_rea...
function compute_metrics_on_folder (line 123) | def compute_metrics_on_folder(folder_ref: str, folder_pred: str, output_...
function compute_metrics_on_folder2 (line 179) | def compute_metrics_on_folder2(folder_ref: str, folder_pred: str, datase...
function compute_metrics_on_folder_simple (line 201) | def compute_metrics_on_folder_simple(folder_ref: str, folder_pred: str, ...
function evaluate_folder_entry_point (line 217) | def evaluate_folder_entry_point():
function evaluate_simple_entry_point (line 235) | def evaluate_simple_entry_point():
FILE: Finetune/nnUNet/nnunetv2/evaluation/find_best_configuration.py
function filter_available_models (line 26) | def filter_available_models(model_dict: Union[List[dict], Tuple[dict, .....
function generate_inference_command (line 51) | def generate_inference_command(dataset_name_or_id: Union[int, str], conf...
function find_best_configuration (line 81) | def find_best_configuration(dataset_name_or_id,
function print_inference_instructions (line 214) | def print_inference_instructions(inference_info_dict: dict, instructions...
function dumb_trainer_config_plans_to_trained_models_dict (line 257) | def dumb_trainer_config_plans_to_trained_models_dict(trainers: List[str]...
function find_best_configuration_entry_point (line 271) | def find_best_configuration_entry_point():
function accumulate_crossval_results_entry_point (line 300) | def accumulate_crossval_results_entry_point():
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py
class DatasetFingerprintExtractor (line 18) | class DatasetFingerprintExtractor(object):
method __init__ (line 19) | def __init__(self, dataset_name_or_id: Union[str, int], num_processes:...
method collect_foreground_intensities (line 42) | def collect_foreground_intensities(segmentation: np.ndarray, images: n...
method analyze_case (line 83) | def analyze_case(image_files: List[str], segmentation_file: str, reade...
method run (line 107) | def run(self, overwrite_existing: bool = False) -> dict:
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py
class ExperimentPlanner (line 24) | class ExperimentPlanner(object):
method __init__ (line 25) | def __init__(self, dataset_name_or_id: Union[str, int],
method determine_reader_writer (line 82) | def determine_reader_writer(self):
method static_estimate_VRAM_usage (line 88) | def static_estimate_VRAM_usage(patch_size: Tuple[int],
method determine_resampling (line 114) | def determine_resampling(self, *args, **kwargs):
method determine_segmentation_softmax_export_fn (line 138) | def determine_segmentation_softmax_export_fn(self, *args, **kwargs):
method determine_fullres_target_spacing (line 156) | def determine_fullres_target_spacing(self) -> np.ndarray:
method determine_normalization_scheme_and_whether_mask_is_used_for_norm (line 199) | def determine_normalization_scheme_and_whether_mask_is_used_for_norm(s...
method determine_transpose (line 216) | def determine_transpose(self):
method get_plans_for_configuration (line 229) | def get_plans_for_configuration(self,
method plan_experiment (line 371) | def plan_experiment(self):
method save_plans (line 502) | def save_plans(self, plans):
method generate_data_identifier (line 521) | def generate_data_identifier(self, configuration_name: str) -> str:
method load_plans (line 529) | def load_plans(self, fname: str):
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/network_topology.py
function get_shape_must_be_divisible_by (line 5) | def get_shape_must_be_divisible_by(net_numpool_per_axis):
function pad_shape (line 9) | def pad_shape(shape, must_be_divisible_by):
function get_pool_and_conv_props (line 30) | def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, m...
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py
class ResEncUNetPlanner (line 9) | class ResEncUNetPlanner(ExperimentPlanner):
method __init__ (line 10) | def __init__(self, dataset_name_or_id: Union[str, int],
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/plan_and_preprocess_api.py
function extract_fingerprint_dataset (line 18) | def extract_fingerprint_dataset(dataset_id: int,
function extract_fingerprints (line 36) | def extract_fingerprints(dataset_ids: List[int], fingerprint_extractor_c...
function plan_experiment_dataset (line 51) | def plan_experiment_dataset(dataset_id: int,
function plan_experiments (line 72) | def plan_experiments(dataset_ids: List[int], experiment_planner_class_na...
function preprocess_dataset (line 87) | def preprocess_dataset(dataset_id: int,
function preprocess (line 132) | def preprocess(dataset_ids: List[int],
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py
function extract_fingerprint_entry (line 5) | def extract_fingerprint_entry():
function plan_experiment_entry (line 30) | def plan_experiment_entry():
function preprocess_entry (line 69) | def preprocess_entry():
function plan_and_preprocess_entry (line 109) | def plan_and_preprocess_entry():
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py
function move_plans_between_datasets (line 13) | def move_plans_between_datasets(
function entry_point_move_plans_between_datasets (line 65) | def entry_point_move_plans_between_datasets():
FILE: Finetune/nnUNet/nnunetv2/experiment_planning/verify_dataset_integrity.py
function verify_labels (line 32) | def verify_labels(label_file: str, readerclass: Type[BaseReaderWriter], ...
function check_cases (line 47) | def check_cases(image_files: List[str], label_file: str, expected_num_ch...
function verify_dataset_integrity (line 119) | def verify_dataset_integrity(folder: str, num_processes: int = 8) -> None:
FILE: Finetune/nnUNet/nnunetv2/imageio/base_reader_writer.py
class BaseReaderWriter (line 21) | class BaseReaderWriter(ABC):
method _check_all_same (line 23) | def _check_all_same(input_list):
method _check_all_same_array (line 31) | def _check_all_same_array(input_list):
method read_images (line 39) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method read_seg (line 72) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
method write_seg (line 89) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
FILE: Finetune/nnUNet/nnunetv2/imageio/natural_image_reader_writer.py
class NaturalImage2DIO (line 22) | class NaturalImage2DIO(BaseReaderWriter):
method read_images (line 36) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method read_seg (line 61) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
method write_seg (line 64) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
FILE: Finetune/nnUNet/nnunetv2/imageio/nibabel_reader_writer.py
class NibabelIO (line 24) | class NibabelIO(BaseReaderWriter):
method read_images (line 37) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method read_seg (line 90) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
method write_seg (line 93) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
class NibabelIOWithReorient (line 100) | class NibabelIOWithReorient(BaseReaderWriter):
method read_images (line 115) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method read_seg (line 173) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
method write_seg (line 176) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
FILE: Finetune/nnUNet/nnunetv2/imageio/reader_writer_registry.py
function determine_reader_writer_from_dataset_json (line 23) | def determine_reader_writer_from_dataset_json(dataset_json_content: dict...
function determine_reader_writer_from_file_ending (line 41) | def determine_reader_writer_from_file_ending(file_ending: str, example_f...
function recursive_find_reader_writer_by_name (line 73) | def recursive_find_reader_writer_by_name(rw_class_name: str) -> Type[Bas...
FILE: Finetune/nnUNet/nnunetv2/imageio/simpleitk_reader_writer.py
class SimpleITKIO (line 22) | class SimpleITKIO(BaseReaderWriter):
method read_images (line 29) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method read_seg (line 114) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
method write_seg (line 117) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
FILE: Finetune/nnUNet/nnunetv2/imageio/tif_reader_writer.py
class Tiff3DIO (line 23) | class Tiff3DIO(BaseReaderWriter):
method read_images (line 38) | def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]])...
method write_seg (line 71) | def write_seg(self, seg: np.ndarray, output_fname: str, properties: di...
method read_seg (line 79) | def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
FILE: Finetune/nnUNet/nnunetv2/inference/data_iterators.py
function preprocess_fromfiles_save_to_queue (line 17) | def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]],
function preprocessing_iterator_fromfiles (line 60) | def preprocessing_iterator_fromfiles(list_of_lists: List[List[str]],
class PreprocessAdapter (line 119) | class PreprocessAdapter(DataLoader):
method __init__ (line 120) | def __init__(self, list_of_lists: List[List[str]],
method generate_train_batch (line 145) | def generate_train_batch(self):
class PreprocessAdapterFromNpy (line 165) | class PreprocessAdapterFromNpy(DataLoader):
method __init__ (line 166) | def __init__(self, list_of_images: List[np.ndarray],
method generate_train_batch (line 191) | def generate_train_batch(self):
function preprocess_fromnpy_save_to_queue (line 213) | def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray],
function preprocessing_iterator_fromnpy (line 258) | def preprocessing_iterator_fromnpy(list_of_images: List[np.ndarray],
FILE: Finetune/nnUNet/nnunetv2/inference/examples.py
function my_iterator (line 89) | def my_iterator(list_of_input_arrs, list_of_input_props):
FILE: Finetune/nnUNet/nnunetv2/inference/export_prediction.py
function convert_predicted_logits_to_segmentation_with_correct_shape (line 15) | def convert_predicted_logits_to_segmentation_with_correct_shape(predicte...
function export_prediction_from_logits (line 71) | def export_prediction_from_logits(predicted_array_or_file: Union[np.ndar...
function resample_and_save (line 109) | def resample_and_save(predicted: Union[torch.Tensor, np.ndarray], target...
FILE: Finetune/nnUNet/nnunetv2/inference/predict_from_raw_data.py
class nnUNetPredictor (line 38) | class nnUNetPredictor(object):
method __init__ (line 39) | def __init__(self,
method initialize_from_trained_model_folder (line 68) | def initialize_from_trained_model_folder(self, model_training_output_d...
method manual_initialization (line 117) | def manual_initialization(self, network: nn.Module, plans_manager: Pla...
method auto_detect_available_folds (line 142) | def auto_detect_available_folds(model_training_output_dir, checkpoint_...
method _manage_input_and_output_lists (line 151) | def _manage_input_and_output_lists(self, list_of_lists_or_source_folde...
method predict_from_files (line 191) | def predict_from_files(self,
method _internal_get_data_iterator_from_lists_of_filenames (line 252) | def _internal_get_data_iterator_from_lists_of_filenames(self,
method get_data_iterator_from_raw_npy_data (line 275) | def get_data_iterator_from_raw_npy_data(self,
method predict_from_list_of_npy_arrays (line 314) | def predict_from_list_of_npy_arrays(self,
method predict_from_data_iterator (line 332) | def predict_from_data_iterator(self,
method predict_single_npy_array (line 412) | def predict_single_npy_array(self, input_image: np.ndarray, image_prop...
method predict_logits_from_preprocessed_data (line 449) | def predict_logits_from_preprocessed_data(self, data: torch.Tensor) ->...
method _internal_get_sliding_window_slicers (line 509) | def _internal_get_sliding_window_slicers(self, image_size: Tuple[int, ...
method _internal_maybe_mirror_and_predict (line 543) | def _internal_maybe_mirror_and_predict(self, x: torch.Tensor) -> torch...
method predict_sliding_window_return_logits (line 560) | def predict_sliding_window_return_logits(self, input_image: torch.Tens...
function predict_entry_point_modelfolder (line 639) | def predict_entry_point_modelfolder():
function predict_entry_point (line 728) | def predict_entry_point():
FILE: Finetune/nnUNet/nnunetv2/inference/sliding_window_prediction.py
function compute_gaussian (line 11) | def compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma...
function compute_steps_for_sliding_window (line 32) | def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_s...
FILE: Finetune/nnUNet/nnunetv2/model_sharing/entry_points.py
function print_license_warning (line 6) | def print_license_warning():
function download_by_url (line 18) | def download_by_url():
function install_from_zip_entry_point (line 31) | def install_from_zip_entry_point():
function export_pretrained_model_entry (line 41) | def export_pretrained_model_entry():
FILE: Finetune/nnUNet/nnunetv2/model_sharing/model_download.py
function download_and_install_from_url (line 11) | def download_and_install_from_url(url):
function download_file (line 37) | def download_file(url: str, local_filename: str, chunk_size: Optional[in...
FILE: Finetune/nnUNet/nnunetv2/model_sharing/model_export.py
function export_pretrained_model (line 6) | def export_pretrained_model(dataset_name_or_id: Union[int, str], output_...
FILE: Finetune/nnUNet/nnunetv2/model_sharing/model_import.py
function install_model_from_zip_file (line 6) | def install_model_from_zip_file(zip_file: str):
FILE: Finetune/nnUNet/nnunetv2/postprocessing/remove_connected_components.py
function remove_all_but_largest_component_from_segmentation (line 22) | def remove_all_but_largest_component_from_segmentation(segmentation: np....
function apply_postprocessing (line 37) | def apply_postprocessing(segmentation: np.ndarray, pp_fns: List[Callable...
function load_postprocess_save (line 43) | def load_postprocess_save(segmentation_file: str,
function determine_postprocessing (line 53) | def determine_postprocessing(folder_predictions: str,
function apply_postprocessing_to_folder (line 248) | def apply_postprocessing_to_folder(input_folder: str,
function entry_point_determine_postprocessing_folder (line 298) | def entry_point_determine_postprocessing_folder():
function entry_point_apply_postprocessing (line 318) | def entry_point_apply_postprocessing():
FILE: Finetune/nnUNet/nnunetv2/preprocessing/cropping/cropping.py
function create_nonzero_mask (line 8) | def create_nonzero_mask(data):
function crop_to_nonzero (line 24) | def crop_to_nonzero(data, seg=None, nonzero_label=-1):
FILE: Finetune/nnUNet/nnunetv2/preprocessing/normalization/default_normalization_schemes.py
class ImageNormalization (line 8) | class ImageNormalization(ABC):
method __init__ (line 11) | def __init__(self, use_mask_for_norm: bool = None, intensityproperties...
method run (line 20) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
class ZScoreNormalization (line 27) | class ZScoreNormalization(ImageNormalization):
method run (line 30) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
class CTNormalization (line 52) | class CTNormalization(ImageNormalization):
method run (line 55) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
class NoNormalization (line 67) | class NoNormalization(ImageNormalization):
method run (line 70) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
class RescaleTo01Normalization (line 74) | class RescaleTo01Normalization(ImageNormalization):
method run (line 77) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
class RGBTo01Normalization (line 84) | class RGBTo01Normalization(ImageNormalization):
method run (line 87) | def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
FILE: Finetune/nnUNet/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py
function get_normalization_scheme (line 15) | def get_normalization_scheme(channel_name: str) -> Type[ImageNormalizati...
FILE: Finetune/nnUNet/nnunetv2/preprocessing/preprocessors/default_preprocessor.py
class DefaultPreprocessor (line 33) | class DefaultPreprocessor(object):
method __init__ (line 34) | def __init__(self, verbose: bool = True):
method run_case_npy (line 40) | def run_case_npy(self, data: np.ndarray, seg: Union[np.ndarray, None],...
method run_case (line 115) | def run_case(self, image_files: List[str], seg_file: Union[str, None],...
method run_case_save (line 143) | def run_case_save(self, output_filename_truncated: str, image_files: L...
method _sample_foreground_locations (line 152) | def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: ...
method _normalize (line 180) | def _normalize(self, data: np.ndarray, seg: np.ndarray, configuration_...
method run (line 194) | def run(self, dataset_name_or_id: Union[int, str], configuration_name:...
method modify_seg_fn (line 259) | def modify_seg_fn(self, seg: np.ndarray, plans_manager: PlansManager, ...
function example_test_case_preprocessing (line 267) | def example_test_case_preprocessing():
FILE: Finetune/nnUNet/nnunetv2/preprocessing/resampling/default_resampling.py
function get_do_separate_z (line 13) | def get_do_separate_z(spacing: Union[Tuple[float, ...], List[float], np....
function get_lowres_axis (line 18) | def get_lowres_axis(new_spacing: Union[Tuple[float, ...], List[float], n...
function compute_new_shape (line 23) | def compute_new_shape(old_shape: Union[Tuple[int, ...], List[int], np.nd...
function resample_data_or_seg_to_spacing (line 32) | def resample_data_or_seg_to_spacing(data: np.ndarray,
function resample_data_or_seg_to_shape (line 77) | def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray],
function resample_data_or_seg (line 125) | def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float,...
FILE: Finetune/nnUNet/nnunetv2/preprocessing/resampling/utils.py
function recursive_find_resampling_fn_by_name (line 8) | def recursive_find_resampling_fn_by_name(resampling_fn: str) -> Callable:
FILE: Finetune/nnUNet/nnunetv2/run/load_pretrained_weights.py
function load_pretrained_weights (line 6) | def load_pretrained_weights(network, fname, verbose=False):
FILE: Finetune/nnUNet/nnunetv2/run/run_training.py
function find_free_network_port (line 18) | def find_free_network_port() -> int:
function get_trainer_from_args (line 31) | def get_trainer_from_args(dataset_name_or_id: Union[int, str],
function maybe_load_checkpoint (line 70) | def maybe_load_checkpoint(nnunet_trainer: nnUNetTrainer, continue_traini...
function setup_ddp (line 101) | def setup_ddp(rank, world_size):
function cleanup_ddp (line 106) | def cleanup_ddp():
function run_ddp (line 110) | def run_ddp(rank, dataset_name_or_id, configuration, fold, tr, p, use_co...
function run_training (line 138) | def run_training(dataset_name_or_id: Union[str, int],
function run_training_entry (line 211) | def run_training_entry():
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/compute_initial_patch_size.py
function get_patch_size (line 4) | def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/cascade_transforms.py
class MoveSegAsOneHotToData (line 10) | class MoveSegAsOneHotToData(AbstractTransform):
method __init__ (line 11) | def __init__(self, index_in_origin: int, all_labels: Union[Tuple[int, ...
method __call__ (line 23) | def __call__(self, **data_dict):
class RemoveRandomConnectedComponentFromOneHotEncodingTransform (line 40) | class RemoveRandomConnectedComponentFromOneHotEncodingTransform(Abstract...
method __init__ (line 41) | def __init__(self, channel_idx: Union[int, List[int]], key: str = "dat...
method __call__ (line 58) | def __call__(self, **data_dict):
class ApplyRandomBinaryOperatorTransform (line 88) | class ApplyRandomBinaryOperatorTransform(AbstractTransform):
method __init__ (line 89) | def __init__(self,
method __call__ (line 111) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py
class DownsampleSegForDSTransform2 (line 8) | class DownsampleSegForDSTransform2(AbstractTransform):
method __init__ (line 12) | def __init__(self, ds_scales: Union[List, Tuple],
method __call__ (line 27) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py
class LimitedLenWrapper (line 4) | class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
method __init__ (line 5) | def __init__(self, my_imaginary_length, *args, **kwargs):
method __len__ (line 9) | def __len__(self):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py
class RemoveKeyTransform (line 4) | class RemoveKeyTransform(AbstractTransform):
method __init__ (line 5) | def __init__(self, key_to_remove: str):
method __call__ (line 8) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/masking.py
class MaskTransform (line 6) | class MaskTransform(AbstractTransform):
method __init__ (line 7) | def __init__(self, apply_to_channels: List[int], mask_idx_in_seg: int ...
method __call__ (line 18) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py
class ConvertSegmentationToRegionsTransform (line 7) | class ConvertSegmentationToRegionsTransform(AbstractTransform):
method __init__ (line 8) | def __init__(self, regions: Union[List, Tuple],
method __call__ (line 23) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py
class Convert3DTo2DTransform (line 6) | class Convert3DTo2DTransform(AbstractTransform):
method __init__ (line 7) | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('dat...
method __call__ (line 13) | def __call__(self, **data_dict):
class Convert2DTo3DTransform (line 26) | class Convert2DTo3DTransform(AbstractTransform):
method __init__ (line 27) | def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('dat...
method __call__ (line 33) | def __call__(self, **data_dict):
FILE: Finetune/nnUNet/nnunetv2/training/dataloading/base_data_loader.py
class nnUNetDataLoaderBase (line 10) | class nnUNetDataLoaderBase(DataLoader):
method __init__ (line 11) | def __init__(self,
method _oversample_last_XX_percent (line 45) | def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
method _probabilistic_oversampling (line 51) | def _probabilistic_oversampling(self, sample_idx: int) -> bool:
method determine_shapes (line 55) | def determine_shapes(self):
method get_bbox (line 64) | def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locat...
FILE: Finetune/nnUNet/nnunetv2/training/dataloading/data_loader_2d.py
class nnUNetDataLoader2D (line 6) | class nnUNetDataLoader2D(nnUNetDataLoaderBase):
method generate_train_batch (line 7) | def generate_train_batch(self):
FILE: Finetune/nnUNet/nnunetv2/training/dataloading/data_loader_3d.py
class nnUNetDataLoader3D (line 6) | class nnUNetDataLoader3D(nnUNetDataLoaderBase):
method generate_train_batch (line 7) | def generate_train_batch(self):
FILE: Finetune/nnUNet/nnunetv2/training/dataloading/nnunet_dataset.py
class nnUNetDataset (line 11) | class nnUNetDataset(object):
method __init__ (line 12) | def __init__(self, folder: str, case_identifiers: List[str] = None,
method __getitem__ (line 59) | def __getitem__(self, key):
method __setitem__ (line 65) | def __setitem__(self, key, value):
method keys (line 68) | def keys(self):
method __len__ (line 71) | def __len__(self):
method items (line 74) | def items(self):
method values (line 77) | def values(self):
method load_case (line 80) | def load_case(self, key):
FILE: Finetune/nnUNet/nnunetv2/training/dataloading/utils.py
function find_broken_image_and_labels (line 13) | def find_broken_image_and_labels(
function try_fix_broken_npy (line 42) | def try_fix_broken_npy(path_do_data_dir: Path, case_ids: set[str], fix_i...
function verify_or_stratify_npys (line 68) | def verify_or_stratify_npys(path_to_data_dir: str | Path) -> None:
function _convert_to_npy (line 91) | def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, ove...
function unpack_dataset (line 106) | def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwr...
function get_case_identifiers (line 119) | def get_case_identifiers(folder: str) -> List[str]:
FILE: Finetune/nnUNet/nnunetv2/training/logging/nnunet_logger.py
class nnUNetLogger (line 9) | class nnUNetLogger(object):
method __init__ (line 17) | def __init__(self, verbose: bool = False):
method log (line 31) | def log(self, key, value, epoch: int):
method plot_progress_png (line 54) | def plot_progress_png(self, output_folder):
method get_checkpoint (line 99) | def get_checkpoint(self):
method load_checkpoint (line 102) | def load_checkpoint(self, checkpoint: dict):
FILE: Finetune/nnUNet/nnunetv2/training/loss/compound_losses.py
class DC_and_CE_loss (line 8) | class DC_and_CE_loss(nn.Module):
method __init__ (line 9) | def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_di...
method forward (line 31) | def forward(self, net_output: torch.Tensor, target: torch.Tensor):
class DC_and_BCE_loss (line 59) | class DC_and_BCE_loss(nn.Module):
method __init__ (line 60) | def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_d...
method forward (line 83) | def forward(self, net_output: torch.Tensor, target: torch.Tensor):
class DC_and_topk_loss (line 102) | class DC_and_topk_loss(nn.Module):
method __init__ (line 103) | def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_di...
method forward (line 124) | def forward(self, net_output: torch.Tensor, target: torch.Tensor):
FILE: Finetune/nnUNet/nnunetv2/training/loss/deep_supervision.py
class DeepSupervisionWrapper (line 5) | class DeepSupervisionWrapper(nn.Module):
method __init__ (line 6) | def __init__(self, loss, weight_factors=None):
method forward (line 19) | def forward(self, *args):
FILE: Finetune/nnUNet/nnunetv2/training/loss/dice.py
class SoftDiceLoss (line 8) | class SoftDiceLoss(nn.Module):
method __init__ (line 9) | def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = F...
method forward (line 22) | def forward(self, x, y, loss_mask=None):
class MemoryEfficientSoftDiceLoss (line 58) | class MemoryEfficientSoftDiceLoss(nn.Module):
method __init__ (line 59) | def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = F...
method forward (line 72) | def forward(self, x, y, loss_mask=None):
function get_tp_fp_fn_tn (line 122) | def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
FILE: Finetune/nnUNet/nnunetv2/training/loss/robust_ce_loss.py
class RobustCrossEntropyLoss (line 6) | class RobustCrossEntropyLoss(nn.CrossEntropyLoss):
method forward (line 12) | def forward(self, input: Tensor, target: Tensor) -> Tensor:
class TopKLoss (line 19) | class TopKLoss(RobustCrossEntropyLoss):
method __init__ (line 23) | def __init__(self, weight=None, ignore_index: int = -100, k: float = 1...
method forward (line 27) | def forward(self, inp, target):
FILE: Finetune/nnUNet/nnunetv2/training/lr_scheduler/polylr.py
class PolyLRScheduler (line 4) | class PolyLRScheduler(_LRScheduler):
method __init__ (line 5) | def __init__(self, optimizer, initial_lr: float, max_steps: int, expon...
method step (line 13) | def step(self, current_step=None):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py
class nnUNetTrainer (line 67) | class nnUNetTrainer(object):
method __init__ (line 68) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method initialize (line 200) | def initialize(self):
method _do_i_compile (line 229) | def _do_i_compile(self):
method _save_debug_information (line 232) | def _save_debug_information(self):
method build_network_architecture (line 268) | def build_network_architecture(plans_manager: PlansManager,
method _get_deep_supervision_scales (line 295) | def _get_deep_supervision_scales(self):
method _set_batch_size_and_oversample (line 303) | def _set_batch_size_and_oversample(self):
method _build_loss (line 350) | def _build_loss(self):
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 376) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
method print_to_log_file (line 433) | def print_to_log_file(self, *args, also_print_to_console=True, add_tim...
method print_plans (line 461) | def print_plans(self):
method configure_optimizers (line 470) | def configure_optimizers(self):
method plot_network_architecture (line 476) | def plot_network_architecture(self):
method do_split (line 514) | def do_split(self):
method get_tr_and_val_datasets (line 578) | def get_tr_and_val_datasets(self):
method get_dataloaders (line 592) | def get_dataloaders(self):
method get_plain_dataloaders (line 643) | def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], d...
method get_training_transforms (line 675) | def get_training_transforms(
method get_validation_transforms (line 769) | def get_validation_transforms(
method set_deep_supervision_enabled (line 798) | def set_deep_supervision_enabled(self, enabled: bool):
method on_train_start (line 808) | def on_train_start(self):
method on_train_end (line 850) | def on_train_end(self):
method on_train_epoch_start (line 874) | def on_train_epoch_start(self):
method train_step (line 884) | def train_step(self, batch: dict) -> dict:
method on_train_epoch_end (line 916) | def on_train_epoch_end(self, train_outputs: List[dict]):
method on_validation_epoch_start (line 928) | def on_validation_epoch_start(self):
method validation_step (line 931) | def validation_step(self, batch: dict) -> dict:
method on_validation_epoch_end (line 995) | def on_validation_epoch_end(self, val_outputs: List[dict]):
method on_epoch_start (line 1028) | def on_epoch_start(self):
method on_epoch_end (line 1031) | def on_epoch_end(self):
method save_checkpoint (line 1057) | def save_checkpoint(self, filename: str) -> None:
method load_checkpoint (line 1082) | def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) ->...
method perform_actual_validation (line 1120) | def perform_actual_validation(self, save_probabilities: bool = False):
method run_training (line 1249) | def run_training(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/nnUNetTrainer_swin.py
class nnUNetTrainer_swin (line 12) | class nnUNetTrainer_swin(nnUNetTrainer):
method __init__ (line 13) | def __init__(
method build_network_architecture (line 27) | def build_network_architecture(plans_manager: PlansManager,
method set_deep_supervision_enabled (line 66) | def set_deep_supervision_enabled(self, enabled: bool):
class nnUNetTrainer_swin_pre (line 70) | class nnUNetTrainer_swin_pre(nnUNetTrainer):
method __init__ (line 71) | def __init__(
method build_network_architecture (line 85) | def build_network_architecture(plans_manager: PlansManager,
method set_deep_supervision_enabled (line 149) | def set_deep_supervision_enabled(self, enabled: bool):
function delete_patch_embed (line 153) | def delete_patch_embed(state_dict):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py
class nnUNetTrainerBenchmark_5epochs (line 8) | class nnUNetTrainerBenchmark_5epochs(nnUNetTrainer):
method __init__ (line 9) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method perform_actual_validation (line 18) | def perform_actual_validation(self, save_probabilities: bool = False):
method save_checkpoint (line 21) | def save_checkpoint(self, filename: str) -> None:
method run_training (line 25) | def run_training(self):
method on_train_end (line 31) | def on_train_end(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py
class nnUNetTrainerBenchmark_5epochs_noDataLoading (line 9) | class nnUNetTrainerBenchmark_5epochs_noDataLoading(nnUNetTrainerBenchmar...
method __init__ (line 10) | def __init__(
method get_dataloaders (line 38) | def get_dataloaders(self):
method run_training (line 41) | def run_training(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDA5.py
class nnUNetTrainerDA5 (line 35) | class nnUNetTrainerDA5(nnUNetTrainer):
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 36) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
method get_training_transforms (line 94) | def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],
class nnUNetTrainerDA5ord0 (line 308) | class nnUNetTrainerDA5ord0(nnUNetTrainerDA5):
method get_dataloaders (line 309) | def get_dataloaders(self):
function _brightnessadditive_localgamma_transform_scale (line 357) | def _brightnessadditive_localgamma_transform_scale(x, y):
function _brightness_gradient_additive_max_strength (line 361) | def _brightness_gradient_additive_max_strength(_x, _y):
function _local_gamma_gamma (line 365) | def _local_gamma_gamma():
class nnUNetTrainerDA5Segord0 (line 369) | class nnUNetTrainerDA5Segord0(nnUNetTrainerDA5):
method get_dataloaders (line 370) | def get_dataloaders(self):
class nnUNetTrainerDA5_10epochs (line 418) | class nnUNetTrainerDA5_10epochs(nnUNetTrainerDA5):
method __init__ (line 419) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDAOrd0.py
class nnUNetTrainerDAOrd0 (line 9) | class nnUNetTrainerDAOrd0(nnUNetTrainer):
method get_dataloaders (line 10) | def get_dataloaders(self):
class nnUNetTrainer_DASegOrd0 (line 58) | class nnUNetTrainer_DASegOrd0(nnUNetTrainer):
method get_dataloaders (line 59) | def get_dataloaders(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py
class nnUNetTrainerNoDA (line 9) | class nnUNetTrainerNoDA(nnUNetTrainer):
method get_training_transforms (line 11) | def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],
method get_plain_dataloaders (line 27) | def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], d...
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 33) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py
class nnUNetTrainerNoMirroring (line 4) | class nnUNetTrainerNoMirroring(nnUNetTrainer):
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 5) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
class nnUNetTrainer_onlyMirror01 (line 13) | class nnUNetTrainer_onlyMirror01(nnUNetTrainer):
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 17) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py
class nnUNetTrainerCELoss (line 8) | class nnUNetTrainerCELoss(nnUNetTrainer):
method _build_loss (line 9) | def _build_loss(self):
class nnUNetTrainerCELoss_5epochs (line 29) | class nnUNetTrainerCELoss_5epochs(nnUNetTrainerCELoss):
method __init__ (line 30) | def __init__(
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py
class nnUNetTrainerDiceLoss (line 11) | class nnUNetTrainerDiceLoss(nnUNetTrainer):
method _build_loss (line 12) | def _build_loss(self):
class nnUNetTrainerDiceCELoss_noSmooth (line 32) | class nnUNetTrainerDiceCELoss_noSmooth(nnUNetTrainer):
method _build_loss (line 33) | def _build_loss(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerTopkLoss.py
class nnUNetTrainerTopk10Loss (line 8) | class nnUNetTrainerTopk10Loss(nnUNetTrainer):
method _build_loss (line 9) | def _build_loss(self):
class nnUNetTrainerTopk10LossLS01 (line 30) | class nnUNetTrainerTopk10LossLS01(nnUNetTrainer):
method _build_loss (line 31) | def _build_loss(self):
class nnUNetTrainerDiceTopK10Loss (line 54) | class nnUNetTrainerDiceTopK10Loss(nnUNetTrainer):
method _build_loss (line 55) | def _build_loss(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py
class nnUNetTrainerCosAnneal (line 7) | class nnUNetTrainerCosAnneal(nnUNetTrainer):
method configure_optimizers (line 8) | def configure_optimizers(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py
class nnUNetTrainerBN (line 9) | class nnUNetTrainerBN(nnUNetTrainer):
method build_network_architecture (line 11) | def build_network_architecture(plans_manager: PlansManager,
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
class nnUNetTrainerNoDeepSupervision (line 5) | class nnUNetTrainerNoDeepSupervision(nnUNetTrainer):
method __init__ (line 6) | def __init__(
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py
class nnUNetTrainerAdam (line 8) | class nnUNetTrainerAdam(nnUNetTrainer):
method configure_optimizers (line 9) | def configure_optimizers(self):
class nnUNetTrainerVanillaAdam (line 20) | class nnUNetTrainerVanillaAdam(nnUNetTrainer):
method configure_optimizers (line 21) | def configure_optimizers(self):
class nnUNetTrainerVanillaAdam1en3 (line 31) | class nnUNetTrainerVanillaAdam1en3(nnUNetTrainerVanillaAdam):
method __init__ (line 32) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerVanillaAdam3en4 (line 38) | class nnUNetTrainerVanillaAdam3en4(nnUNetTrainerVanillaAdam):
method __init__ (line 40) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerAdam1en3 (line 46) | class nnUNetTrainerAdam1en3(nnUNetTrainerAdam):
method __init__ (line 47) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerAdam3en4 (line 53) | class nnUNetTrainerAdam3en4(nnUNetTrainerAdam):
method __init__ (line 55) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdan.py
class nnUNetTrainerAdan (line 12) | class nnUNetTrainerAdan(nnUNetTrainer):
method configure_optimizers (line 13) | def configure_optimizers(self):
class nnUNetTrainerAdan1en3 (line 26) | class nnUNetTrainerAdan1en3(nnUNetTrainerAdan):
method __init__ (line 27) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerAdan3en4 (line 33) | class nnUNetTrainerAdan3en4(nnUNetTrainerAdan):
method __init__ (line 35) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerAdan1en1 (line 41) | class nnUNetTrainerAdan1en1(nnUNetTrainerAdan):
method __init__ (line 43) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainerAdanCosAnneal (line 49) | class nnUNetTrainerAdanCosAnneal(nnUNetTrainerAdan):
method configure_optimizers (line 55) | def configure_optimizers(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/sampling/nnUNetTrainer_probabilisticOversampling.py
class nnUNetTrainer_probabilisticOversampling (line 11) | class nnUNetTrainer_probabilisticOversampling(nnUNetTrainer):
method __init__ (line 19) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method get_plain_dataloaders (line 27) | def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], d...
class nnUNetTrainer_probabilisticOversampling_033 (line 63) | class nnUNetTrainer_probabilisticOversampling_033(nnUNetTrainer_probabil...
method __init__ (line 64) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_probabilisticOversampling_010 (line 70) | class nnUNetTrainer_probabilisticOversampling_010(nnUNetTrainer_probabil...
method __init__ (line 71) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs.py
class nnUNetTrainer_5epochs (line 6) | class nnUNetTrainer_5epochs(nnUNetTrainer):
method __init__ (line 7) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_1epoch (line 14) | class nnUNetTrainer_1epoch(nnUNetTrainer):
method __init__ (line 15) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_10epochs (line 22) | class nnUNetTrainer_10epochs(nnUNetTrainer):
method __init__ (line 23) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_20epochs (line 30) | class nnUNetTrainer_20epochs(nnUNetTrainer):
method __init__ (line 31) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_50epochs (line 37) | class nnUNetTrainer_50epochs(nnUNetTrainer):
method __init__ (line 38) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_100epochs (line 44) | class nnUNetTrainer_100epochs(nnUNetTrainer):
method __init__ (line 45) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_250epochs (line 51) | class nnUNetTrainer_250epochs(nnUNetTrainer):
method __init__ (line 52) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_2000epochs (line 58) | class nnUNetTrainer_2000epochs(nnUNetTrainer):
method __init__ (line 59) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_4000epochs (line 65) | class nnUNetTrainer_4000epochs(nnUNetTrainer):
method __init__ (line 66) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
class nnUNetTrainer_8000epochs (line 72) | class nnUNetTrainer_8000epochs(nnUNetTrainer):
method __init__ (line 73) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs_NoMirroring.py
class nnUNetTrainer_250epochs_NoMirroring (line 6) | class nnUNetTrainer_250epochs_NoMirroring(nnUNetTrainer):
method __init__ (line 7) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 12) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
class nnUNetTrainer_2000epochs_NoMirroring (line 20) | class nnUNetTrainer_2000epochs_NoMirroring(nnUNetTrainer):
method __init__ (line 21) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 26) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
class nnUNetTrainer_4000epochs_NoMirroring (line 34) | class nnUNetTrainer_4000epochs_NoMirroring(nnUNetTrainer):
method __init__ (line 35) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 40) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
class nnUNetTrainer_8000epochs_NoMirroring (line 48) | class nnUNetTrainer_8000epochs_NoMirroring(nnUNetTrainer):
method __init__ (line 49) | def __init__(self, plans: dict, configuration: str, fold: int, dataset...
method configure_rotation_dummyDA_mirroring_and_inital_patch_size (line 54) | def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
FILE: Finetune/nnUNet/nnunetv2/training/nnUNetTrainer/vit.py
class Swin (line 23) | class Swin(nn.Module):
method __init__ (line 24) | def __init__(self, input_channels: int,
method forward (line 153) | def forward(self, x_in):
method compute_conv_feature_map_size (line 170) | def compute_conv_feature_map_size(self, input_size):
FILE: Finetune/nnUNet/nnunetv2/utilities/collate_outputs.py
function collate_outputs (line 6) | def collate_outputs(outputs: List[dict]):
FILE: Finetune/nnUNet/nnunetv2/utilities/dataset_name_id_conversion.py
function find_candidate_datasets (line 21) | def find_candidate_datasets(dataset_id: int):
function convert_id_to_dataset_name (line 42) | def convert_id_to_dataset_name(dataset_id: int):
function convert_dataset_name_to_id (line 58) | def convert_dataset_name_to_id(dataset_name: str):
function maybe_convert_to_dataset_name (line 64) | def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -...
FILE: Finetune/nnUNet/nnunetv2/utilities/ddp_allgather.py
function print_if_rank0 (line 20) | def print_if_rank0(*args):
class AllGatherGrad (line 25) | class AllGatherGrad(torch.autograd.Function):
method forward (line 28) | def forward(
method backward (line 43) | def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tens...
FILE: Finetune/nnUNet/nnunetv2/utilities/default_n_proc_DA.py
function get_allowed_n_proc_DA (line 5) | def get_allowed_n_proc_DA():
FILE: Finetune/nnUNet/nnunetv2/utilities/file_path_utilities.py
function convert_trainer_plans_config_to_identifier (line 11) | def convert_trainer_plans_config_to_identifier(trainer_name, plans_ident...
function convert_identifier_to_trainer_plans_config (line 15) | def convert_identifier_to_trainer_plans_config(identifier: str):
function get_output_folder (line 19) | def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name:...
function parse_dataset_trainer_plans_configuration_from_path (line 29) | def parse_dataset_trainer_plans_configuration_from_path(path: str):
function get_ensemble_name (line 60) | def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, .....
function get_ensemble_name_from_d_tr_c (line 66) | def get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, fol...
function convert_ensemble_folder_to_model_identifiers_and_folds (line 73) | def convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_fold...
function folds_tuple_to_string (line 78) | def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]):
function folds_string_to_tuple (line 85) | def folds_string_to_tuple(folds_string: str):
function check_workers_alive_and_busy (line 96) | def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, r...
FILE: Finetune/nnUNet/nnunetv2/utilities/find_class_by_name.py
function recursive_find_python_class (line 7) | def recursive_find_python_class(folder: str, class_name: str, current_mo...
FILE: Finetune/nnUNet/nnunetv2/utilities/get_network_from_plans.py
function get_network_from_plans (line 9) | def get_network_from_plans(plans_manager: PlansManager,
FILE: Finetune/nnUNet/nnunetv2/utilities/helpers.py
function softmax_helper_dim0 (line 4) | def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
function softmax_helper_dim1 (line 8) | def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
function empty_cache (line 12) | def empty_cache(device: torch.device):
class dummy_context (line 22) | class dummy_context(object):
method __enter__ (line 23) | def __enter__(self):
method __exit__ (line 26) | def __exit__(self, exc_type, exc_val, exc_tb):
FILE: Finetune/nnUNet/nnunetv2/utilities/json_export.py
function recursive_fix_for_json_export (line 7) | def recursive_fix_for_json_export(my_dict: dict):
function fix_types_iterable (line 39) | def fix_types_iterable(iterable, output_type):
FILE: Finetune/nnUNet/nnunetv2/utilities/label_handling/label_handling.py
class LabelManager (line 21) | class LabelManager(object):
method __init__ (line 22) | def __init__(self, label_dict: dict, regions_class_order: Union[List[i...
method _sanity_check (line 51) | def _sanity_check(self, label_dict: dict):
method _get_all_labels (line 62) | def _get_all_labels(self) -> List[int]:
method _get_regions (line 77) | def _get_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
method _determine_ignore_label (line 101) | def _determine_ignore_label(self) -> Union[None, int]:
method has_regions (line 109) | def has_regions(self) -> bool:
method has_ignore_label (line 113) | def has_ignore_label(self) -> bool:
method all_regions (line 117) | def all_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
method all_labels (line 121) | def all_labels(self) -> List[int]:
method ignore_label (line 125) | def ignore_label(self) -> Union[None, int]:
method apply_inference_nonlin (line 128) | def apply_inference_nonlin(self, logits: Union[np.ndarray, torch.Tenso...
method convert_probabilities_to_segmentation (line 143) | def convert_probabilities_to_segmentation(self, predicted_probabilitie...
method convert_logits_to_segmentation (line 177) | def convert_logits_to_segmentation(self, predicted_logits: Union[np.nd...
method revert_cropping_on_probabilities (line 185) | def revert_cropping_on_probabilities(self, predicted_probabilities: Un...
method filter_background (line 212) | def filter_background(classes_or_regions: Union[List[int], List[Union[...
method foreground_regions (line 222) | def foreground_regions(self):
method foreground_labels (line 226) | def foreground_labels(self):
method num_segmentation_heads (line 230) | def num_segmentation_heads(self):
function get_labelmanager_class_from_plans (line 237) | def get_labelmanager_class_from_plans(plans: dict) -> Type[LabelManager]:
function convert_labelmap_to_one_hot (line 248) | def convert_labelmap_to_one_hot(segmentation: Union[np.ndarray, torch.Te...
function determine_num_input_channels (line 283) | def determine_num_input_channels(plans_manager: PlansManager,
FILE: Finetune/nnUNet/nnunetv2/utilities/network_initialization.py
class InitWeights_He (line 4) | class InitWeights_He(object):
method __init__ (line 5) | def __init__(self, neg_slope=1e-2):
method __call__ (line 8) | def __call__(self, module):
FILE: Finetune/nnUNet/nnunetv2/utilities/overlay_plots.py
function hex_to_rgb (line 48) | def hex_to_rgb(hex: str):
function generate_overlay (line 53) | def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, ...
function select_slice_to_plot (line 97) | def select_slice_to_plot(image: np.ndarray, segmentation: np.ndarray) ->...
function select_slice_to_plot2 (line 111) | def select_slice_to_plot2(image: np.ndarray, segmentation: np.ndarray) -...
function plot_overlay (line 130) | def plot_overlay(image_file: str, segmentation_file: str, image_reader_w...
function plot_overlay_preprocessed (line 152) | def plot_overlay_preprocessed(case_file: str, output_file: str, overlay_...
function multiprocessing_plot_overlay (line 169) | def multiprocessing_plot_overlay(list_of_image_files, list_of_seg_files,...
function multiprocessing_plot_overlay_preprocessed (line 180) | def multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_o...
function generate_overlays_from_raw (line 190) | def generate_overlays_from_raw(dataset_name_or_id: Union[int, str], outp...
function generate_overlays_from_preprocessed (line 210) | def generate_overlays_from_preprocessed(dataset_name_or_id: Union[int, s...
function entry_point_generate_overlay (line 243) | def entry_point_generate_overlay():
FILE: Finetune/nnUNet/nnunetv2/utilities/plans_handling/plans_handler.py
class ConfigurationManager (line 32) | class ConfigurationManager(object):
method __init__ (line 33) | def __init__(self, configuration_dict: dict):
method __repr__ (line 36) | def __repr__(self):
method data_identifier (line 40) | def data_identifier(self) -> str:
method preprocessor_name (line 44) | def preprocessor_name(self) -> str:
method preprocessor_class (line 49) | def preprocessor_class(self) -> Type[DefaultPreprocessor]:
method batch_size (line 56) | def batch_size(self) -> int:
method patch_size (line 60) | def patch_size(self) -> List[int]:
method median_image_size_in_voxels (line 64) | def median_image_size_in_voxels(self) -> List[int]:
method spacing (line 68) | def spacing(self) -> List[float]:
method normalization_schemes (line 72) | def normalization_schemes(self) -> List[str]:
method use_mask_for_norm (line 76) | def use_mask_for_norm(self) -> List[bool]:
method UNet_class_name (line 80) | def UNet_class_name(self) -> str:
method UNet_class (line 85) | def UNet_class(self) -> Type[nn.Module]:
method UNet_base_num_features (line 97) | def UNet_base_num_features(self) -> int:
method n_conv_per_stage_encoder (line 101) | def n_conv_per_stage_encoder(self) -> List[int]:
method n_conv_per_stage_decoder (line 105) | def n_conv_per_stage_decoder(self) -> List[int]:
method num_pool_per_axis (line 109) | def num_pool_per_axis(self) -> List[int]:
method pool_op_kernel_sizes (line 113) | def pool_op_kernel_sizes(self) -> List[List[int]]:
method conv_kernel_sizes (line 117) | def conv_kernel_sizes(self) -> List[List[int]]:
method unet_max_num_features (line 121) | def unet_max_num_features(self) -> int:
method resampling_fn_data (line 126) | def resampling_fn_data(self) -> Callable[
method resampling_fn_probabilities (line 139) | def resampling_fn_probabilities(self) -> Callable[
method resampling_fn_seg (line 152) | def resampling_fn_seg(self) -> Callable[
method batch_dice (line 164) | def batch_dice(self) -> bool:
method next_stage_names (line 168) | def next_stage_names(self) -> Union[List[str], None]:
method previous_stage_name (line 176) | def previous_stage_name(self) -> Union[str, None]:
class PlansManager (line 180) | class PlansManager(object):
method __init__ (line 181) | def __init__(self, plans_file_or_dict: Union[str, dict]):
method __repr__ (line 194) | def __repr__(self):
method _internal_resolve_configuration_inheritance (line 197) | def _internal_resolve_configuration_inheritance(self, configuration_na...
method get_configuration (line 222) | def get_configuration(self, configuration_name: str):
method dataset_name (line 231) | def dataset_name(self) -> str:
method plans_name (line 235) | def plans_name(self) -> str:
method original_median_spacing_after_transp (line 239) | def original_median_spacing_after_transp(self) -> List[float]:
method original_median_shape_after_transp (line 243) | def original_median_shape_after_transp(self) -> List[float]:
method image_reader_writer_class (line 248) | def image_reader_writer_class(self) -> Type[BaseReaderWriter]:
method transpose_forward (line 252) | def transpose_forward(self) -> List[int]:
method transpose_backward (line 256) | def transpose_backward(self) -> List[int]:
method available_configurations (line 260) | def available_configurations(self) -> List[str]:
method experiment_planner_class (line 265) | def experiment_planner_class(self) -> Type[ExperimentPlanner]:
method experiment_planner_name (line 273) | def experiment_planner_name(self) -> str:
method label_manager_class (line 278) | def label_manager_class(self) -> Type[LabelManager]:
method get_label_manager (line 281) | def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelMana...
method foreground_intensity_properties_per_channel (line 287) | def foreground_intensity_properties_per_channel(self) -> dict:
FILE: Finetune/nnUNet/nnunetv2/utilities/utils.py
function get_identifiers_from_splitted_dataset_folder (line 26) | def get_identifiers_from_splitted_dataset_folder(folder: str, file_endin...
function create_lists_from_splitted_dataset_folder (line 36) | def create_lists_from_splitted_dataset_folder(folder: str, file_ending: ...
function get_filenames_of_train_images_and_targets (line 51) | def get_filenames_of_train_images_and_targets(raw_dataset_folder: str, d...
FILE: models/voco_head.py
class projection_head (line 23) | class projection_head(nn.Module):
method __init__ (line 24) | def __init__(self, in_dim=768, hidden_dim=2048, out_dim=2048):
method forward (line 41) | def forward(self, input):
class Swin (line 56) | class Swin(nn.Module):
method __init__ (line 57) | def __init__(self, args):
method forward_encs (line 131) | def forward_encs(self, encs):
method forward (line 140) | def forward(self, x_in):
class VoCoHead (line 160) | class VoCoHead(nn.Module):
method __init__ (line 161) | def __init__(self, args):
method _EMA_update_encoder_teacher (line 167) | def _EMA_update_encoder_teacher(self):
method forward (line 173) | def forward(self, img, crops, labels):
function online_assign (line 218) | def online_assign(feats, bases):
function regularization_loss (line 234) | def regularization_loss(bases):
function ce_loss (line 249) | def ce_loss(labels, logits):
FILE: optimizers/lr_scheduler.py
class _LRSchedulerMONAI (line 23) | class _LRSchedulerMONAI(_LRScheduler):
method __init__ (line 27) | def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int,...
class LinearLR (line 42) | class LinearLR(_LRSchedulerMONAI):
method get_lr (line 47) | def get_lr(self):
class ExponentialLR (line 52) | class ExponentialLR(_LRSchedulerMONAI):
method get_lr (line 57) | def get_lr(self):
class WarmupCosineSchedule (line 62) | class WarmupCosineSchedule(LambdaLR):
method __init__ (line 67) | def __init__(
method lr_lambda (line 85) | def lr_lambda(self, step):
class LinearWarmupCosineAnnealingLR (line 92) | class LinearWarmupCosineAnnealingLR(_LRScheduler):
method __init__ (line 93) | def __init__(
method get_lr (line 118) | def get_lr(self) -> List[float]:
method _get_closed_form_lr (line 156) | def _get_closed_form_lr(self) -> List[float]:
FILE: utils/data_utils.py
function get_loader_1k (line 22) | def get_loader_1k(args):
function random_split (line 144) | def random_split(ls):
function get_loader (line 151) | def get_loader(args):
function threshold (line 265) | def threshold(x):
class VoCoAugmentation (line 270) | class VoCoAugmentation():
method __init__ (line 271) | def __init__(self, args, aug):
method __call__ (line 275) | def __call__(self, x_in):
function get_vanilla_transform (line 294) | def get_vanilla_transform(num=2, num_crops=4, roi_small=64, roi=96, max_...
function get_crop_transform (line 331) | def get_crop_transform(num=4, roi_small=64, roi=96, aug=False):
function get_position_label (line 377) | def get_position_label(roi=96, base_roi=96, max_roi=384, num_crops=4):
FILE: utils/ops.py
function patch_rand_drop (line 17) | def patch_rand_drop(args, x, x_rep=None, max_drop=0.3, max_block_sz=0.25...
function rot_rand (line 46) | def rot_rand(args, x_s):
function aug_rand (line 67) | def aug_rand(args, samples):
function concat_image (line 78) | def concat_image(imgs):
function concat_label (line 89) | def concat_label(labels):
FILE: utils/utils.py
function resample_3d (line 17) | def resample_3d(img, target_size):
function dice (line 25) | def dice(x, y):
class AverageMeter (line 34) | class AverageMeter(object):
method __init__ (line 35) | def __init__(self):
method reset (line 38) | def reset(self):
method update (line 44) | def update(self, val, n=1):
function distributed_all_gather (line 51) | def distributed_all_gather(
FILE: voco_train.py
function main (line 42) | def main():
function init_log (line 258) | def init_log(name, level=logging.INFO):
Condensed preview — 329 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (3,034K chars).
[
{
"path": "Finetune/AbdomenAtlas/Atlas_test.py",
"chars": 7484,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may n"
},
{
"path": "Finetune/AbdomenAtlas/Atlas_test.sh",
"chars": 234,
"preview": "test_data_path=./test_examples/AbdomenAtlasTest/\nsave_prediction_path=./test_examples/AbdomenAtlasPredict/\n\ntorchrun --m"
},
{
"path": "Finetune/AbdomenAtlas/check.py",
"chars": 6732,
"preview": "import torch\r\nimport os\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nfrom utils.utils import *\r\nfrom PIL import Image\r\nim"
},
{
"path": "Finetune/AbdomenAtlas/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/AbdomenAtlas/dataset/dataloader_bdmap.py",
"chars": 9543,
"preview": "from monai.transforms import *\n\nimport sys\nimport nibabel as nib\nimport os\nimport torch\nimport numpy as np\nfrom typing i"
},
{
"path": "Finetune/AbdomenAtlas/dataset/dataloader_test.py",
"chars": 8287,
"preview": "from monai.transforms import *\n\nimport sys\nimport nibabel as nib\nimport os\nimport torch\nimport numpy as np\nfrom typing i"
},
{
"path": "Finetune/AbdomenAtlas/dataset/dataset_list/AbdomenAtlas1.0.txt",
"chars": 77924,
"preview": "BDMAP_00000001\nBDMAP_00000002\nBDMAP_00000003\nBDMAP_00000004\nBDMAP_00000005\nBDMAP_00000006\nBDMAP_00000007\nBDMAP_00000008\n"
},
{
"path": "Finetune/AbdomenAtlas/main.py",
"chars": 14863,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/AbdomenAtlas/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/AbdomenAtlas/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/AbdomenAtlas/preprocess/try_load.py",
"chars": 6136,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may n"
},
{
"path": "Finetune/AbdomenAtlas/readme.md",
"chars": 3216,
"preview": "# VoCo for AbdomenAtlas\r\n\r\n<a href=\"https://arxiv.org/abs/2402.17300\"><img src='https://img.shields.io/badge/arXiv-VoCo-"
},
{
"path": "Finetune/AbdomenAtlas/requirements.txt",
"chars": 4403,
"preview": "# packages in environment at /home/lwubf/anaconda3/envs/nnunet:\n#\n# Name Version Bu"
},
{
"path": "Finetune/AbdomenAtlas/train.sh",
"chars": 340,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs\nmkdir -p $logdir\n\ndata_dir=/project/medimgfmod/CT/AbdomenAtlasMini1.0/\ncac"
},
{
"path": "Finetune/AbdomenAtlas/train.slurm",
"chars": 724,
"preview": "#!/bin/bash\n\n# NOTE: Lines starting with \"#SBATCH\" are valid SLURM commands or statements,\n# while those starting "
},
{
"path": "Finetune/AbdomenAtlas/trainer.py",
"chars": 7901,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/AbdomenAtlas/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/AbdomenAtlas/utils/data_trans.py",
"chars": 4621,
"preview": "import math\r\nimport os\r\nfrom copy import deepcopy\r\nimport numpy as np\r\nimport torch\r\nimport pickle\r\nfrom monai import da"
},
{
"path": "Finetune/AbdomenAtlas/utils/mixup.py",
"chars": 732,
"preview": "import torch\r\nimport numpy as np\r\n\r\n\r\ndef mixup(inputs):\r\n batch_size = inputs[0].size(0)\r\n rand = torch.randperm("
},
{
"path": "Finetune/AbdomenAtlas/utils/utils.py",
"chars": 4609,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/check_test.py",
"chars": 3053,
"preview": "\r\nimport argparse\r\nimport os\r\nfrom functools import partial\r\nimport nibabel as nib\r\nimport numpy as np\r\nimport torch\r\nim"
},
{
"path": "Finetune/Amos/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Amos/dataset/dataset.json",
"chars": 17594,
"preview": "{\r\n \"description\": \"0\",\r\n \"labels\": {\r\n \"0\": \"background\",\r\n \"1\": \"Liver\",\r\n \"10\": \"Esophagus"
},
{
"path": "Finetune/Amos/dataset/dataset_test50.json",
"chars": 10394,
"preview": "{\r\n \"description\": \"0\",\r\n \"labels\": {\r\n \"0\": \"background\",\r\n \"1\": \"Liver\",\r\n \"10\": \"Esophagus"
},
{
"path": "Finetune/Amos/dataset_CT.json",
"chars": 33831,
"preview": "{\"name\": \"AMOS\", \"description\": \"Amos: A large-scale abdominal multi-organ benchmark for versatile medical image segment"
},
{
"path": "Finetune/Amos/gen_json.py",
"chars": 4364,
"preview": "from typing import Tuple\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\n\ndef get"
},
{
"path": "Finetune/Amos/inferers.py",
"chars": 22348,
"preview": "\"\"\"Multiview inferer.\"\"\"\r\n\r\nimport warnings\r\nfrom typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Uni"
},
{
"path": "Finetune/Amos/main.py",
"chars": 14855,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Amos/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/pre_cache.py",
"chars": 5522,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/test.py",
"chars": 9675,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/train.sh",
"chars": 146,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs\nmkdir -p $logdir\n\ntorchrun --master_port=21198 main.py \\\n --logdir $log"
},
{
"path": "Finetune/Amos/trainer.py",
"chars": 8549,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Amos/utils/data_test.py",
"chars": 4211,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/utils/data_utils.py",
"chars": 8335,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/utils/utils.py",
"chars": 4450,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Amos/val.py",
"chars": 9297,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/BTCV/dataset/dataset_0.json",
"chars": 4902,
"preview": " {\n \"description\": \"btcv yucheng\",\n \"labels\": {\n \"0\": \"background\",\n \"1\": \"spleen\",\n \"2\": \"rk"
},
{
"path": "Finetune/BTCV/main.py",
"chars": 14920,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/BTCV/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/trainer.py",
"chars": 8564,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/BTCV/utils/data_test.py",
"chars": 4211,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/utils/data_utils.py",
"chars": 7946,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/utils/utils.py",
"chars": 2894,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/BTCV/val.py",
"chars": 9299,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold0_train.csv",
"chars": 119999,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nCP-20.zip,1,CP,2668,3259,52,11,\"[3249, 3250, 3"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold0_valid.csv",
"chars": 85178,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nNormal-12.zip,0,Normal,2015,470,94,1,[470]\r\nNC"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold1_train.csv",
"chars": 119434,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nCP-6.zip,1,CP,1229,3447,144,1,[3447]\r\nCP-26.zi"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold1_valid.csv",
"chars": 85743,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nNormal-2.zip,0,Normal,1740,1050,21,8,\"[1045, 1"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold2_train.csv",
"chars": 118709,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nCP-6.zip,1,CP,1229,3447,144,1,[3447]\r\nCP-26.zi"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_fold2_valid.csv",
"chars": 86468,
"preview": "zip_file,target,label,patient_id,scan_id,n_slice,scan_count,all_scan_ids\r\nNormal-2.zip,0,Normal,1740,1050,21,8,\"[1045, 1"
},
{
"path": "Finetune/CC-CCII/csv/CC_CCII_metadata.csv",
"chars": 10253,
"preview": "patient_id,scan_id,Age,Sex(Male1/Female2),Critical_illness,Liver_function,Lung_function,Progression (Days)\r\n1399,127,57,"
},
{
"path": "Finetune/CC-CCII/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/CC-CCII/eval.py",
"chars": 6327,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may n"
},
{
"path": "Finetune/CC-CCII/main.py",
"chars": 11652,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/CC-CCII/model.py",
"chars": 5996,
"preview": "import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom monai.networks.nets.swin_unetr import *\r\nfrom monai.networ"
},
{
"path": "Finetune/CC-CCII/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/CC-CCII/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/CC-CCII/train.sh",
"chars": 146,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs\nmkdir -p $logdir\n\ntorchrun --master_port=25584 main.py \\\n --logdir $log"
},
{
"path": "Finetune/CC-CCII/trainer.py",
"chars": 7351,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/CC-CCII/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/CC-CCII/utils/data_utils.py",
"chars": 5418,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/CC-CCII/utils/utils.py",
"chars": 2894,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Flare22/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Flare22/dataset/dataset.json",
"chars": 17594,
"preview": "{\r\n \"description\": \"0\",\r\n \"labels\": {\r\n \"0\": \"background\",\r\n \"1\": \"Liver\",\r\n \"10\": \"Esophagus"
},
{
"path": "Finetune/Flare22/dataset/dataset_test50.json",
"chars": 10394,
"preview": "{\r\n \"description\": \"0\",\r\n \"labels\": {\r\n \"0\": \"background\",\r\n \"1\": \"Liver\",\r\n \"10\": \"Esophagus"
},
{
"path": "Finetune/Flare22/inferers.py",
"chars": 22348,
"preview": "\"\"\"Multiview inferer.\"\"\"\r\n\r\nimport warnings\r\nfrom typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Uni"
},
{
"path": "Finetune/Flare22/main.py",
"chars": 14852,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Flare22/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/train.sh",
"chars": 146,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs\nmkdir -p $logdir\n\ntorchrun --master_port=21198 main.py \\\n --logdir $log"
},
{
"path": "Finetune/Flare22/trainer.py",
"chars": 8549,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Flare22/utils/data_test.py",
"chars": 4214,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/utils/data_utils.py",
"chars": 8341,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/utils/utils.py",
"chars": 4450,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Flare22/val.py",
"chars": 9297,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/dataset.json",
"chars": 3366,
"preview": "{\n \"description\": \"0\",\n \"labels\": {\n \"0\": \"background\",\n \"1\": \"Left Ventricle\",\n \"2\": \"whole "
},
{
"path": "Finetune/MM-WHS/inferers.py",
"chars": 22348,
"preview": "\"\"\"Multiview inferer.\"\"\"\r\n\r\nimport warnings\r\nfrom typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Uni"
},
{
"path": "Finetune/MM-WHS/main.py",
"chars": 14398,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/MM-WHS/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/pretrained_models/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/MM-WHS/test.py",
"chars": 9322,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/train.sh",
"chars": 163,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs\nmkdir -p $logdir\n\ntorchrun --master_port=21120 --max-restart=10 main.py \\\n"
},
{
"path": "Finetune/MM-WHS/trainer.py",
"chars": 8549,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/MM-WHS/utils/data_utils.py",
"chars": 7171,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/MM-WHS/utils/utils.py",
"chars": 5862,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Word/dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Word/dataset/dataset_word.json",
"chars": 13617,
"preview": "{\n \"name\": \"WORD-V0.1.0\",\n \"description\": \"Whole abdomen ORgan segmentation Dataset (WORD), just for research use "
},
{
"path": "Finetune/Word/main.py",
"chars": 14804,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Word/optimizers/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Word/optimizers/lr_scheduler.py",
"chars": 6511,
"preview": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Word/train.sh",
"chars": 165,
"preview": "now=$(date +\"%Y%m%d_%H%M%S\")\nlogdir=runs/logs_swin_large_scratch\nmkdir -p $logdir\n\ntorchrun --master_port=20482 main.py "
},
{
"path": "Finetune/Word/train.slurm",
"chars": 730,
"preview": "#!/bin/bash\n\n# NOTE: Lines starting with \"#SBATCH\" are valid SLURM commands or statements,\n# while those starting "
},
{
"path": "Finetune/Word/trainer.py",
"chars": 8585,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Word/utils/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/Word/utils/data_utils.py",
"chars": 6681,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/Word/utils/utils.py",
"chars": 4450,
"preview": "# Copyright 2020 - 2022 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not"
},
{
"path": "Finetune/nnUNet/LICENSE",
"chars": 11427,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "Finetune/nnUNet/documentation/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/documentation/benchmarking.md",
"chars": 8235,
"preview": "# nnU-Netv2 benchmarks\n\nDoes your system run like it should? Is your epoch time longer than expected? What epoch times s"
},
{
"path": "Finetune/nnUNet/documentation/changelog.md",
"chars": 4022,
"preview": "# What is different in v2?\n\n- We now support **hierarchical labels** (named regions in nnU-Net). For example, instead of"
},
{
"path": "Finetune/nnUNet/documentation/competitions/AutoPETII.md",
"chars": 5756,
"preview": "# Look Ma, no code: fine tuning nnU-Net for the AutoPET II challenge by only adjusting its JSON plans\n\nPlease cite our p"
},
{
"path": "Finetune/nnUNet/documentation/convert_msd_dataset.md",
"chars": 99,
"preview": "Use `nnUNetv2_convert_MSD_dataset`.\n\nRead `nnUNetv2_convert_MSD_dataset -h` for usage instructions."
},
{
"path": "Finetune/nnUNet/documentation/dataset_format.md",
"chars": 13040,
"preview": "# nnU-Net dataset format\nThe only way to bring your data into nnU-Net is by storing it in a specific format. Due to nnU-"
},
{
"path": "Finetune/nnUNet/documentation/dataset_format_inference.md",
"chars": 1487,
"preview": "# Data format for Inference \nRead the documentation on the overall [data format](dataset_format.md) first!\n\nThe data for"
},
{
"path": "Finetune/nnUNet/documentation/explanation_normalization.md",
"chars": 2858,
"preview": "# Intensity normalization in nnU-Net \n\nThe type of intensity normalization applied in nnU-Net can be controlled via the "
},
{
"path": "Finetune/nnUNet/documentation/explanation_plans_files.md",
"chars": 11896,
"preview": "# Modifying the nnU-Net Configurations\n\nnnU-Net provides unprecedented out-of-the-box segmentation performance for essen"
},
{
"path": "Finetune/nnUNet/documentation/extending_nnunet.md",
"chars": 3581,
"preview": "# Extending nnU-Net\nWe hope that the new structure of nnU-Net v2 makes it much more intuitive on how to modify it! We ca"
},
{
"path": "Finetune/nnUNet/documentation/how_to_use_nnunet.md",
"chars": 16619,
"preview": "## How to run nnU-Net on a new dataset\nGiven some dataset, nnU-Net fully automatically configures an entire segmentation"
},
{
"path": "Finetune/nnUNet/documentation/installation_instructions.md",
"chars": 5192,
"preview": "# System requirements\n\n## Operating System\nnnU-Net has been tested on Linux (Ubuntu 18.04, 20.04, 22.04; centOS, RHEL), "
},
{
"path": "Finetune/nnUNet/documentation/manual_data_splits.md",
"chars": 2325,
"preview": "# How to generate custom splits in nnU-Net\n\nSometimes, the default 5-fold cross-validation split by nnU-Net does not fit"
},
{
"path": "Finetune/nnUNet/documentation/pretraining_and_finetuning.md",
"chars": 3336,
"preview": "# Pretraining with nnU-Net\n\n## Intro\n\nSo far nnU-Net only supports supervised pre-training, meaning that you train a reg"
},
{
"path": "Finetune/nnUNet/documentation/region_based_training.md",
"chars": 4153,
"preview": "# Region-based training\n\n## What is this about?\nIn some segmentation tasks, most prominently the \n[Brain Tumor Segmentat"
},
{
"path": "Finetune/nnUNet/documentation/run_inference_with_pretrained_models.md",
"chars": 450,
"preview": "# How to run inference with pretrained models\n**Important:** Pretrained weights from nnU-Net v1 are NOT compatible with "
},
{
"path": "Finetune/nnUNet/documentation/set_environment_variables.md",
"chars": 3133,
"preview": "# How to set environment variables\n\nnnU-Net requires some environment variables so that it always knows where the raw da"
},
{
"path": "Finetune/nnUNet/documentation/setting_up_paths.md",
"chars": 1430,
"preview": "# Setting up Paths\n\nnnU-Net relies on environment variables to know where raw data, preprocessed data and trained model "
},
{
"path": "Finetune/nnUNet/documentation/tldr_migration_guide_from_v1.md",
"chars": 1700,
"preview": "# TLDR Migration Guide from nnU-Net V1\n\n- nnU-Net V2 can be installed simultaneously with V1. They won't get in each oth"
},
{
"path": "Finetune/nnUNet/msd.txt",
"chars": 3684,
"preview": "A. convert\r\npython Dataset220_KiTS2023.py /data/linshan/CTs/kits23/dataset/\r\npython Dataset218_Amos2022_task1.py /data/l"
},
{
"path": "Finetune/nnUNet/nnunetv2/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/benchmarking/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py",
"chars": 2041,
"preview": "if __name__ == '__main__':\n \"\"\"\n This code probably only works within the DKFZ infrastructure (using LSF). You wil"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/benchmarking/summarize_benchmark_results.py",
"chars": 3272,
"preview": "from batchgenerators.utilities.file_and_folder_operations import join, load_json, isfile\nfrom nnunetv2.utilities.dataset"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/collect_results_custom_Decathlon.py",
"chars": 5696,
"preview": "from typing import Tuple\n\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\nfrom nn"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py",
"chars": 708,
"preview": "from batchgenerators.utilities.file_and_folder_operations import *\n\nfrom nnunetv2.batch_running.collect_results_custom_D"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py",
"chars": 3603,
"preview": "from copy import deepcopy\nimport numpy as np\n\n\ndef merge(dict1, dict2):\n keys = np.unique(list(dict1.keys()) + list(d"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/release_trainings/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py",
"chars": 5662,
"preview": "from typing import Tuple\n\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\nfrom nn"
},
{
"path": "Finetune/nnUNet/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py",
"chars": 3833,
"preview": "from copy import deepcopy\nimport numpy as np\n\n\ndef merge(dict1, dict2):\n keys = np.unique(list(dict1.keys()) + list(d"
},
{
"path": "Finetune/nnUNet/nnunetv2/configuration.py",
"chars": 416,
"preview": "import os\n\nfrom nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA\n\ndefault_num_processes = 8 if 'nnUNet_"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset017_BTCV.py",
"chars": 5356,
"preview": "# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\r\n#\r\n#"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset027_ACDC.py",
"chars": 3194,
"preview": "import os\nimport shutil\nfrom pathlib import Path\n\nfrom nnunetv2.dataset_conversion.generate_dataset_json import generate"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset073_Fluo_C3DH_A549_SIM.py",
"chars": 4162,
"preview": "from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json\nfrom nnunetv2.paths import nnUNet_ra"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset114_MNMs.py",
"chars": 9018,
"preview": "import csv\nimport os\nimport random\nfrom pathlib import Path\n\nimport nibabel as nib\nfrom batchgenerators.utilities.file_a"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py",
"chars": 2406,
"preview": "import shutil\nfrom pathlib import Path\n\nfrom nnunetv2.dataset_conversion.Dataset027_ACDC import make_out_dirs\nfrom nnune"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py",
"chars": 3430,
"preview": "import multiprocessing\nimport shutil\nfrom multiprocessing import Pool\n\nfrom batchgenerators.utilities.file_and_folder_op"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset137_BraTS21.py",
"chars": 4051,
"preview": "import multiprocessing\nimport shutil\nfrom multiprocessing import Pool\n\nimport SimpleITK as sitk\nimport numpy as np\nfrom "
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py",
"chars": 3808,
"preview": "from batchgenerators.utilities.file_and_folder_operations import *\nimport shutil\nfrom generate_dataset_json import gener"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py",
"chars": 3555,
"preview": "from batchgenerators.utilities.file_and_folder_operations import *\nimport shutil\nfrom nnunetv2.dataset_conversion.genera"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py",
"chars": 2109,
"preview": "from batchgenerators.utilities.file_and_folder_operations import *\nimport shutil\nfrom generate_dataset_json import gener"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py",
"chars": 3167,
"preview": "from batchgenerators.utilities.file_and_folder_operations import *\nimport shutil\nfrom nnunetv2.dataset_conversion.genera"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py",
"chars": 1369,
"preview": "import os\n\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\nfrom nnunetv2.paths import nnUNet_raw\nfro"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/convert_MSD_dataset.py",
"chars": 6085,
"preview": "import argparse\nimport multiprocessing\nimport shutil\nfrom multiprocessing import Pool\nfrom typing import Optional\nimport"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py",
"chars": 2930,
"preview": "import shutil\nfrom copy import deepcopy\n\nfrom batchgenerators.utilities.file_and_folder_operations import join, maybe_mk"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py",
"chars": 3250,
"preview": "import SimpleITK as sitk\nimport shutil\n\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations imp"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py",
"chars": 1468,
"preview": "import shutil\n\nfrom batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json\n\nfrom "
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py",
"chars": 1364,
"preview": "import shutil\n\nfrom batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json\n\nfrom "
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py",
"chars": 1085,
"preview": "import shutil\n\nfrom batchgenerators.utilities.file_and_folder_operations import isdir, join\n\nfrom nnunetv2.utilities.dat"
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/dataset_conversion/generate_dataset_json.py",
"chars": 4045,
"preview": "from typing import Tuple\n\nfrom batchgenerators.utilities.file_and_folder_operations import save_json, join\n\n\ndef generat"
},
{
"path": "Finetune/nnUNet/nnunetv2/ensembling/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/ensembling/ensemble.py",
"chars": 10027,
"preview": "import argparse\nimport multiprocessing\nimport shutil\nfrom copy import deepcopy\nfrom multiprocessing import Pool\nfrom typ"
},
{
"path": "Finetune/nnUNet/nnunetv2/evaluation/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/evaluation/accumulate_cv_results.py",
"chars": 3273,
"preview": "import shutil\nfrom typing import Union, List, Tuple\n\nfrom batchgenerators.utilities.file_and_folder_operations import lo"
},
{
"path": "Finetune/nnUNet/nnunetv2/evaluation/evaluate_predictions.py",
"chars": 12563,
"preview": "import multiprocessing\nimport os\nfrom copy import deepcopy\nfrom multiprocessing import Pool\nfrom typing import Tuple, Li"
},
{
"path": "Finetune/nnUNet/nnunetv2/evaluation/find_best_configuration.py",
"chars": 18653,
"preview": "import argparse\nimport os.path\nfrom copy import deepcopy\nfrom typing import Union, List, Tuple\n\nfrom batchgenerators.uti"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py",
"chars": 11563,
"preview": "import multiprocessing\nimport os\nfrom time import sleep\nfrom typing import List, Type, Union\n\nimport numpy as np\nfrom ba"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py",
"chars": 31121,
"preview": "import os.path\nimport shutil\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom typing import List, Union, T"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/network_topology.py",
"chars": 3796,
"preview": "from copy import deepcopy\nimport numpy as np\n\n\ndef get_shape_must_be_divisible_by(net_numpool_per_axis):\n return 2 **"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/readme.md",
"chars": 1575,
"preview": "What do experiment planners need to do (these are notes for myself while rewriting nnU-Net, they are provided as is \nwit"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py",
"chars": 3224,
"preview": "from typing import Union, List, Tuple\n\nfrom torch import nn\n\nfrom nnunetv2.experiment_planning.experiment_planners.defau"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/plan_and_preprocess_api.py",
"chars": 7894,
"preview": "import shutil\nfrom typing import List, Type, Optional, Tuple, Union\n\nimport nnunetv2\nfrom batchgenerators.utilities.file"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py",
"chars": 16463,
"preview": "from nnunetv2.configuration import default_num_processes\nfrom nnunetv2.experiment_planning.plan_and_preprocess_api impor"
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py",
"chars": 4469,
"preview": "import argparse\nfrom typing import Union\n\nfrom batchgenerators.utilities.file_and_folder_operations import join, isdir, "
},
{
"path": "Finetune/nnUNet/nnunetv2/experiment_planning/verify_dataset_integrity.py",
"chars": 12227,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/base_reader_writer.py",
"chars": 5626,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/natural_image_reader_writer.py",
"chars": 3344,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/nibabel_reader_writer.py",
"chars": 8631,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/reader_writer_registry.py",
"chars": 3865,
"preview": "import traceback\nfrom typing import Type\n\nfrom batchgenerators.utilities.file_and_folder_operations import join\n\nimport "
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/readme.md",
"chars": 285,
"preview": "- Derive your adapter from `BaseReaderWriter`. \n- Reimplement all abstractmethods. \n- make sure to support 2d and 3d inp"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/simpleitk_reader_writer.py",
"chars": 5661,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/imageio/tif_reader_writer.py",
"chars": 4731,
"preview": "# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center\n"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/data_iterators.py",
"chars": 16204,
"preview": "import multiprocessing\nimport queue\nfrom torch.multiprocessing import Event, Process, Queue, Manager\n\nfrom time import s"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/examples.py",
"chars": 6485,
"preview": "if __name__ == '__main__':\n from nnunetv2.paths import nnUNet_results, nnUNet_raw\n import torch\n from batchgene"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/export_prediction.py",
"chars": 8259,
"preview": "import os\nfrom copy import deepcopy\nfrom typing import Union, List\n\nimport numpy as np\nimport torch\nfrom acvl_utils.crop"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/predict_from_raw_data.py",
"chars": 55491,
"preview": "import inspect\nimport itertools\nimport multiprocessing\nimport os\nimport traceback\nfrom copy import deepcopy\nfrom time im"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/readme.md",
"chars": 10556,
"preview": "The nnU-Net inference is now much more dynamic than before, allowing you to more seamlessly integrate nnU-Net into \nyour"
},
{
"path": "Finetune/nnUNet/nnunetv2/inference/sliding_window_prediction.py",
"chars": 2928,
"preview": "from functools import lru_cache\n\nimport numpy as np\nimport torch\nfrom typing import Union, Tuple, List\nfrom acvl_utils.c"
},
{
"path": "Finetune/nnUNet/nnunetv2/model_sharing/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "Finetune/nnUNet/nnunetv2/model_sharing/entry_points.py",
"chars": 3528,
"preview": "from nnunetv2.model_sharing.model_download import download_and_install_from_url\nfrom nnunetv2.model_sharing.model_export"
},
{
"path": "Finetune/nnUNet/nnunetv2/model_sharing/model_download.py",
"chars": 1856,
"preview": "from typing import Optional\n\nimport requests\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom tim"
},
{
"path": "Finetune/nnUNet/nnunetv2/model_sharing/model_export.py",
"chars": 6830,
"preview": "import zipfile\n\nfrom nnunetv2.utilities.file_path_utilities import *\n\n\ndef export_pretrained_model(dataset_name_or_id: U"
},
{
"path": "Finetune/nnUNet/nnunetv2/model_sharing/model_import.py",
"chars": 202,
"preview": "import zipfile\n\nfrom nnunetv2.paths import nnUNet_results\n\n\ndef install_model_from_zip_file(zip_file: str):\n with zip"
}
]
// ... and 129 more files (download for full content)
About this extraction
This page contains the full source code of the Luffy03/VoCo GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 329 files (2.7 MB, approximately 734.1k tokens) and a symbol index of 971 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.